From fb494b9ce268e074a1cd41aa9a7e2759453ed084 Mon Sep 17 00:00:00 2001 From: harshalmore31 Date: Thu, 19 Jun 2025 10:32:24 +0530 Subject: [PATCH 01/86] refactor: enhance error handling and response validation in tool execution and output processing for multi-modal functioanlity --- swarms/structs/agent.py | 92 +++++++++++++++++++++------------ swarms/tools/base_tool.py | 77 ++++++++++++++++++++------- swarms/utils/litellm_wrapper.py | 73 ++++++++++++++++++++------ 3 files changed, 173 insertions(+), 69 deletions(-) diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index dce3c2c2..1f4a0b5a 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -2800,48 +2800,72 @@ class Agent: ) def execute_tools(self, response: any, loop_count: int): + try: + output = ( + self.tool_struct.execute_function_calls_from_api_response( + response + ) + ) + + # Handle empty or None output + if not output: + logger.info("No tool function calls found in response") + return - output = ( - self.tool_struct.execute_function_calls_from_api_response( - response + self.short_memory.add( + role="Tool Executor", + content=format_data_structure(output), ) - ) - self.short_memory.add( - role="Tool Executor", - content=format_data_structure(output), - ) + self.pretty_print( + f"{format_data_structure(output)}", + loop_count, + ) - self.pretty_print( - f"{format_data_structure(output)}", - loop_count, - ) + # Now run the LLM again without tools - create a temporary LLM instance + # instead of modifying the cached one + # Create a temporary LLM instance without tools for the follow-up call + try: + temp_llm = self.temp_llm_instance_for_tool_summary() - # Now run the LLM again without tools - create a temporary LLM instance - # instead of modifying the cached one - # Create a temporary LLM instance without tools for the follow-up call - temp_llm = self.temp_llm_instance_for_tool_summary() + tool_response = temp_llm.run( + f""" + Please analyze and summarize the following tool 
execution output in a clear and concise way. + Focus on the key information and insights that would be most relevant to the user's original request. + If there are any errors or issues, highlight them prominently. + + Tool Output: + {output} + """ + ) - tool_response = temp_llm.run( - f""" - Please analyze and summarize the following tool execution output in a clear and concise way. - Focus on the key information and insights that would be most relevant to the user's original request. - If there are any errors or issues, highlight them prominently. - - Tool Output: - {output} - """ - ) + self.short_memory.add( + role=self.agent_name, + content=tool_response, + ) - self.short_memory.add( - role=self.agent_name, - content=tool_response, - ) + self.pretty_print( + f"{tool_response}", + loop_count, + ) + except Exception as e: + logger.error(f"Error in tool summary generation: {e}") + # Add a fallback summary + fallback_summary = f"Tool execution completed. Output: {format_data_structure(output)}" + self.short_memory.add( + role=self.agent_name, + content=fallback_summary, + ) + self.pretty_print(fallback_summary, loop_count) - self.pretty_print( - f"{tool_response}", - loop_count, - ) + except Exception as e: + logger.error(f"Error in tool execution: {e}") + error_message = f"Tool execution failed: {str(e)}" + self.short_memory.add( + role="Tool Executor", + content=error_message, + ) + self.pretty_print(error_message, loop_count) def list_output_types(self): return OutputType diff --git a/swarms/tools/base_tool.py b/swarms/tools/base_tool.py index 04add0c7..97366998 100644 --- a/swarms/tools/base_tool.py +++ b/swarms/tools/base_tool.py @@ -2253,17 +2253,31 @@ class BaseTool(BaseModel): else: # Convert string to dict if needed if isinstance(api_response, str): + # Handle empty or whitespace-only strings + api_response = api_response.strip() + if not api_response: + self._log_if_verbose( + "warning", + "Empty API response string received, returning empty list" + ) + 
return [] + try: api_response = json.loads(api_response) except json.JSONDecodeError as e: - raise ToolValidationError( - f"Invalid JSON in API response: {e}" - ) from e + self._log_if_verbose( + "error", + f"Failed to parse JSON from API response: {e}. Response: '{api_response[:100]}...'" + ) + # If JSON parsing fails, try to continue without function calls + return [] if not isinstance(api_response, dict): - raise ToolValidationError( - "API response must be a dictionary, JSON string, BaseModel, or list of tool calls" + self._log_if_verbose( + "warning", + f"API response is not a dictionary (type: {type(api_response)}), returning empty list" ) + return [] # Extract function calls from dictionary response function_calls = ( @@ -2387,11 +2401,15 @@ class BaseTool(BaseModel): if name: try: # Parse arguments JSON string - arguments = ( - json.loads(arguments_str) - if isinstance(arguments_str, str) - else arguments_str - ) + if isinstance(arguments_str, str): + # Handle empty or whitespace-only arguments + arguments_str = arguments_str.strip() + if not arguments_str: + arguments = {} + else: + arguments = json.loads(arguments_str) + else: + arguments = arguments_str if arguments_str is not None else {} function_calls.append( { @@ -2404,7 +2422,16 @@ class BaseTool(BaseModel): except json.JSONDecodeError as e: self._log_if_verbose( "error", - f"Failed to parse arguments for {name}: {e}", + f"Failed to parse arguments for {name}: {e}. 
Using empty dict instead.", + ) + # Use empty dict as fallback + function_calls.append( + { + "name": name, + "arguments": {}, + "id": response.get("id"), + "type": "openai", + } ) # Check for choices[].message.tool_calls format @@ -2885,12 +2912,15 @@ class BaseTool(BaseModel): if name: try: - # Parse arguments JSON string - arguments = ( - json.loads(arguments_str) - if isinstance(arguments_str, str) - else arguments_str - ) + # Parse arguments JSON string with better error handling + if isinstance(arguments_str, str): + arguments_str = arguments_str.strip() + if not arguments_str: + arguments = {} + else: + arguments = json.loads(arguments_str) + else: + arguments = arguments_str if arguments_str is not None else {} function_calls.append( { @@ -2905,7 +2935,18 @@ class BaseTool(BaseModel): except json.JSONDecodeError as e: self._log_if_verbose( "error", - f"Failed to parse arguments for {name}: {e}", + f"Failed to parse arguments for {name}: {e}. Using empty dict instead.", + ) + # Use empty dict as fallback + function_calls.append( + { + "name": name, + "arguments": {}, + "id": getattr( + tool_call, "id", None + ), + "type": "openai", + } ) # Handle dictionary representations of tool calls diff --git a/swarms/utils/litellm_wrapper.py b/swarms/utils/litellm_wrapper.py index 6aa5c7d3..7f05f576 100644 --- a/swarms/utils/litellm_wrapper.py +++ b/swarms/utils/litellm_wrapper.py @@ -152,21 +152,45 @@ class LiteLLM: ) def output_for_tools(self, response: any): - if self.mcp_call is True: - out = response.choices[0].message.tool_calls[0].function - output = { - "function": { - "name": out.name, - "arguments": out.arguments, - } - } - return output - else: - out = response.choices[0].message.tool_calls - - if isinstance(out, BaseModel): - out = out.model_dump() - return out + try: + if self.mcp_call is True: + # Validate response structure for MCP calls + if (hasattr(response, 'choices') and + len(response.choices) > 0 and + hasattr(response.choices[0], 'message') and + 
hasattr(response.choices[0].message, 'tool_calls') and + response.choices[0].message.tool_calls and + len(response.choices[0].message.tool_calls) > 0): + + out = response.choices[0].message.tool_calls[0].function + output = { + "function": { + "name": out.name, + "arguments": out.arguments, + } + } + return output + else: + logger.warning("Invalid MCP response structure, returning empty dict") + return {} + else: + # Validate response structure for regular tool calls + if (hasattr(response, 'choices') and + len(response.choices) > 0 and + hasattr(response.choices[0], 'message') and + hasattr(response.choices[0].message, 'tool_calls')): + + out = response.choices[0].message.tool_calls + + if isinstance(out, BaseModel): + out = out.model_dump() + return out + else: + logger.warning("Invalid tool response structure, returning empty list") + return [] + except Exception as e: + logger.error(f"Error processing tool response: {e}") + return {} if self.mcp_call else [] def _prepare_messages( self, @@ -449,14 +473,29 @@ class LiteLLM: # Make the completion call response = completion(**completion_params) + # Validate response structure before processing + if not hasattr(response, 'choices') or not response.choices: + logger.error("Invalid response: no choices found") + return "Error: Invalid response from API" + + if not hasattr(response.choices[0], 'message'): + logger.error("Invalid response: no message found in first choice") + return "Error: Invalid response structure" + # Handle tool-based response - if self.tools_list_dictionary is not None: + if (self.tools_list_dictionary is not None and + hasattr(response.choices[0].message, 'tool_calls') and + response.choices[0].message.tool_calls is not None): return self.output_for_tools(response) elif self.return_all is True: return response.model_dump() else: # Return standard response content - return response.choices[0].message.content + content = response.choices[0].message.content + if content is None: + 
logger.warning("Response content is None, returning empty string") + return "" + return content except LiteLLMException as error: logger.error( From 227f8b36cce8b7d286ba606b69fe45e00a62d2f6 Mon Sep 17 00:00:00 2001 From: harshalmore31 Date: Thu, 19 Jun 2025 10:37:56 +0530 Subject: [PATCH 02/86] Update swarms/tools/base_tool.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- swarms/tools/base_tool.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/swarms/tools/base_tool.py b/swarms/tools/base_tool.py index 97366998..43c35917 100644 --- a/swarms/tools/base_tool.py +++ b/swarms/tools/base_tool.py @@ -2402,12 +2402,7 @@ class BaseTool(BaseModel): try: # Parse arguments JSON string if isinstance(arguments_str, str): - # Handle empty or whitespace-only arguments - arguments_str = arguments_str.strip() - if not arguments_str: - arguments = {} - else: - arguments = json.loads(arguments_str) + arguments = self._parse_json_string(arguments_str) else: arguments = arguments_str if arguments_str is not None else {} From d6fa21af464a02b85a79b4b0df3a38ace3f37f0e Mon Sep 17 00:00:00 2001 From: harshalmore31 Date: Fri, 20 Jun 2025 18:57:32 +0530 Subject: [PATCH 03/86] refactor: streamline tool response handling and improve error logging in LiteLLM and BaseTool --- swarms/structs/agent.py | 92 ++++++++++++--------------------- swarms/tools/base_tool.py | 58 +++++---------------- swarms/utils/litellm_wrapper.py | 73 ++++++-------------------- 3 files changed, 64 insertions(+), 159 deletions(-) diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 1f4a0b5a..dce3c2c2 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -2800,72 +2800,48 @@ class Agent: ) def execute_tools(self, response: any, loop_count: int): - try: - output = ( - self.tool_struct.execute_function_calls_from_api_response( - response - ) - ) - - # Handle empty or None output - if not output: - logger.info("No tool function calls found in 
response") - return - self.short_memory.add( - role="Tool Executor", - content=format_data_structure(output), + output = ( + self.tool_struct.execute_function_calls_from_api_response( + response ) + ) - self.pretty_print( - f"{format_data_structure(output)}", - loop_count, - ) + self.short_memory.add( + role="Tool Executor", + content=format_data_structure(output), + ) - # Now run the LLM again without tools - create a temporary LLM instance - # instead of modifying the cached one - # Create a temporary LLM instance without tools for the follow-up call - try: - temp_llm = self.temp_llm_instance_for_tool_summary() + self.pretty_print( + f"{format_data_structure(output)}", + loop_count, + ) - tool_response = temp_llm.run( - f""" - Please analyze and summarize the following tool execution output in a clear and concise way. - Focus on the key information and insights that would be most relevant to the user's original request. - If there are any errors or issues, highlight them prominently. - - Tool Output: - {output} - """ - ) + # Now run the LLM again without tools - create a temporary LLM instance + # instead of modifying the cached one + # Create a temporary LLM instance without tools for the follow-up call + temp_llm = self.temp_llm_instance_for_tool_summary() - self.short_memory.add( - role=self.agent_name, - content=tool_response, - ) + tool_response = temp_llm.run( + f""" + Please analyze and summarize the following tool execution output in a clear and concise way. + Focus on the key information and insights that would be most relevant to the user's original request. + If there are any errors or issues, highlight them prominently. + + Tool Output: + {output} + """ + ) - self.pretty_print( - f"{tool_response}", - loop_count, - ) - except Exception as e: - logger.error(f"Error in tool summary generation: {e}") - # Add a fallback summary - fallback_summary = f"Tool execution completed. 
Output: {format_data_structure(output)}" - self.short_memory.add( - role=self.agent_name, - content=fallback_summary, - ) - self.pretty_print(fallback_summary, loop_count) + self.short_memory.add( + role=self.agent_name, + content=tool_response, + ) - except Exception as e: - logger.error(f"Error in tool execution: {e}") - error_message = f"Tool execution failed: {str(e)}" - self.short_memory.add( - role="Tool Executor", - content=error_message, - ) - self.pretty_print(error_message, loop_count) + self.pretty_print( + f"{tool_response}", + loop_count, + ) def list_output_types(self): return OutputType diff --git a/swarms/tools/base_tool.py b/swarms/tools/base_tool.py index 43c35917..f8662fa2 100644 --- a/swarms/tools/base_tool.py +++ b/swarms/tools/base_tool.py @@ -2253,15 +2253,6 @@ class BaseTool(BaseModel): else: # Convert string to dict if needed if isinstance(api_response, str): - # Handle empty or whitespace-only strings - api_response = api_response.strip() - if not api_response: - self._log_if_verbose( - "warning", - "Empty API response string received, returning empty list" - ) - return [] - try: api_response = json.loads(api_response) except json.JSONDecodeError as e: @@ -2269,7 +2260,6 @@ class BaseTool(BaseModel): "error", f"Failed to parse JSON from API response: {e}. 
Response: '{api_response[:100]}...'" ) - # If JSON parsing fails, try to continue without function calls return [] if not isinstance(api_response, dict): @@ -2401,10 +2391,11 @@ class BaseTool(BaseModel): if name: try: # Parse arguments JSON string - if isinstance(arguments_str, str): - arguments = self._parse_json_string(arguments_str) - else: - arguments = arguments_str if arguments_str is not None else {} + arguments = ( + json.loads(arguments_str) + if isinstance(arguments_str, str) + else arguments_str + ) function_calls.append( { @@ -2417,16 +2408,7 @@ class BaseTool(BaseModel): except json.JSONDecodeError as e: self._log_if_verbose( "error", - f"Failed to parse arguments for {name}: {e}. Using empty dict instead.", - ) - # Use empty dict as fallback - function_calls.append( - { - "name": name, - "arguments": {}, - "id": response.get("id"), - "type": "openai", - } + f"Failed to parse arguments for {name}: {e}", ) # Check for choices[].message.tool_calls format @@ -2907,15 +2889,12 @@ class BaseTool(BaseModel): if name: try: - # Parse arguments JSON string with better error handling - if isinstance(arguments_str, str): - arguments_str = arguments_str.strip() - if not arguments_str: - arguments = {} - else: - arguments = json.loads(arguments_str) - else: - arguments = arguments_str if arguments_str is not None else {} + # Parse arguments JSON string + arguments = ( + json.loads(arguments_str) + if isinstance(arguments_str, str) + else arguments_str + ) function_calls.append( { @@ -2930,18 +2909,7 @@ class BaseTool(BaseModel): except json.JSONDecodeError as e: self._log_if_verbose( "error", - f"Failed to parse arguments for {name}: {e}. 
Using empty dict instead.", - ) - # Use empty dict as fallback - function_calls.append( - { - "name": name, - "arguments": {}, - "id": getattr( - tool_call, "id", None - ), - "type": "openai", - } + f"Failed to parse arguments for {name}: {e}", ) # Handle dictionary representations of tool calls diff --git a/swarms/utils/litellm_wrapper.py b/swarms/utils/litellm_wrapper.py index 7f05f576..6aa5c7d3 100644 --- a/swarms/utils/litellm_wrapper.py +++ b/swarms/utils/litellm_wrapper.py @@ -152,45 +152,21 @@ class LiteLLM: ) def output_for_tools(self, response: any): - try: - if self.mcp_call is True: - # Validate response structure for MCP calls - if (hasattr(response, 'choices') and - len(response.choices) > 0 and - hasattr(response.choices[0], 'message') and - hasattr(response.choices[0].message, 'tool_calls') and - response.choices[0].message.tool_calls and - len(response.choices[0].message.tool_calls) > 0): - - out = response.choices[0].message.tool_calls[0].function - output = { - "function": { - "name": out.name, - "arguments": out.arguments, - } - } - return output - else: - logger.warning("Invalid MCP response structure, returning empty dict") - return {} - else: - # Validate response structure for regular tool calls - if (hasattr(response, 'choices') and - len(response.choices) > 0 and - hasattr(response.choices[0], 'message') and - hasattr(response.choices[0].message, 'tool_calls')): - - out = response.choices[0].message.tool_calls - - if isinstance(out, BaseModel): - out = out.model_dump() - return out - else: - logger.warning("Invalid tool response structure, returning empty list") - return [] - except Exception as e: - logger.error(f"Error processing tool response: {e}") - return {} if self.mcp_call else [] + if self.mcp_call is True: + out = response.choices[0].message.tool_calls[0].function + output = { + "function": { + "name": out.name, + "arguments": out.arguments, + } + } + return output + else: + out = response.choices[0].message.tool_calls + + if 
isinstance(out, BaseModel): + out = out.model_dump() + return out def _prepare_messages( self, @@ -473,29 +449,14 @@ class LiteLLM: # Make the completion call response = completion(**completion_params) - # Validate response structure before processing - if not hasattr(response, 'choices') or not response.choices: - logger.error("Invalid response: no choices found") - return "Error: Invalid response from API" - - if not hasattr(response.choices[0], 'message'): - logger.error("Invalid response: no message found in first choice") - return "Error: Invalid response structure" - # Handle tool-based response - if (self.tools_list_dictionary is not None and - hasattr(response.choices[0].message, 'tool_calls') and - response.choices[0].message.tool_calls is not None): + if self.tools_list_dictionary is not None: return self.output_for_tools(response) elif self.return_all is True: return response.model_dump() else: # Return standard response content - content = response.choices[0].message.content - if content is None: - logger.warning("Response content is None, returning empty string") - return "" - return content + return response.choices[0].message.content except LiteLLMException as error: logger.error( From fcf52332d141ffe60020037c46154f3adb0bb2bf Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 24 Jun 2025 15:58:27 -0700 Subject: [PATCH 04/86] docs, contributing.md, multi tool mcp execution, and more --- CONTRIBUTING.md | 204 ++++++- docs/mkdocs.yml | 5 +- docs/swarms/examples/templates_index.md | 72 +++ example.py | 3 +- .../agent_use => mcp}/agent_mcp.py | 4 +- examples/mcp/agent_multi_mcp_connections.py | 49 ++ .../{tools => mcp}/mcp_examples/agent_mcp.py | 0 .../agent_use/agent_tools_dict_example.py | 0 .../mcp_examples/agent_use/mcp_exampler.py | 0 .../mcp_examples/utils/find_tools_on_mcp.py | 0 .../mcp_examples/utils/mcp_execute_example.py | 0 .../utils/mcp_load_tools_example.py | 0 .../utils/mcp_multiserver_tool_fetch.py | 0 
examples/mcp/mcp_utils/mcp_client_call.py | 12 + .../mcp_utils/mcp_multiple_servers_example.py | 234 ++++++++ .../servers => mcp/mcp_utils}/mcp_test.py | 0 .../mcp_utils}/okx_crypto_server.py | 0 .../mcp_utils/test_multiple_mcp_servers.py | 54 ++ examples/models/reasoning_duo_batched.py | 2 +- .../mar/multi_agent_router_minimal.py | 4 +- .../tools}/new_tools_examples.py | 14 +- .../tools/swarms_tools_example.py | 20 + .../tools_examples/swarms_tools_example.py | 31 -- .../anthropic_vision_test.py | 4 +- .../{vision_examples => vision}/image.jpg | Bin .../image_batch_example.py | 0 .../single_agent/vision/vision_and_tools.py | 0 .../vision_test.py | 0 .../tools/agent_as_tools.py | 0 pyproject.toml | 2 +- swarms/agents/reasoning_agents.py | 22 +- swarms/structs/agent.py | 391 ++++++++------ swarms/structs/conversation.py | 6 + swarms/structs/csv_to_agent.py | 4 +- swarms/telemetry/__init__.py | 4 - swarms/telemetry/main.py | 146 +---- swarms/tools/__init__.py | 10 + swarms/tools/mcp_client_call.py | 511 ++++++++++++++++++ swarms/tools/py_func_to_openai_func_str.py | 1 - swarms/utils/__init__.py | 4 + swarms/utils/auto_download_check_packages.py | 7 +- swarms/utils/check_all_model_max_tokens.py | 43 ++ swarms/utils/history_output_formatter.py | 2 + swarms/utils/output_types.py | 2 +- 44 files changed, 1481 insertions(+), 386 deletions(-) create mode 100644 docs/swarms/examples/templates_index.md rename examples/{tools/mcp_examples/agent_use => mcp}/agent_mcp.py (75%) create mode 100644 examples/mcp/agent_multi_mcp_connections.py rename examples/{tools => mcp}/mcp_examples/agent_mcp.py (100%) rename examples/{tools => mcp}/mcp_examples/agent_use/agent_tools_dict_example.py (100%) rename examples/{tools => mcp}/mcp_examples/agent_use/mcp_exampler.py (100%) rename examples/{tools => mcp}/mcp_examples/utils/find_tools_on_mcp.py (100%) rename examples/{tools => mcp}/mcp_examples/utils/mcp_execute_example.py (100%) rename examples/{tools => 
mcp}/mcp_examples/utils/mcp_load_tools_example.py (100%) rename examples/{tools => mcp}/mcp_examples/utils/mcp_multiserver_tool_fetch.py (100%) create mode 100644 examples/mcp/mcp_utils/mcp_client_call.py create mode 100644 examples/mcp/mcp_utils/mcp_multiple_servers_example.py rename examples/{tools/mcp_examples/servers => mcp/mcp_utils}/mcp_test.py (100%) rename examples/{tools/mcp_examples/servers => mcp/mcp_utils}/okx_crypto_server.py (100%) create mode 100644 examples/mcp/mcp_utils/test_multiple_mcp_servers.py rename examples/{tools/multii_tool_use => single_agent/tools}/new_tools_examples.py (96%) create mode 100644 examples/single_agent/tools/swarms_tools_example.py delete mode 100644 examples/single_agent/tools/tools_examples/swarms_tools_example.py rename examples/single_agent/{vision_examples => vision}/anthropic_vision_test.py (89%) rename examples/single_agent/{vision_examples => vision}/image.jpg (100%) rename examples/single_agent/{vision_examples => vision}/image_batch_example.py (100%) rename vision_and_tools.py => examples/single_agent/vision/vision_and_tools.py (100%) rename examples/single_agent/{vision_examples => vision}/vision_test.py (100%) rename agent_as_tools.py => examples/tools/agent_as_tools.py (100%) create mode 100644 swarms/utils/check_all_model_max_tokens.py diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3cf89799..827c2515 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,5 +1,15 @@ # Contribution Guidelines +
+ + Swarms Logo + +
+ +

+ The Enterprise-Grade Production-Ready Multi-Agent Orchestration Framework +

+ --- ## Table of Contents @@ -7,10 +17,12 @@ - [Project Overview](#project-overview) - [Getting Started](#getting-started) - [Installation](#installation) + - [Environment Configuration](#environment-configuration) - [Project Structure](#project-structure) - [How to Contribute](#how-to-contribute) - [Reporting Issues](#reporting-issues) - [Submitting Pull Requests](#submitting-pull-requests) + - [Good First Issues](#good-first-issues) - [Coding Standards](#coding-standards) - [Type Annotations](#type-annotations) - [Docstrings and Documentation](#docstrings-and-documentation) @@ -19,7 +31,13 @@ - [Areas Needing Contributions](#areas-needing-contributions) - [Writing Tests](#writing-tests) - [Improving Documentation](#improving-documentation) - - [Creating Training Scripts](#creating-training-scripts) + - [Adding New Swarm Architectures](#adding-new-swarm-architectures) + - [Enhancing Agent Capabilities](#enhancing-agent-capabilities) + - [Removing Defunct Code](#removing-defunct-code) +- [Development Resources](#development-resources) + - [Documentation](#documentation) + - [Examples and Tutorials](#examples-and-tutorials) + - [API Reference](#api-reference) - [Community and Support](#community-and-support) - [License](#license) @@ -27,16 +45,24 @@ ## Project Overview -**swarms** is a library focused on making it simple to orchestrate agents to automate real-world activities. The goal is to automate the world economy with these swarms of agents. +**Swarms** is an enterprise-grade, production-ready multi-agent orchestration framework focused on making it simple to orchestrate agents to automate real-world activities. The goal is to automate the world economy with these swarms of agents. -We need your help to: +### Key Features -- **Write Tests**: Ensure the reliability and correctness of the codebase. -- **Improve Documentation**: Maintain clear and comprehensive documentation. 
-- **Add New Orchestration Methods**: Add multi-agent orchestration methods -- **Removing Defunct Code**: Removing bad code +| Category | Features | Benefits | +|----------|----------|-----------| +| šŸ¢ Enterprise Architecture | • Production-Ready Infrastructure
• High Reliability Systems
• Modular Design
• Comprehensive Logging | • Reduced downtime
• Easier maintenance
• Better debugging
• Enhanced monitoring | +| šŸ¤– Agent Orchestration | • Hierarchical Swarms
• Parallel Processing
• Sequential Workflows
• Graph-based Workflows
• Dynamic Agent Rearrangement | • Complex task handling
• Improved performance
• Flexible workflows
• Optimized execution | +| šŸ”„ Integration Capabilities | • Multi-Model Support
• Custom Agent Creation
• Extensive Tool Library
• Multiple Memory Systems | • Provider flexibility
• Custom solutions
• Extended functionality
• Enhanced memory management | +### We Need Your Help To: +- **Write Tests**: Ensure the reliability and correctness of the codebase +- **Improve Documentation**: Maintain clear and comprehensive documentation +- **Add New Orchestration Methods**: Add multi-agent orchestration methods +- **Remove Defunct Code**: Clean up and remove bad code +- **Enhance Agent Capabilities**: Improve existing agents and add new ones +- **Optimize Performance**: Improve speed and efficiency of swarm operations Your contributions will help us push the boundaries of AI and make this library a valuable resource for the community. @@ -46,24 +72,65 @@ Your contributions will help us push the boundaries of AI and make this library ### Installation -You can install swarms using `pip`: +#### Using pip +```bash +pip3 install -U swarms +``` + +#### Using uv (Recommended) +[uv](https://github.com/astral-sh/uv) is a fast Python package installer and resolver, written in Rust. + +```bash +# Install uv +curl -LsSf https://astral.sh/uv/install.sh | sh + +# Install swarms using uv +uv pip install swarms +``` + +#### Using poetry +```bash +# Install poetry if you haven't already +curl -sSL https://install.python-poetry.org | python3 - + +# Add swarms to your project +poetry add swarms +``` +#### From source ```bash -pip3 install swarms +# Clone the repository +git clone https://github.com/kyegomez/swarms.git +cd swarms + +# Install with pip +pip install -e . ``` -Alternatively, you can clone the repository: +### Environment Configuration + +Create a `.env` file in your project root with the following variables: ```bash -git clone https://github.com/kyegomez/swarms +OPENAI_API_KEY="" +WORKSPACE_DIR="agent_workspace" +ANTHROPIC_API_KEY="" +GROQ_API_KEY="" ``` +- [Learn more about environment configuration here](https://docs.swarms.world/en/latest/swarms/install/env/) + ### Project Structure -- **`swarms/`**: Contains all the source code for the library. 
-- **`examples/`**: Includes example scripts and notebooks demonstrating how to use the library. -- **`tests/`**: (To be created) Will contain unit tests for the library. -- **`docs/`**: (To be maintained) Contains documentation files. +- **`swarms/`**: Contains all the source code for the library + - **`agents/`**: Agent implementations and base classes + - **`structs/`**: Swarm orchestration structures (SequentialWorkflow, AgentRearrange, etc.) + - **`tools/`**: Tool implementations and base classes + - **`prompts/`**: System prompts and prompt templates + - **`utils/`**: Utility functions and helpers +- **`examples/`**: Includes example scripts and notebooks demonstrating how to use the library +- **`tests/`**: Unit tests for the library +- **`docs/`**: Documentation files and guides --- @@ -79,6 +146,10 @@ If you find any bugs, inconsistencies, or have suggestions for enhancements, ple - **Description**: Detailed description, steps to reproduce, expected behavior, and any relevant logs or screenshots. 3. **Label Appropriately**: Use labels to categorize the issue (e.g., bug, enhancement, documentation). +**Issue Templates**: Use our issue templates for bug reports and feature requests: +- [Bug Report](https://github.com/kyegomez/swarms/issues/new?template=bug_report.md) +- [Feature Request](https://github.com/kyegomez/swarms/issues/new?template=feature_request.md) + ### Submitting Pull Requests We welcome pull requests (PRs) for bug fixes, improvements, and new features. Please follow these guidelines: @@ -88,6 +159,7 @@ We welcome pull requests (PRs) for bug fixes, improvements, and new features. Pl ```bash git clone https://github.com/kyegomez/swarms.git + cd swarms ``` 3. **Create a New Branch**: Use a descriptive branch name. @@ -121,6 +193,13 @@ We welcome pull requests (PRs) for bug fixes, improvements, and new features. Pl **Note**: It's recommended to create small and focused PRs for easier review and faster integration. 
+### Good First Issues + +The easiest way to contribute is to pick any issue with the `good first issue` tag šŸ’Ŗ. These are specifically designed for new contributors: + +- [Good First Issues](https://github.com/kyegomez/swarms/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) +- [Contributing Board](https://github.com/users/kyegomez/projects/1) - Participate in Roadmap discussions! + --- ## Coding Standards @@ -204,6 +283,7 @@ We have several areas where contributions are particularly welcome. - Write unit tests for existing code in `swarms/`. - Identify edge cases and potential failure points. - Ensure tests are repeatable and independent. + - Add integration tests for swarm orchestration methods. ### Improving Documentation @@ -212,27 +292,113 @@ We have several areas where contributions are particularly welcome. - Update docstrings to reflect any changes. - Add examples and tutorials in the `examples/` directory. - Improve or expand the content in the `docs/` directory. + - Create video tutorials and walkthroughs. + +### Adding New Swarm Architectures + +- **Goal**: Provide new multi-agent orchestration methods. +- **Current Architectures**: + - [SequentialWorkflow](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/) + - [AgentRearrange](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/) + - [MixtureOfAgents](https://docs.swarms.world/en/latest/swarms/structs/moa/) + - [SpreadSheetSwarm](https://docs.swarms.world/en/latest/swarms/structs/spreadsheet_swarm/) + - [ForestSwarm](https://docs.swarms.world/en/latest/swarms/structs/forest_swarm/) + - [GraphWorkflow](https://docs.swarms.world/en/latest/swarms/structs/graph_swarm/) + - [GroupChat](https://docs.swarms.world/en/latest/swarms/structs/group_chat/) + - [SwarmRouter](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/) + +### Enhancing Agent Capabilities + +- **Goal**: Improve existing agents and add new specialized agents. 
+- **Areas of Focus**: + - Financial analysis agents + - Medical diagnosis agents + - Code generation and review agents + - Research and analysis agents + - Creative content generation agents + +### Removing Defunct Code + +- **Goal**: Clean up and remove bad code to improve maintainability. +- **Tasks**: + - Identify unused or deprecated code. + - Remove duplicate implementations. + - Simplify complex functions. + - Update outdated dependencies. + +--- + +## Development Resources + +### Documentation + +- **Official Documentation**: [docs.swarms.world](https://docs.swarms.world) +- **Installation Guide**: [Installation](https://docs.swarms.world/en/latest/swarms/install/install/) +- **Quickstart Guide**: [Get Started](https://docs.swarms.world/en/latest/swarms/install/quickstart/) +- **Agent Architecture**: [Agent Internal Mechanisms](https://docs.swarms.world/en/latest/swarms/framework/agents_explained/) +- **Agent API**: [Agent API](https://docs.swarms.world/en/latest/swarms/structs/agent/) + +### Examples and Tutorials + +- **Basic Examples**: [examples/](https://github.com/kyegomez/swarms/tree/master/examples) +- **Agent Examples**: [examples/single_agent/](https://github.com/kyegomez/swarms/tree/master/examples/single_agent) +- **Multi-Agent Examples**: [examples/multi_agent/](https://github.com/kyegomez/swarms/tree/master/examples/multi_agent) +- **Tool Examples**: [examples/tools/](https://github.com/kyegomez/swarms/tree/master/examples/tools) -### Creating Multi-Agent Orchestration Methods +### API Reference -- **Goal**: Provide new multi-agent orchestration methods +- **Core Classes**: [swarms/structs/](https://github.com/kyegomez/swarms/tree/master/swarms/structs) +- **Agent Implementations**: [swarms/agents/](https://github.com/kyegomez/swarms/tree/master/swarms/agents) +- **Tool Implementations**: [swarms/tools/](https://github.com/kyegomez/swarms/tree/master/swarms/tools) +- **Utility Functions**: 
[swarms/utils/](https://github.com/kyegomez/swarms/tree/master/swarms/utils) --- ## Community and Support +### Connect With Us + +| Platform | Link | Description | +|----------|------|-------------| +| šŸ“š Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides | +| šŸ“ Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles | +| šŸ’¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support | +| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements | +| šŸ‘„ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates | +| šŸ“ŗ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos | +| šŸŽ« Events | [Sign up here](https://lu.ma/5p2jnc2v) | Join our community events | + +### Onboarding Session + +Get onboarded with the creator and lead maintainer of Swarms, Kye Gomez, who will show you how to get started with the installation, usage examples, and starting to build your custom use case! [CLICK HERE](https://cal.com/swarms/swarms-onboarding-session) + +### Community Guidelines + - **Communication**: Engage with the community by participating in discussions on issues and pull requests. - **Respect**: Maintain a respectful and inclusive environment. - **Feedback**: Be open to receiving and providing constructive feedback. +- **Collaboration**: Work together to improve the project for everyone. --- ## License -By contributing to swarms, you agree that your contributions will be licensed under the [MIT License](LICENSE). +By contributing to swarms, you agree that your contributions will be licensed under the [Apache License](LICENSE). + +--- + +## Citation + +If you use **swarms** in your research, please cite the project by referencing the metadata in [CITATION.cff](./CITATION.cff). 
--- Thank you for contributing to swarms! Your efforts help make this project better for everyone. -If you have any questions or need assistance, please feel free to open an issue or reach out to the maintainers. \ No newline at end of file +If you have any questions or need assistance, please feel free to: +- Open an issue on GitHub +- Join our Discord community +- Reach out to the maintainers +- Schedule an onboarding session + +**Happy contributing! šŸš€** \ No newline at end of file diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 9b1a95e8..3440933d 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -236,10 +236,10 @@ nav: - SpreadSheetSwarm: "swarms/structs/spreadsheet_swarm.md" - ForestSwarm: "swarms/structs/forest_swarm.md" - SwarmRouter: "swarms/structs/swarm_router.md" - - TaskQueueSwarm: "swarms/structs/taskqueue_swarm.md" + # - TaskQueueSwarm: "swarms/structs/taskqueue_swarm.md" - SwarmRearrange: "swarms/structs/swarm_rearrange.md" - MultiAgentRouter: "swarms/structs/multi_agent_router.md" - - MatrixSwarm: "swarms/structs/matrix_swarm.md" + # - MatrixSwarm: "swarms/structs/matrix_swarm.md" - ModelRouter: "swarms/structs/model_router.md" - MALT: "swarms/structs/malt.md" - Interactive Group Chat: "swarms/structs/interactive_groupchat.md" @@ -308,6 +308,7 @@ nav: - Examples: - Overview: "examples/index.md" - CookBook Index: "examples/cookbook_index.md" + - PreBuilt Templates: "swarms/examples/templates_index.md" - Customizing Agents: - Basic Agent: "swarms/examples/basic_agent.md" - Agents with Callable Tools: "swarms/examples/agent_with_tools.md" diff --git a/docs/swarms/examples/templates_index.md b/docs/swarms/examples/templates_index.md new file mode 100644 index 00000000..fd64d448 --- /dev/null +++ b/docs/swarms/examples/templates_index.md @@ -0,0 +1,72 @@ +# The Swarms Index + +The Swarms Index is a comprehensive catalog of repositories under The Swarm Corporation, showcasing a wide array of tools, frameworks, and templates designed for building, 
deploying, and managing autonomous AI agents and multi-agent systems. These repositories focus on enterprise-grade solutions, spanning industries like healthcare, finance, marketing, and more, with an emphasis on scalability, security, and performance. Many repositories include templates to help developers quickly set up production-ready applications. + +| Name | Description | Link | +|------|-------------|------| +| Phala-Deployment-Template | A guide and template for running Swarms Agents in a Trusted Execution Environment (TEE) using Phala Cloud, ensuring secure and isolated execution. | [https://github.com/The-Swarm-Corporation/Phala-Deployment-Template](https://github.com/The-Swarm-Corporation/Phala-Deployment-Template) | +| Swarms-API-Status-Page | A status page for monitoring the health and performance of the Swarms API. | [https://github.com/The-Swarm-Corporation/Swarms-API-Status-Page](https://github.com/The-Swarm-Corporation/Swarms-API-Status-Page) | +| Swarms-API-Phala-Template | A deployment solution template for running Swarms API on Phala Cloud, optimized for secure and scalable agent orchestration. | [https://github.com/The-Swarm-Corporation/Swarms-API-Phala-Template](https://github.com/The-Swarm-Corporation/Swarms-API-Phala-Template) | +| DevSwarm | Develop production-grade applications effortlessly with a single prompt, powered by a swarm of v0-driven autonomous agents operating 24/7 for fully autonomous software development. | [https://github.com/The-Swarm-Corporation/DevSwarm](https://github.com/The-Swarm-Corporation/DevSwarm) | +| Enterprise-Grade-Agents-Course | A comprehensive course teaching students to build, deploy, and manage autonomous agents for enterprise workflows using the Swarms library, focusing on scalability and integration. 
| [https://github.com/The-Swarm-Corporation/Enterprise-Grade-Agents-Course](https://github.com/The-Swarm-Corporation/Enterprise-Grade-Agents-Course) | +| agentverse | A collection of agents from top frameworks like Langchain, Griptape, and CrewAI, integrated into the Swarms ecosystem. | [https://github.com/The-Swarm-Corporation/agentverse](https://github.com/The-Swarm-Corporation/agentverse) | +| InsuranceSwarm | A swarm of agents to automate document processing and fraud detection in insurance claims. | [https://github.com/The-Swarm-Corporation/InsuranceSwarm](https://github.com/The-Swarm-Corporation/InsuranceSwarm) | +| swarms-examples | A vast array of examples for enterprise-grade and production-ready applications using the Swarms framework. | [https://github.com/The-Swarm-Corporation/swarms-examples](https://github.com/The-Swarm-Corporation/swarms-examples) | +| auto-ai-research-team | Automates AI research at an OpenAI level to accelerate innovation using swarms of agents. | [https://github.com/The-Swarm-Corporation/auto-ai-research-team](https://github.com/The-Swarm-Corporation/auto-ai-research-team) | +| Agents-Beginner-Guide | A definitive beginner's guide to AI agents and multi-agent systems, explaining fundamentals and industry applications. | [https://github.com/The-Swarm-Corporation/Agents-Beginner-Guide](https://github.com/The-Swarm-Corporation/Agents-Beginner-Guide) | +| Solana-Ecosystem-MCP | A collection of Solana tools wrapped in MCP servers for blockchain development. | [https://github.com/The-Swarm-Corporation/Solana-Ecosystem-MCP](https://github.com/The-Swarm-Corporation/Solana-Ecosystem-MCP) | +| automated-crypto-fund | A fully automated crypto fund leveraging swarms of LLM agents for real-money trading. | [https://github.com/The-Swarm-Corporation/automated-crypto-fund](https://github.com/The-Swarm-Corporation/automated-crypto-fund) | +| Mryaid | The first multi-agent social media platform powered by Swarms. 
| [https://github.com/The-Swarm-Corporation/Mryaid](https://github.com/The-Swarm-Corporation/Mryaid) | +| pharma-swarm | A swarm of autonomous agents for chemical analysis in the pharmaceutical industry. | [https://github.com/The-Swarm-Corporation/pharma-swarm](https://github.com/The-Swarm-Corporation/pharma-swarm) | +| Automated-Prompt-Engineering-Hub | A hub for tools and resources focused on automated prompt engineering for generative AI. | [https://github.com/The-Swarm-Corporation/Automated-Prompt-Engineering-Hub](https://github.com/The-Swarm-Corporation/Automated-Prompt-Engineering-Hub) | +| Multi-Agent-Template-App | A simple, reliable, and high-performance template for building multi-agent applications. | [https://github.com/The-Swarm-Corporation/Multi-Agent-Template-App](https://github.com/The-Swarm-Corporation/Multi-Agent-Template-App) | +| Cookbook | Examples and guides for using the Swarms Framework effectively. | [https://github.com/The-Swarm-Corporation/Cookbook](https://github.com/The-Swarm-Corporation/Cookbook) | +| SwarmDB | A production-grade message queue system for agent communication and LLM backend load balancing. | [https://github.com/The-Swarm-Corporation/SwarmDB](https://github.com/The-Swarm-Corporation/SwarmDB) | +| CryptoTaxSwarm | A personal advisory tax swarm for cryptocurrency transactions. | [https://github.com/The-Swarm-Corporation/CryptoTaxSwarm](https://github.com/The-Swarm-Corporation/CryptoTaxSwarm) | +| Multi-Agent-Marketing-Course | A course on automating marketing operations with enterprise-grade multi-agent collaboration. | [https://github.com/The-Swarm-Corporation/Multi-Agent-Marketing-Course](https://github.com/The-Swarm-Corporation/Multi-Agent-Marketing-Course) | +| Swarms-BrandBook | Branding guidelines and assets for Swarms.ai, embodying innovation and collaboration. 
| [https://github.com/The-Swarm-Corporation/Swarms-BrandBook](https://github.com/The-Swarm-Corporation/Swarms-BrandBook) | +| AgentAPI | A definitive API for managing and interacting with AI agents. | [https://github.com/The-Swarm-Corporation/AgentAPI](https://github.com/The-Swarm-Corporation/AgentAPI) | +| Research-Paper-Writer-Swarm | Automates the creation of high-quality research papers in LaTeX using Swarms agents. | [https://github.com/The-Swarm-Corporation/Research-Paper-Writer-Swarm](https://github.com/The-Swarm-Corporation/Research-Paper-Writer-Swarm) | +| swarms-sdk | A Python client for the Swarms API, providing a simple interface for managing AI swarms. | [https://github.com/The-Swarm-Corporation/swarms-sdk](https://github.com/The-Swarm-Corporation/swarms-sdk) | +| FluidAPI | A framework for interacting with APIs using natural language, simplifying complex requests. | [https://github.com/The-Swarm-Corporation/FluidAPI](https://github.com/The-Swarm-Corporation/FluidAPI) | +| MedicalCoderSwarm | A multi-agent system for comprehensive medical diagnosis and coding using specialized AI agents. | [https://github.com/The-Swarm-Corporation/MedicalCoderSwarm](https://github.com/The-Swarm-Corporation/MedicalCoderSwarm) | +| BackTesterAgent | An AI-powered backtesting framework for automated trading strategy validation and optimization. | [https://github.com/The-Swarm-Corporation/BackTesterAgent](https://github.com/The-Swarm-Corporation/BackTesterAgent) | +| .ai | The first natural language programming language powered by Swarms. | [https://github.com/The-Swarm-Corporation/.ai](https://github.com/The-Swarm-Corporation/.ai) | +| AutoHedge | An autonomous hedge fund leveraging swarm intelligence for market analysis and trade execution. | [https://github.com/The-Swarm-Corporation/AutoHedge](https://github.com/The-Swarm-Corporation/AutoHedge) | +| radiology-swarm | A multi-agent system for advanced radiological analysis, diagnosis, and treatment planning. 
| [https://github.com/The-Swarm-Corporation/radiology-swarm](https://github.com/The-Swarm-Corporation/radiology-swarm) | +| MedGuard | A Python library ensuring HIPAA compliance for LLM agents in healthcare applications. | [https://github.com/The-Swarm-Corporation/MedGuard](https://github.com/The-Swarm-Corporation/MedGuard) | +| doc-master | A lightweight Python library for automated file reading and content extraction. | [https://github.com/The-Swarm-Corporation/doc-master](https://github.com/The-Swarm-Corporation/doc-master) | +| Open-Aladdin | An open-source risk-management tool for stock and security risk analysis. | [https://github.com/The-Swarm-Corporation/Open-Aladdin](https://github.com/The-Swarm-Corporation/Open-Aladdin) | +| TickrAgent | A scalable Python library for building financial agents for comprehensive stock analysis. | [https://github.com/The-Swarm-Corporation/TickrAgent](https://github.com/The-Swarm-Corporation/TickrAgent) | +| NewsAgent | An enterprise-grade news aggregation agent for fetching, querying, and summarizing news. | [https://github.com/The-Swarm-Corporation/NewsAgent](https://github.com/The-Swarm-Corporation/NewsAgent) | +| Research-Paper-Hive | A platform for discovering and engaging with relevant research papers efficiently. | [https://github.com/The-Swarm-Corporation/Research-Paper-Hive](https://github.com/The-Swarm-Corporation/Research-Paper-Hive) | +| MedInsight-Pro | Revolutionizes medical research summarization for healthcare innovators. | [https://github.com/The-Swarm-Corporation/MedInsight-Pro](https://github.com/The-Swarm-Corporation/MedInsight-Pro) | +| swarms-memory | Pre-built wrappers for RAG systems like ChromaDB, Weaviate, and Pinecone. | [https://github.com/The-Swarm-Corporation/swarms-memory](https://github.com/The-Swarm-Corporation/swarms-memory) | +| CryptoAgent | An enterprise-grade solution for fetching, analyzing, and summarizing cryptocurrency data. 
| [https://github.com/The-Swarm-Corporation/CryptoAgent](https://github.com/The-Swarm-Corporation/CryptoAgent) | +| AgentParse | A high-performance parsing library for mapping structured data into agent-understandable blocks. | [https://github.com/The-Swarm-Corporation/AgentParse](https://github.com/The-Swarm-Corporation/AgentParse) | +| CodeGuardian | An intelligent agent for automating the generation of production-grade unit tests for Python code. | [https://github.com/The-Swarm-Corporation/CodeGuardian](https://github.com/The-Swarm-Corporation/CodeGuardian) | +| Marketing-Swarm-Template | A framework for creating multi-platform marketing content using Swarms AI agents. | [https://github.com/The-Swarm-Corporation/Marketing-Swarm-Template](https://github.com/The-Swarm-Corporation/Marketing-Swarm-Template) | +| HTX-Swarm | A multi-agent system for real-time market analysis of HTX exchange data. | [https://github.com/The-Swarm-Corporation/HTX-Swarm](https://github.com/The-Swarm-Corporation/HTX-Swarm) | +| MultiModelOptimizer | A hierarchical parameter synchronization approach for joint training of transformer models. | [https://github.com/The-Swarm-Corporation/MultiModelOptimizer](https://github.com/The-Swarm-Corporation/MultiModelOptimizer) | +| MortgageUnderwritingSwarm | A multi-agent pipeline for automating mortgage underwriting processes. | [https://github.com/The-Swarm-Corporation/MortgageUnderwritingSwarm](https://github.com/The-Swarm-Corporation/MortgageUnderwritingSwarm) | +| DermaSwarm | A multi-agent system for dermatologists to diagnose and treat skin conditions collaboratively. | [https://github.com/The-Swarm-Corporation/DermaSwarm](https://github.com/The-Swarm-Corporation/DermaSwarm) | +| IoTAgents | Integrates IoT data with AI agents for seamless parsing and processing of data streams. 
| [https://github.com/The-Swarm-Corporation/IoTAgents](https://github.com/The-Swarm-Corporation/IoTAgents) | +| eth-agent | An autonomous agent for analyzing on-chain Ethereum data. | [https://github.com/The-Swarm-Corporation/eth-agent](https://github.com/The-Swarm-Corporation/eth-agent) | +| Medical-Swarm-One-Click | A template for building safe, reliable, and production-grade medical multi-agent systems. | [https://github.com/The-Swarm-Corporation/Medical-Swarm-One-Click](https://github.com/The-Swarm-Corporation/Medical-Swarm-One-Click) | +| Swarms-Example-1-Click-Template | A one-click template for building Swarms applications quickly. | [https://github.com/The-Swarm-Corporation/Swarms-Example-1-Click-Template](https://github.com/The-Swarm-Corporation/Swarms-Example-1-Click-Template) | +| Custom-Swarms-Spec-Template | An official specification template for custom swarm development using the Swarms Framework. | [https://github.com/The-Swarm-Corporation/Custom-Swarms-Spec-Template](https://github.com/The-Swarm-Corporation/Custom-Swarms-Spec-Template) | +| Swarms-LlamaIndex-RAG-Template | A template for integrating Llama Index into Swarms applications for RAG capabilities. | [https://github.com/The-Swarm-Corporation/Swarms-LlamaIndex-RAG-Template](https://github.com/The-Swarm-Corporation/Swarms-LlamaIndex-RAG-Template) | +| ForexTreeSwarm | A forex market analysis system using a swarm of AI agents organized in a forest structure. | [https://github.com/The-Swarm-Corporation/ForexTreeSwarm](https://github.com/The-Swarm-Corporation/ForexTreeSwarm) | +| Generalist-Mathematician-Swarm | A swarm of agents for solving complex mathematical problems collaboratively. | [https://github.com/The-Swarm-Corporation/Generalist-Mathematician-Swarm](https://github.com/The-Swarm-Corporation/Generalist-Mathematician-Swarm) | +| Multi-Modal-XRAY-Diagnosis-Medical-Swarm-Template | A template for analyzing X-rays, MRIs, and more using a swarm of agents. 
| [https://github.com/The-Swarm-Corporation/Multi-Modal-XRAY-Diagnosis-Medical-Swarm-Template](https://github.com/The-Swarm-Corporation/Multi-Modal-XRAY-Diagnosis-Medical-Swarm-Template) | +| AgentRAGProtocol | A protocol for integrating Retrieval-Augmented Generation (RAG) into AI agents. | [https://github.com/The-Swarm-Corporation/AgentRAGProtocol](https://github.com/The-Swarm-Corporation/AgentRAGProtocol) | +| Multi-Agent-RAG-Template | A template for creating collaborative AI agent teams for document processing and analysis. | [https://github.com/The-Swarm-Corporation/Multi-Agent-RAG-Template](https://github.com/The-Swarm-Corporation/Multi-Agent-RAG-Template) | +| REACT-Yaml-Agent | An implementation of a REACT agent using YAML instead of JSON. | [https://github.com/The-Swarm-Corporation/REACT-Yaml-Agent](https://github.com/The-Swarm-Corporation/REACT-Yaml-Agent) | +| SwarmsXGCP | A template for deploying Swarms agents on Google Cloud Run. | [https://github.com/The-Swarm-Corporation/SwarmsXGCP](https://github.com/The-Swarm-Corporation/SwarmsXGCP) | +| Legal-Swarm-Template | A one-click template for building legal-focused Swarms applications. | [https://github.com/The-Swarm-Corporation/Legal-Swarm-Template](https://github.com/The-Swarm-Corporation/Legal-Swarm-Template) | +| swarms_sim | A simulation of a swarm of agents in a professional workplace environment. | [https://github.com/The-Swarm-Corporation/swarms_sim](https://github.com/The-Swarm-Corporation/swarms_sim) | +| medical-problems | A repository for medical problems to create Swarms applications for. | [https://github.com/The-Swarm-Corporation/medical-problems](https://github.com/The-Swarm-Corporation/medical-problems) | +| swarm-ecosystem | An overview of the Swarm Ecosystem and its components. | [https://github.com/The-Swarm-Corporation/swarm-ecosystem](https://github.com/The-Swarm-Corporation/swarm-ecosystem) | +| swarms_ecosystem_md | MDX documentation for the Swarm Ecosystem. 
| [https://github.com/The-Swarm-Corporation/swarms_ecosystem_md](https://github.com/The-Swarm-Corporation/swarms_ecosystem_md) | + + diff --git a/example.py b/example.py index 35ef36a4..34ed764e 100644 --- a/example.py +++ b/example.py @@ -38,7 +38,8 @@ agent = Agent( model_name="gpt-4o-mini", dynamic_temperature_enabled=True, output_type="all", - safety_prompt_on=True, + max_tokens=16384, + # dashboard=True ) out = agent.run("What are the best top 3 etfs for gold coverage?") diff --git a/examples/tools/mcp_examples/agent_use/agent_mcp.py b/examples/mcp/agent_mcp.py similarity index 75% rename from examples/tools/mcp_examples/agent_use/agent_mcp.py rename to examples/mcp/agent_mcp.py index 6307790c..13ab9bff 100644 --- a/examples/tools/mcp_examples/agent_use/agent_mcp.py +++ b/examples/mcp/agent_mcp.py @@ -11,11 +11,13 @@ agent = Agent( system_prompt=FINANCIAL_AGENT_SYS_PROMPT, max_loops=1, mcp_url="http://0.0.0.0:8000/sse", + model_name="gpt-4o-mini", + output_type="all", ) # Create a markdown file with initial content out = agent.run( - "Use any of the tools available to you", + "Use the get_okx_crypto_volume to get the volume of BTC just put the name of the coin", ) print(out) diff --git a/examples/mcp/agent_multi_mcp_connections.py b/examples/mcp/agent_multi_mcp_connections.py new file mode 100644 index 00000000..46e22cbc --- /dev/null +++ b/examples/mcp/agent_multi_mcp_connections.py @@ -0,0 +1,49 @@ +from swarms import Agent + +# Initialize the agent +agent = Agent( + agent_name="Quantitative-Trading-Agent", + agent_description="Advanced quantitative trading and algorithmic analysis agent", + system_prompt=""" + You are an expert quantitative trading agent with deep expertise in: + - Algorithmic trading strategies and implementation + - Statistical arbitrage and market making + - Risk management and portfolio optimization + - High-frequency trading systems + - Market microstructure analysis + - Quantitative research methodologies + - Financial mathematics and 
stochastic processes + - Machine learning applications in trading + + Your core responsibilities include: + 1. Developing and backtesting trading strategies + 2. Analyzing market data and identifying alpha opportunities + 3. Implementing risk management frameworks + 4. Optimizing portfolio allocations + 5. Conducting quantitative research + 6. Monitoring market microstructure + 7. Evaluating trading system performance + + You maintain strict adherence to: + - Mathematical rigor in all analyses + - Statistical significance in strategy development + - Risk-adjusted return optimization + - Market impact minimization + - Regulatory compliance + - Transaction cost analysis + - Performance attribution + + You communicate in precise, technical terms while maintaining clarity for stakeholders.""", + max_loops=1, + model_name="gpt-4o-mini", + dynamic_temperature_enabled=True, + output_type="all", + mcp_urls=[ + "http://0.0.0.0:8000/sse", + "http://0.0.0.0:8001/sse", + ], +) + +agent.run( + "Please use the get_okx_crypto_volume tool to get the trading volume for Bitcoin (BTC). Provide the volume information." 
+) diff --git a/examples/tools/mcp_examples/agent_mcp.py b/examples/mcp/mcp_examples/agent_mcp.py similarity index 100% rename from examples/tools/mcp_examples/agent_mcp.py rename to examples/mcp/mcp_examples/agent_mcp.py diff --git a/examples/tools/mcp_examples/agent_use/agent_tools_dict_example.py b/examples/mcp/mcp_examples/agent_use/agent_tools_dict_example.py similarity index 100% rename from examples/tools/mcp_examples/agent_use/agent_tools_dict_example.py rename to examples/mcp/mcp_examples/agent_use/agent_tools_dict_example.py diff --git a/examples/tools/mcp_examples/agent_use/mcp_exampler.py b/examples/mcp/mcp_examples/agent_use/mcp_exampler.py similarity index 100% rename from examples/tools/mcp_examples/agent_use/mcp_exampler.py rename to examples/mcp/mcp_examples/agent_use/mcp_exampler.py diff --git a/examples/tools/mcp_examples/utils/find_tools_on_mcp.py b/examples/mcp/mcp_examples/utils/find_tools_on_mcp.py similarity index 100% rename from examples/tools/mcp_examples/utils/find_tools_on_mcp.py rename to examples/mcp/mcp_examples/utils/find_tools_on_mcp.py diff --git a/examples/tools/mcp_examples/utils/mcp_execute_example.py b/examples/mcp/mcp_examples/utils/mcp_execute_example.py similarity index 100% rename from examples/tools/mcp_examples/utils/mcp_execute_example.py rename to examples/mcp/mcp_examples/utils/mcp_execute_example.py diff --git a/examples/tools/mcp_examples/utils/mcp_load_tools_example.py b/examples/mcp/mcp_examples/utils/mcp_load_tools_example.py similarity index 100% rename from examples/tools/mcp_examples/utils/mcp_load_tools_example.py rename to examples/mcp/mcp_examples/utils/mcp_load_tools_example.py diff --git a/examples/tools/mcp_examples/utils/mcp_multiserver_tool_fetch.py b/examples/mcp/mcp_examples/utils/mcp_multiserver_tool_fetch.py similarity index 100% rename from examples/tools/mcp_examples/utils/mcp_multiserver_tool_fetch.py rename to examples/mcp/mcp_examples/utils/mcp_multiserver_tool_fetch.py diff --git 
a/examples/mcp/mcp_utils/mcp_client_call.py b/examples/mcp/mcp_utils/mcp_client_call.py new file mode 100644 index 00000000..caa969a3 --- /dev/null +++ b/examples/mcp/mcp_utils/mcp_client_call.py @@ -0,0 +1,12 @@ +from swarms.tools.mcp_client_call import ( + get_mcp_tools_sync, + execute_tool_call_simple, +) + +tools = get_mcp_tools_sync() + +print(tools) + +result = execute_tool_call_simple(tools[0], "Hello, world!") + +print(result) diff --git a/examples/mcp/mcp_utils/mcp_multiple_servers_example.py b/examples/mcp/mcp_utils/mcp_multiple_servers_example.py new file mode 100644 index 00000000..5ca4304d --- /dev/null +++ b/examples/mcp/mcp_utils/mcp_multiple_servers_example.py @@ -0,0 +1,234 @@ +""" +Example demonstrating how to execute multiple tools across multiple MCP servers. + +This example shows how to: +1. Create a mapping of function names to servers +2. Execute multiple tool calls across different servers +3. Handle responses with tool calls and route them to the appropriate servers +""" + +import asyncio +from swarms.tools.mcp_client_call import ( + execute_multiple_tools_on_multiple_mcp_servers, + execute_multiple_tools_on_multiple_mcp_servers_sync, + get_tools_for_multiple_mcp_servers, +) +from swarms.schemas.mcp_schemas import MCPConnection + + +def example_sync_execution(): + """Example of synchronous execution across multiple MCP servers.""" + + # Example server URLs (replace with your actual MCP server URLs) + urls = [ + "http://localhost:8000/sse", # Server 1 + "http://localhost:8001/sse", # Server 2 + "http://localhost:8002/sse", # Server 3 + ] + + # Optional: Create connection objects for each server + connections = [ + MCPConnection( + url="http://localhost:8000/sse", + authorization_token="token1", # if needed + timeout=10, + ), + MCPConnection( + url="http://localhost:8001/sse", + authorization_token="token2", # if needed + timeout=10, + ), + MCPConnection( + url="http://localhost:8002/sse", + authorization_token="token3", # if needed + 
timeout=10, + ), + ] + + # Example responses containing tool calls + # These would typically come from an LLM that decided to use tools + responses = [ + { + "function": { + "name": "search_web", + "arguments": { + "query": "python programming best practices" + }, + } + }, + { + "function": { + "name": "search_database", + "arguments": {"table": "users", "id": 123}, + } + }, + { + "function": { + "name": "send_email", + "arguments": { + "to": "user@example.com", + "subject": "Test email", + "body": "This is a test email", + }, + } + }, + ] + + print("=== Synchronous Execution Example ===") + print( + f"Executing {len(responses)} tool calls across {len(urls)} servers..." + ) + + try: + # Execute all tool calls across multiple servers + results = execute_multiple_tools_on_multiple_mcp_servers_sync( + responses=responses, + urls=urls, + connections=connections, + output_type="dict", + max_concurrent=5, # Limit concurrent executions + ) + + print(f"\nExecution completed! Got {len(results)} results:") + for i, result in enumerate(results): + print(f"\nResult {i + 1}:") + print(f" Function: {result['function_name']}") + print(f" Server: {result['server_url']}") + print(f" Status: {result['status']}") + if result["status"] == "success": + print(f" Result: {result['result']}") + else: + print( + f" Error: {result.get('error', 'Unknown error')}" + ) + + except Exception as e: + print(f"Error during execution: {str(e)}") + + +async def example_async_execution(): + """Example of asynchronous execution across multiple MCP servers.""" + + # Example server URLs + urls = [ + "http://localhost:8000/sse", + "http://localhost:8001/sse", + "http://localhost:8002/sse", + ] + + # Example responses with multiple tool calls in a single response + responses = [ + { + "tool_calls": [ + { + "function": { + "name": "search_web", + "arguments": { + "query": "machine learning trends 2024" + }, + } + }, + { + "function": { + "name": "search_database", + "arguments": { + "table": "articles", + 
"category": "AI", + }, + } + }, + ] + }, + { + "function": { + "name": "send_notification", + "arguments": { + "user_id": 456, + "message": "Your analysis is complete", + }, + } + }, + ] + + print("\n=== Asynchronous Execution Example ===") + print( + f"Executing tool calls across {len(urls)} servers asynchronously..." + ) + + try: + # Execute all tool calls across multiple servers + results = ( + await execute_multiple_tools_on_multiple_mcp_servers( + responses=responses, + urls=urls, + output_type="str", + max_concurrent=3, + ) + ) + + print( + f"\nAsync execution completed! Got {len(results)} results:" + ) + for i, result in enumerate(results): + print(f"\nResult {i + 1}:") + print(f" Response Index: {result['response_index']}") + print(f" Function: {result['function_name']}") + print(f" Server: {result['server_url']}") + print(f" Status: {result['status']}") + if result["status"] == "success": + print(f" Result: {result['result']}") + else: + print( + f" Error: {result.get('error', 'Unknown error')}" + ) + + except Exception as e: + print(f"Error during async execution: {str(e)}") + + +def example_get_tools_from_multiple_servers(): + """Example of getting tools from multiple servers.""" + + urls = [ + "http://localhost:8000/sse", + "http://localhost:8001/sse", + "http://localhost:8002/sse", + ] + + print("\n=== Getting Tools from Multiple Servers ===") + + try: + # Get all available tools from all servers + all_tools = get_tools_for_multiple_mcp_servers( + urls=urls, format="openai", output_type="dict" + ) + + print( + f"Found {len(all_tools)} total tools across all servers:" + ) + + # Group tools by function name to see what's available + function_names = set() + for tool in all_tools: + if isinstance(tool, dict) and "function" in tool: + function_names.add(tool["function"]["name"]) + elif hasattr(tool, "name"): + function_names.add(tool.name) + + print("Available functions:") + for func_name in sorted(function_names): + print(f" - {func_name}") + + except 
Exception as e: + print(f"Error getting tools: {str(e)}") + + +if __name__ == "__main__": + # Run synchronous example + example_sync_execution() + + # Run async example + asyncio.run(example_async_execution()) + + # Get tools from multiple servers + example_get_tools_from_multiple_servers() diff --git a/examples/tools/mcp_examples/servers/mcp_test.py b/examples/mcp/mcp_utils/mcp_test.py similarity index 100% rename from examples/tools/mcp_examples/servers/mcp_test.py rename to examples/mcp/mcp_utils/mcp_test.py diff --git a/examples/tools/mcp_examples/servers/okx_crypto_server.py b/examples/mcp/mcp_utils/okx_crypto_server.py similarity index 100% rename from examples/tools/mcp_examples/servers/okx_crypto_server.py rename to examples/mcp/mcp_utils/okx_crypto_server.py diff --git a/examples/mcp/mcp_utils/test_multiple_mcp_servers.py b/examples/mcp/mcp_utils/test_multiple_mcp_servers.py new file mode 100644 index 00000000..401a00a7 --- /dev/null +++ b/examples/mcp/mcp_utils/test_multiple_mcp_servers.py @@ -0,0 +1,54 @@ +""" +Simple test for the execute_multiple_tools_on_multiple_mcp_servers functionality. +""" + +from swarms.tools.mcp_client_call import ( + execute_multiple_tools_on_multiple_mcp_servers_sync, +) + + +def test_async_multiple_tools_execution(): + """Test the async multiple tools execution function structure.""" + print( + "\nTesting async multiple tools execution function structure..." 
+ ) + + urls = [ + "http://localhost:8000/sse", + "http://localhost:8001/sse", + ] + + # Mock responses with multiple tool calls + responses = [ + { + "tool_calls": [ + { + "function": { + "name": "get_okx_crypto_price", + "arguments": {"symbol": "SOL-USDT"}, + } + }, + { + "function": { + "name": "get_crypto_price", + "arguments": {"coin_id": "solana"}, + } + }, + ] + } + ] + + try: + # This will likely fail to connect, but we can test the function structure + results = execute_multiple_tools_on_multiple_mcp_servers_sync( + responses=responses, urls=urls + ) + print(f"Got {len(results)} results") + print(results) + except Exception as e: + print(f"Expected error (no servers running): {str(e)}") + print("Async function structure is working correctly!") + + +if __name__ == "__main__": + test_async_multiple_tools_execution() diff --git a/examples/models/reasoning_duo_batched.py b/examples/models/reasoning_duo_batched.py index 4d75c66f..9d9ca044 100644 --- a/examples/models/reasoning_duo_batched.py +++ b/examples/models/reasoning_duo_batched.py @@ -16,4 +16,4 @@ if __name__ == "__main__": # Run the batch once and print each result results = duo.batched_run(tasks) for task, output in zip(tasks, results): - print(f"Task: {task}\nResult: {output}\n") \ No newline at end of file + print(f"Task: {task}\nResult: {output}\n") diff --git a/examples/multi_agent/mar/multi_agent_router_minimal.py b/examples/multi_agent/mar/multi_agent_router_minimal.py index 898ccef3..d72ceea6 100644 --- a/examples/multi_agent/mar/multi_agent_router_minimal.py +++ b/examples/multi_agent/mar/multi_agent_router_minimal.py @@ -18,8 +18,8 @@ router = SwarmRouter( name="multi-agent-router-demo", description="Routes tasks to the most suitable agent", agents=agents, - swarm_type="MultiAgentRouter" + swarm_type="MultiAgentRouter", ) result = router.run("Write a function that adds two numbers") -print(result) \ No newline at end of file +print(result) diff --git 
a/examples/tools/multii_tool_use/new_tools_examples.py b/examples/single_agent/tools/new_tools_examples.py similarity index 96% rename from examples/tools/multii_tool_use/new_tools_examples.py rename to examples/single_agent/tools/new_tools_examples.py index 86eb450b..542f6ea4 100644 --- a/examples/tools/multii_tool_use/new_tools_examples.py +++ b/examples/single_agent/tools/new_tools_examples.py @@ -176,15 +176,15 @@ agent = Agent( max_loops=1, model_name="gpt-4o-mini", dynamic_temperature_enabled=True, - output_type="all", + output_type="final", + tool_call_summary=True, tools=[ get_coin_price, - get_top_cryptocurrencies, ], + # output_raw_json_from_tool_call=True, ) -print( - agent.run( - "What is the price of Bitcoin? what are the top 5 cryptocurrencies by market cap?" - ) -) +out = agent.run("What is the price of Bitcoin?") + +print(out) +print(f"Output type: {type(out)}") diff --git a/examples/single_agent/tools/swarms_tools_example.py b/examples/single_agent/tools/swarms_tools_example.py new file mode 100644 index 00000000..9aec628f --- /dev/null +++ b/examples/single_agent/tools/swarms_tools_example.py @@ -0,0 +1,20 @@ +from swarms import Agent +from swarms.prompts.finance_agent_sys_prompt import ( + FINANCIAL_AGENT_SYS_PROMPT, +) +from swarms_tools import yahoo_finance_api + +# Initialize the agent +agent = Agent( + agent_name="Financial-Analysis-Agent", + agent_description="Personal finance advisor agent", + system_prompt=FINANCIAL_AGENT_SYS_PROMPT, + max_loops=1, + model_name="gpt-4o-mini", + tools=[yahoo_finance_api], + dynamic_temperature_enabled=True, +) + +agent.run( + "Fetch the data for nvidia and tesla both with the yahoo finance api" +) diff --git a/examples/single_agent/tools/tools_examples/swarms_tools_example.py b/examples/single_agent/tools/tools_examples/swarms_tools_example.py deleted file mode 100644 index 9171bb30..00000000 --- a/examples/single_agent/tools/tools_examples/swarms_tools_example.py +++ /dev/null @@ -1,31 +0,0 @@ -from swarms 
import Agent -from swarms.prompts.finance_agent_sys_prompt import ( - FINANCIAL_AGENT_SYS_PROMPT, -) -from swarms_tools import ( - fetch_stock_news, - coin_gecko_coin_api, - fetch_htx_data, -) - -# Initialize the agent -agent = Agent( - agent_name="Financial-Analysis-Agent", - agent_description="Personal finance advisor agent", - system_prompt=FINANCIAL_AGENT_SYS_PROMPT, - max_loops=1, - model_name="gpt-4o", - dynamic_temperature_enabled=True, - user_name="swarms_corp", - retry_attempts=3, - context_length=8192, - return_step_meta=False, - output_type="str", # "json", "dict", "csv" OR "string" "yaml" and - auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task - max_tokens=4000, # max output tokens - saved_state_path="agent_00.json", - interactive=False, - tools=[fetch_stock_news, coin_gecko_coin_api, fetch_htx_data], -) - -agent.run("Analyze the $swarms token on htx") diff --git a/examples/single_agent/vision_examples/anthropic_vision_test.py b/examples/single_agent/vision/anthropic_vision_test.py similarity index 89% rename from examples/single_agent/vision_examples/anthropic_vision_test.py rename to examples/single_agent/vision/anthropic_vision_test.py index 6d24faeb..583ac9cf 100644 --- a/examples/single_agent/vision_examples/anthropic_vision_test.py +++ b/examples/single_agent/vision/anthropic_vision_test.py @@ -1,4 +1,4 @@ -from swarms.structs import Agent +from swarms import Agent from swarms.prompts.logistics import ( Quality_Control_Agent_Prompt, ) @@ -16,6 +16,8 @@ quality_control_agent = Agent( multi_modal=True, max_loops=1, output_type="str-all-except-first", + dynamic_temperature_enabled=True, + stream=True, ) response = quality_control_agent.run( diff --git a/examples/single_agent/vision_examples/image.jpg b/examples/single_agent/vision/image.jpg similarity index 100% rename from examples/single_agent/vision_examples/image.jpg rename to examples/single_agent/vision/image.jpg diff --git 
a/examples/single_agent/vision_examples/image_batch_example.py b/examples/single_agent/vision/image_batch_example.py similarity index 100% rename from examples/single_agent/vision_examples/image_batch_example.py rename to examples/single_agent/vision/image_batch_example.py diff --git a/vision_and_tools.py b/examples/single_agent/vision/vision_and_tools.py similarity index 100% rename from vision_and_tools.py rename to examples/single_agent/vision/vision_and_tools.py diff --git a/examples/single_agent/vision_examples/vision_test.py b/examples/single_agent/vision/vision_test.py similarity index 100% rename from examples/single_agent/vision_examples/vision_test.py rename to examples/single_agent/vision/vision_test.py diff --git a/agent_as_tools.py b/examples/tools/agent_as_tools.py similarity index 100% rename from agent_as_tools.py rename to examples/tools/agent_as_tools.py diff --git a/pyproject.toml b/pyproject.toml index 4eeee107..7fe62d43 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "7.8.6" +version = "7.8.8" description = "Swarms - TGSC" license = "MIT" authors = ["Kye Gomez "] diff --git a/swarms/agents/reasoning_agents.py b/swarms/agents/reasoning_agents.py index 77d2201d..68325e47 100644 --- a/swarms/agents/reasoning_agents.py +++ b/swarms/agents/reasoning_agents.py @@ -22,6 +22,7 @@ agent_types = Literal[ "AgentJudge", ] + class ReasoningAgentRouter: """ A Reasoning Agent that can answer questions and assist with various tasks using different reasoning strategies. 
@@ -60,7 +61,7 @@ class ReasoningAgentRouter: self.output_type = output_type self.num_knowledge_items = num_knowledge_items self.memory_capacity = memory_capacity - + # Added: Initialize the factory mapping dictionary self._initialize_agent_factories() @@ -74,19 +75,16 @@ class ReasoningAgentRouter: # ReasoningDuo factory methods "reasoning-duo": self._create_reasoning_duo, "reasoning-agent": self._create_reasoning_duo, - # SelfConsistencyAgent factory methods "self-consistency": self._create_consistency_agent, "consistency-agent": self._create_consistency_agent, - # IREAgent factory methods "ire": self._create_ire_agent, "ire-agent": self._create_ire_agent, - # Other agent type factory methods "AgentJudge": self._create_agent_judge, "ReflexionAgent": self._create_reflexion_agent, - "GKPAgent": self._create_gkp_agent + "GKPAgent": self._create_gkp_agent, } # Added: Concrete factory methods for various agent types @@ -99,7 +97,7 @@ class ReasoningAgentRouter: system_prompt=self.system_prompt, output_type=self.output_type, ) - + def _create_consistency_agent(self): """Creates an agent instance for SelfConsistencyAgent type""" return SelfConsistencyAgent( @@ -111,7 +109,7 @@ class ReasoningAgentRouter: num_samples=self.num_samples, output_type=self.output_type, ) - + def _create_ire_agent(self): """Creates an agent instance for IREAgent type""" return IREAgent( @@ -123,7 +121,7 @@ class ReasoningAgentRouter: max_iterations=self.num_samples, output_type=self.output_type, ) - + def _create_agent_judge(self): """Creates an agent instance for AgentJudge type""" return AgentJudge( @@ -132,7 +130,7 @@ class ReasoningAgentRouter: system_prompt=self.system_prompt, max_loops=self.max_loops, ) - + def _create_reflexion_agent(self): """Creates an agent instance for ReflexionAgent type""" return ReflexionAgent( @@ -141,7 +139,7 @@ class ReasoningAgentRouter: model_name=self.model_name, max_loops=self.max_loops, ) - + def _create_gkp_agent(self): """Creates an agent instance for 
GKPAgent type""" return GKPAgent( @@ -222,7 +220,7 @@ class ReasoningAgentRouter: else: raise ValueError(f"Invalid swarm type: {self.swarm_type}") """ - + # Added: Implementation using factory pattern and dictionary mapping try: # Get the corresponding creation function from the factory dictionary and call it @@ -257,4 +255,4 @@ class ReasoningAgentRouter: results = [] for task in tasks: results.append(self.run(task, *args, **kwargs)) - return results \ No newline at end of file + return results diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index dce3c2c2..9c491abc 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -72,8 +72,10 @@ from swarms.prompts.max_loop_prompt import generate_reasoning_prompt from swarms.prompts.safety_prompt import SAFETY_PROMPT from swarms.structs.ma_utils import set_random_models_for_agents from swarms.tools.mcp_client_call import ( + execute_multiple_tools_on_multiple_mcp_servers_sync, execute_tool_call_simple, get_mcp_tools_sync, + get_tools_for_multiple_mcp_servers, ) from swarms.schemas.mcp_schemas import ( MCPConnection, @@ -81,7 +83,6 @@ from swarms.schemas.mcp_schemas import ( from swarms.utils.index import ( exists, format_data_structure, - format_dict_to_string, ) from swarms.schemas.conversation_schema import ConversationSchema from swarms.utils.output_types import OutputType @@ -417,6 +418,8 @@ class Agent: llm_base_url: Optional[str] = None, llm_api_key: Optional[str] = None, rag_config: Optional[RAGConfig] = None, + tool_call_summary: bool = True, + output_raw_json_from_tool_call: bool = False, *args, **kwargs, ): @@ -445,7 +448,10 @@ class Agent: self.system_prompt = system_prompt self.agent_name = agent_name self.agent_description = agent_description - self.saved_state_path = f"{self.agent_name}_{generate_api_key(prefix='agent-')}_state.json" + # self.saved_state_path = f"{self.agent_name}_{generate_api_key(prefix='agent-')}_state.json" + self.saved_state_path = ( + 
f"{generate_api_key(prefix='agent-')}_state.json" + ) self.autosave = autosave self.response_filters = [] self.self_healing_enabled = self_healing_enabled @@ -548,6 +554,10 @@ class Agent: self.llm_base_url = llm_base_url self.llm_api_key = llm_api_key self.rag_config = rag_config + self.tool_call_summary = tool_call_summary + self.output_raw_json_from_tool_call = ( + output_raw_json_from_tool_call + ) # self.short_memory = self.short_memory_init() @@ -592,6 +602,11 @@ class Agent: if self.long_term_memory is not None: self.rag_handler = self.rag_setup_handling() + if self.dashboard is True: + self.print_dashboard() + + self.reliability_check() + def rag_setup_handling(self): return AgentRAGHandler( long_term_memory=self.long_term_memory, @@ -616,7 +631,7 @@ class Agent: self.short_memory.add( role=f"{self.agent_name}", - content=f"Tools available: {format_data_structure(self.tools_list_dictionary)}", + content=self.tools_list_dictionary, ) def short_memory_init(self): @@ -685,6 +700,10 @@ class Agent: if exists(self.tools) and len(self.tools) >= 2: parallel_tool_calls = True + elif exists(self.mcp_url) or exists(self.mcp_urls): + parallel_tool_calls = True + elif exists(self.mcp_config): + parallel_tool_calls = True else: parallel_tool_calls = False @@ -707,7 +726,7 @@ class Agent: parallel_tool_calls=parallel_tool_calls, ) - elif self.mcp_url is not None: + elif exists(self.mcp_url) or exists(self.mcp_urls): self.llm = LiteLLM( **common_args, tools_list_dictionary=self.add_mcp_tools_to_memory(), @@ -745,15 +764,27 @@ class Agent: tools = get_mcp_tools_sync(server_path=self.mcp_url) elif exists(self.mcp_config): tools = get_mcp_tools_sync(connection=self.mcp_config) - logger.info(f"Tools: {tools}") + # logger.info(f"Tools: {tools}") + elif exists(self.mcp_urls): + tools = get_tools_for_multiple_mcp_servers( + urls=self.mcp_urls, + output_type="str", + ) + # print(f"Tools: {tools} for {self.mcp_urls}") else: raise AgentMCPConnectionError( "mcp_url must be either a 
string URL or MCPConnection object" ) - self.pretty_print( - f"✨ [SYSTEM] Successfully integrated {len(tools)} MCP tools into agent: {self.agent_name} | Status: ONLINE | Time: {time.strftime('%H:%M:%S')} ✨", - loop_count=0, - ) + + if ( + exists(self.mcp_url) + or exists(self.mcp_urls) + or exists(self.mcp_config) + ): + self.pretty_print( + f"✨ [SYSTEM] Successfully integrated {len(tools)} MCP tools into agent: {self.agent_name} | Status: ONLINE | Time: {time.strftime('%H:%M:%S')} ✨", + loop_count=0, + ) return tools except AgentMCPConnectionError as e: @@ -832,26 +863,6 @@ class Agent: self.feedback.append(feedback) logging.info(f"Feedback received: {feedback}") - def agent_initialization(self): - try: - logger.info( - f"Initializing Autonomous Agent {self.agent_name}..." - ) - self.check_parameters() - logger.info( - f"{self.agent_name} Initialized Successfully." - ) - logger.info( - f"Autonomous Agent {self.agent_name} Activated, all systems operational. Executing task..." - ) - - if self.dashboard is True: - self.print_dashboard() - - except ValueError as e: - logger.info(f"Error initializing agent: {e}") - raise e - def _check_stopping_condition(self, response: str) -> bool: """Check if the stopping condition is met.""" try: @@ -883,48 +894,38 @@ class Agent: ) def print_dashboard(self): - """Print dashboard""" - formatter.print_panel( - f"Initializing Agent: {self.agent_name}" - ) - - data = self.to_dict() - - # Beautify the data - # data = json.dumps(data, indent=4) - # json_data = json.dumps(data, indent=4) - + tools_activated = True if self.tools is not None else False + mcp_activated = True if self.mcp_url is not None else False formatter.print_panel( f""" - Agent Dashboard - -------------------------------------------- - - Agent {self.agent_name} is initializing for {self.max_loops} with the following configuration: - ---------------------------------------- - - Agent Configuration: - Configuration: {data} - - ---------------------------------------- - 
""", + + šŸ¤– Agent {self.agent_name} Dashboard šŸš€ + ════════════════════════════════════════════════════════════ + + šŸŽÆ Agent {self.agent_name} Status: ONLINE & OPERATIONAL + ──────────────────────────────────────────────────────────── + + šŸ“‹ Agent Identity: + • šŸ·ļø Name: {self.agent_name} + • šŸ“ Description: {self.agent_description} + + āš™ļø Technical Specifications: + • šŸ¤– Model: {self.model_name} + • šŸ”„ Internal Loops: {self.max_loops} + • šŸŽÆ Max Tokens: {self.max_tokens} + • šŸŒ”ļø Dynamic Temperature: {self.dynamic_temperature_enabled} + + šŸ”§ System Modules: + • šŸ› ļø Tools Activated: {tools_activated} + • šŸ”— MCP Activated: {mcp_activated} + + ════════════════════════════════════════════════════════════ + šŸš€ Ready for Tasks šŸš€ + + """, + title=f"Agent {self.agent_name} Dashboard", ) - # Check parameters - def check_parameters(self): - if self.llm is None: - raise ValueError( - "Language model is not provided. Choose a model from the available models in swarm_models or create a class with a run(task: str) method and or a __call__ method." 
- ) - - if self.max_loops is None or self.max_loops == 0: - raise ValueError("Max loops is not provided") - - if self.max_tokens == 0 or self.max_tokens is None: - raise ValueError("Max tokens is not provided") - - if self.context_length == 0 or self.context_length is None: - raise ValueError("Context length is not provided") - # Main function def _run( self, @@ -962,7 +963,7 @@ class Agent: self.short_memory.add(role=self.user_name, content=task) - if self.plan_enabled: + if self.plan_enabled or self.planning_prompt is not None: self.plan(task) # Set the loop count @@ -1029,64 +1030,51 @@ class Agent: ) self.memory_query(task_prompt) - # # Generate response using LLM - # response_args = ( - # (task_prompt, *args) - # if img is None - # else (task_prompt, img, *args) - # ) - - # # Call the LLM - # response = self.call_llm( - # *response_args, **kwargs - # ) - response = self.call_llm( task=task_prompt, img=img, *args, **kwargs ) + print(f"Response: {response}") + if exists(self.tools_list_dictionary): if isinstance(response, BaseModel): response = response.model_dump() - # # Convert to a str if the response is not a str - # if self.mcp_url is None or self.tools is None: + # Parse the response from the agent with the output type response = self.parse_llm_output(response) self.short_memory.add( role=self.agent_name, - content=format_dict_to_string(response), + content=response, ) # Print self.pretty_print(response, loop_count) - # # Output Cleaner - # self.output_cleaner_op(response) - - # Check and execute tools + # Check and execute callable tools if exists(self.tools): - self.execute_tools( - response=response, - loop_count=loop_count, - ) - - if exists(self.mcp_url): - self.mcp_tool_handling( - response, loop_count - ) - - if exists(self.mcp_url) and exists( - self.tools + if ( + self.output_raw_json_from_tool_call + is True + ): + print(type(response)) + response = response + else: + self.execute_tools( + response=response, + loop_count=loop_count, + ) + + # 
Handle MCP tools + if ( + exists(self.mcp_url) + or exists(self.mcp_config) + or exists(self.mcp_urls) ): self.mcp_tool_handling( - response, loop_count - ) - - self.execute_tools( response=response, - loop_count=loop_count, + current_loop=loop_count, ) self.sentiment_and_evaluator(response) @@ -1275,33 +1263,12 @@ class Agent: def receive_message( self, agent_name: str, task: str, *args, **kwargs ): - return self.run( - task=f"From {agent_name}: {task}", *args, **kwargs + improved_prompt = ( + f"You have received a message from agent '{agent_name}':\n\n" + f'"{task}"\n\n' + "Please process this message and respond appropriately." ) - - def dict_to_csv(self, data: dict) -> str: - """ - Convert a dictionary to a CSV string. - - Args: - data (dict): The dictionary to convert. - - Returns: - str: The CSV string representation of the dictionary. - """ - import csv - import io - - output = io.StringIO() - writer = csv.writer(output) - - # Write header - writer.writerow(data.keys()) - - # Write values - writer.writerow(data.values()) - - return output.getvalue() + return self.run(task=improved_prompt, *args, **kwargs) # def parse_and_execute_tools(self, response: str, *args, **kwargs): # max_retries = 3 # Maximum number of retries @@ -1351,26 +1318,47 @@ class Agent: def plan(self, task: str, *args, **kwargs) -> None: """ - Plan the task + Create a strategic plan for executing the given task. + + This method generates a step-by-step plan by combining the conversation + history, planning prompt, and current task. The plan is then added to + the agent's short-term memory for reference during execution. 
Args: - task (str): The task to plan + task (str): The task to create a plan for + *args: Additional positional arguments passed to the LLM + **kwargs: Additional keyword arguments passed to the LLM + + Returns: + None: The plan is stored in memory rather than returned + + Raises: + Exception: If planning fails, the original exception is re-raised """ try: - if exists(self.planning_prompt): - # Join the plan and the task - planning_prompt = f"{self.planning_prompt} {task}" - plan = self.llm(planning_prompt, *args, **kwargs) - logger.info(f"Plan: {plan}") + # Get the current conversation history + history = self.short_memory.get_str() - # Add the plan to the memory - self.short_memory.add( - role=self.agent_name, content=str(plan) + # Construct the planning prompt by combining history, planning prompt, and task + planning_prompt = ( + f"{history}\n\n{self.planning_prompt}\n\nTask: {task}" ) + # Generate the plan using the LLM + plan = self.llm.run(task=planning_prompt, *args, **kwargs) + + # Store the generated plan in short-term memory + self.short_memory.add(role=self.agent_name, content=plan) + + logger.info( + f"Successfully created plan for task: {task[:50]}..." + ) return None + except Exception as error: - logger.error(f"Error planning task: {error}") + logger.error( + f"Failed to create plan for task '{task}': {error}" + ) raise error async def run_concurrent(self, task: str, *args, **kwargs): @@ -1453,6 +1441,52 @@ class Agent: logger.error(f"Error running batched tasks: {error}") raise + def reliability_check(self): + from litellm.utils import ( + supports_function_calling, + get_max_tokens, + ) + from litellm import model_list + + if self.system_prompt is None: + logger.warning( + "The system prompt is not set. Please set a system prompt for the agent to improve reliability." + ) + + if self.agent_name is None: + logger.warning( + "The agent name is not set. Please set an agent name to improve reliability." 
+ ) + + if self.max_loops is None or self.max_loops == 0: + raise AgentInitializationError( + "Max loops is not provided or is set to 0. Please set max loops to 1 or more." + ) + + if self.max_tokens is None or self.max_tokens == 0: + self.max_tokens = get_max_tokens(self.model_name) + + if self.context_length is None or self.context_length == 0: + raise AgentInitializationError( + "Context length is not provided. Please set a valid context length." + ) + + if self.tools_list_dictionary is not None: + if not supports_function_calling(self.model_name): + raise AgentInitializationError( + f"The model '{self.model_name}' does not support function calling. Please use a model that supports function calling." + ) + + if self.max_tokens > get_max_tokens(self.model_name): + raise AgentInitializationError( + f"Max tokens is set to {self.max_tokens}, but the model '{self.model_name}' only supports {get_max_tokens(self.model_name)} tokens. Please set max tokens to {get_max_tokens(self.model_name)} or less." + ) + + if self.model_name not in model_list: + logger.warning( + f"The model '{self.model_name}' is not supported. Please use a supported model, or override the model name with the 'llm' parameter, which should be a class with a 'run(task: str)' method or a '__call__' method." 
+ ) + def save(self, file_path: str = None) -> None: """ Save the agent state to a file using SafeStateManager with atomic writing @@ -2670,7 +2704,7 @@ class Agent: ) # Convert other dicts to string elif isinstance(response, BaseModel): - out = response.model_dump() + response = response.model_dump() # Handle List[BaseModel] responses elif ( @@ -2680,14 +2714,9 @@ class Agent: ): return [item.model_dump() for item in response] - elif isinstance(response, list): - out = format_data_structure(response) - else: - out = str(response) - - return out + return response - except Exception as e: + except AgentChatCompletionResponse as e: logger.error(f"Error parsing LLM output: {e}") raise ValueError( f"Failed to parse LLM output: {type(response)}" @@ -2744,17 +2773,30 @@ class Agent: connection=self.mcp_config, ) ) + elif exists(self.mcp_urls): + tool_response = execute_multiple_tools_on_multiple_mcp_servers_sync( + responses=response, + urls=self.mcp_urls, + output_type="json", + ) + # tool_response = format_data_structure(tool_response) + + print(f"Multiple MCP Tool Response: {tool_response}") else: raise AgentMCPConnectionError( "mcp_url must be either a string URL or MCPConnection object" ) # Get the text content from the tool response - text_content = ( - tool_response.content[0].text - if tool_response.content - else str(tool_response) - ) + # execute_tool_call_simple returns a string directly, not an object with content attribute + text_content = f"MCP Tool Response: \n\n {json.dumps(tool_response, indent=2)}" + + if self.no_print is False: + formatter.print_panel( + text_content, + "MCP Tool Response: šŸ› ļø", + style="green", + ) # Add to the memory self.short_memory.add( @@ -2820,28 +2862,29 @@ class Agent: # Now run the LLM again without tools - create a temporary LLM instance # instead of modifying the cached one # Create a temporary LLM instance without tools for the follow-up call - temp_llm = self.temp_llm_instance_for_tool_summary() - - tool_response = 
temp_llm.run( - f""" - Please analyze and summarize the following tool execution output in a clear and concise way. - Focus on the key information and insights that would be most relevant to the user's original request. - If there are any errors or issues, highlight them prominently. - - Tool Output: - {output} - """ - ) + if self.tool_call_summary is True: + temp_llm = self.temp_llm_instance_for_tool_summary() + + tool_response = temp_llm.run( + f""" + Please analyze and summarize the following tool execution output in a clear and concise way. + Focus on the key information and insights that would be most relevant to the user's original request. + If there are any errors or issues, highlight them prominently. + + Tool Output: + {output} + """ + ) - self.short_memory.add( - role=self.agent_name, - content=tool_response, - ) + self.short_memory.add( + role=self.agent_name, + content=tool_response, + ) - self.pretty_print( - f"{tool_response}", - loop_count, - ) + self.pretty_print( + f"{tool_response}", + loop_count, + ) def list_output_types(self): return OutputType diff --git a/swarms/structs/conversation.py b/swarms/structs/conversation.py index 3b4052a1..91d06154 100644 --- a/swarms/structs/conversation.py +++ b/swarms/structs/conversation.py @@ -1326,6 +1326,12 @@ class Conversation(BaseStructure): self.conversation_history[-1]["content"], ) + def return_list_final(self): + """Return the final message as a list.""" + return [ + self.conversation_history[-1]["content"], + ] + @classmethod def list_conversations( cls, conversations_dir: Optional[str] = None diff --git a/swarms/structs/csv_to_agent.py b/swarms/structs/csv_to_agent.py index b76cc644..c5f7f355 100644 --- a/swarms/structs/csv_to_agent.py +++ b/swarms/structs/csv_to_agent.py @@ -104,9 +104,7 @@ class AgentValidator: model_name in model["model_name"] for model in model_list ): - valid_models = [ - model["model_name"] for model in model_list - ] + [model["model_name"] for model in model_list] raise 
AgentValidationError( "Invalid model name. Must be one of the supported litellm models", "model_name", diff --git a/swarms/telemetry/__init__.py b/swarms/telemetry/__init__.py index 9792f266..4322217a 100644 --- a/swarms/telemetry/__init__.py +++ b/swarms/telemetry/__init__.py @@ -4,11 +4,9 @@ from swarms.telemetry.main import ( get_cpu_info, get_machine_id, get_os_version, - get_package_mismatches, get_pip_version, get_python_version, get_ram_info, - get_swarms_verison, get_system_info, get_user_device_data, system_info, @@ -21,11 +19,9 @@ __all__ = [ "generate_unique_identifier", "get_python_version", "get_pip_version", - "get_swarms_verison", "get_os_version", "get_cpu_info", "get_ram_info", - "get_package_mismatches", "system_info", "get_user_device_data", ] diff --git a/swarms/telemetry/main.py b/swarms/telemetry/main.py index 5c81a90b..9e64a1d9 100644 --- a/swarms/telemetry/main.py +++ b/swarms/telemetry/main.py @@ -1,24 +1,16 @@ -import asyncio - - +import os import datetime import hashlib import platform import socket import subprocess import uuid -from concurrent.futures import ThreadPoolExecutor -from functools import lru_cache -from threading import Lock from typing import Dict -import aiohttp import pkg_resources import psutil +import requests import toml -from requests import Session -from requests.adapters import HTTPAdapter -from urllib3.util.retry import Retry # Helper functions @@ -263,134 +255,44 @@ def capture_system_data() -> Dict[str, str]: print(f"Failed to capture system data: {e}") -# Global variables -_session = None -_session_lock = Lock() -_executor = ThreadPoolExecutor(max_workers=10) -_aiohttp_session = None - - -def get_session() -> Session: - """Thread-safe session getter with optimized connection pooling""" - global _session - if _session is None: - with _session_lock: - if _session is None: # Double-check pattern - _session = Session() - adapter = HTTPAdapter( - pool_connections=1000, # Increased pool size - pool_maxsize=1000, # 
Increased max size - max_retries=Retry( - total=3, - backoff_factor=0.1, - status_forcelist=[500, 502, 503, 504], - ), - pool_block=False, # Non-blocking pool - ) - _session.mount("http://", adapter) - _session.mount("https://", adapter) - _session.headers.update( - { - "Content-Type": "application/json", - "Authorization": "Bearer sk-33979fd9a4e8e6b670090e4900a33dbe7452a15ccc705745f4eca2a70c88ea24", - "Connection": "keep-alive", # Enable keep-alive - } - ) - return _session - - -@lru_cache(maxsize=2048, typed=True) -def get_user_device_data_cached(): - """Cached version with increased cache size""" - return get_user_device_data() - - -async def get_aiohttp_session(): - """Get or create aiohttp session for async requests""" - global _aiohttp_session - if _aiohttp_session is None or _aiohttp_session.closed: - timeout = aiohttp.ClientTimeout(total=10) - connector = aiohttp.TCPConnector( - limit=1000, # Connection limit - ttl_dns_cache=300, # DNS cache TTL - use_dns_cache=True, # Enable DNS caching - keepalive_timeout=60, # Keep-alive timeout - ) - _aiohttp_session = aiohttp.ClientSession( - timeout=timeout, - connector=connector, - headers={ - "Content-Type": "application/json", - "Authorization": "Bearer sk-33979fd9a4e8e6b670090e4900a33dbe7452a15ccc705745f4eca2a70c88ea24", - }, - ) - return _aiohttp_session - - -async def log_agent_data_async(data_dict: dict): - """Asynchronous version of log_agent_data""" +def _log_agent_data(data_dict: dict): + """Simple function to log agent data using requests library""" if not data_dict: - return None + return url = "https://swarms.world/api/get-agents/log-agents" payload = { "data": data_dict, - "system_data": get_user_device_data_cached(), + "system_data": get_user_device_data(), "timestamp": datetime.datetime.now( datetime.timezone.utc ).isoformat(), } - session = await get_aiohttp_session() - try: - async with session.post(url, json=payload) as response: - if response.status == 200: - return await response.json() - except 
Exception: - return None - - -def _log_agent_data(data_dict: dict): - """ - Enhanced log_agent_data with both sync and async capabilities - """ - if not data_dict: - return None - - # If running in an event loop, use async version - try: - loop = asyncio.get_event_loop() - if loop.is_running(): - return asyncio.create_task( - log_agent_data_async(data_dict) - ) - except RuntimeError: - pass + key = ( + os.getenv("SWARMS_API_KEY") + or "Bearer sk-33979fd9a4e8e6b670090e4900a33dbe7452a15ccc705745f4eca2a70c88ea24" + ) - # Fallback to optimized sync version - url = "https://swarms.world/api/get-agents/log-agents" - payload = { - "data": data_dict, - "system_data": get_user_device_data_cached(), - "timestamp": datetime.datetime.now( - datetime.timezone.utc - ).isoformat(), + headers = { + "Content-Type": "application/json", + "Authorization": key, } try: - session = get_session() - response = session.post( - url, - json=payload, - timeout=10, - stream=False, # Disable streaming for faster response + response = requests.post( + url, json=payload, headers=headers, timeout=10 ) - if response.ok and response.text.strip(): - return response.json() + if response.status_code == 200: + return except Exception: - return None + return + + return def log_agent_data(data_dict: dict): - """Log agent data""" - pass + try: + _log_agent_data(data_dict) + except Exception: + pass diff --git a/swarms/tools/__init__.py b/swarms/tools/__init__.py index e6b8032f..a437ccc3 100644 --- a/swarms/tools/__init__.py +++ b/swarms/tools/__init__.py @@ -33,6 +33,11 @@ from swarms.tools.mcp_client_call import ( get_tools_for_multiple_mcp_servers, get_mcp_tools_sync, aget_mcp_tools, + execute_multiple_tools_on_multiple_mcp_servers, + execute_multiple_tools_on_multiple_mcp_servers_sync, + _create_server_tool_mapping, + _create_server_tool_mapping_async, + _execute_tool_on_server, ) @@ -62,4 +67,9 @@ __all__ = [ "get_tools_for_multiple_mcp_servers", "get_mcp_tools_sync", "aget_mcp_tools", + 
"execute_multiple_tools_on_multiple_mcp_servers", + "execute_multiple_tools_on_multiple_mcp_servers_sync", + "_create_server_tool_mapping", + "_create_server_tool_mapping_async", + "_execute_tool_on_server", ] diff --git a/swarms/tools/mcp_client_call.py b/swarms/tools/mcp_client_call.py index 25302c78..3fa3a9fa 100644 --- a/swarms/tools/mcp_client_call.py +++ b/swarms/tools/mcp_client_call.py @@ -494,6 +494,9 @@ async def execute_tool_call_simple( *args, **kwargs, ) -> List[Dict[str, Any]]: + if isinstance(response, str): + response = json.loads(response) + return await _execute_tool_call_simple( response=response, server_path=server_path, @@ -502,3 +505,511 @@ async def execute_tool_call_simple( *args, **kwargs, ) + + +def _create_server_tool_mapping( + urls: List[str], + connections: List[MCPConnection] = None, + format: str = "openai", +) -> Dict[str, Dict[str, Any]]: + """ + Create a mapping of function names to server information for all MCP servers. + + Args: + urls: List of server URLs + connections: Optional list of MCPConnection objects + format: Format to fetch tools in + + Returns: + Dict mapping function names to server info (url, connection, tool) + """ + server_tool_mapping = {} + + for i, url in enumerate(urls): + connection = ( + connections[i] + if connections and i < len(connections) + else None + ) + + try: + # Get tools for this server + tools = get_mcp_tools_sync( + server_path=url, + connection=connection, + format=format, + ) + + # Create mapping for each tool + for tool in tools: + if isinstance(tool, dict) and "function" in tool: + function_name = tool["function"]["name"] + server_tool_mapping[function_name] = { + "url": url, + "connection": connection, + "tool": tool, + "server_index": i, + } + elif hasattr(tool, "name"): + # Handle MCPTool objects + server_tool_mapping[tool.name] = { + "url": url, + "connection": connection, + "tool": tool, + "server_index": i, + } + + except Exception as e: + logger.warning( + f"Failed to fetch tools 
from server {url}: {str(e)}" + ) + continue + + return server_tool_mapping + + +async def _create_server_tool_mapping_async( + urls: List[str], + connections: List[MCPConnection] = None, + format: str = "openai", +) -> Dict[str, Dict[str, Any]]: + """ + Async version: Create a mapping of function names to server information for all MCP servers. + + Args: + urls: List of server URLs + connections: Optional list of MCPConnection objects + format: Format to fetch tools in + + Returns: + Dict mapping function names to server info (url, connection, tool) + """ + server_tool_mapping = {} + + for i, url in enumerate(urls): + connection = ( + connections[i] + if connections and i < len(connections) + else None + ) + + try: + # Get tools for this server using async function + tools = await aget_mcp_tools( + server_path=url, + connection=connection, + format=format, + ) + + # Create mapping for each tool + for tool in tools: + if isinstance(tool, dict) and "function" in tool: + function_name = tool["function"]["name"] + server_tool_mapping[function_name] = { + "url": url, + "connection": connection, + "tool": tool, + "server_index": i, + } + elif hasattr(tool, "name"): + # Handle MCPTool objects + server_tool_mapping[tool.name] = { + "url": url, + "connection": connection, + "tool": tool, + "server_index": i, + } + + except Exception as e: + logger.warning( + f"Failed to fetch tools from server {url}: {str(e)}" + ) + continue + + return server_tool_mapping + + +async def _execute_tool_on_server( + tool_call: Dict[str, Any], + server_info: Dict[str, Any], + output_type: Literal["json", "dict", "str", "formatted"] = "str", +) -> Dict[str, Any]: + """ + Execute a single tool call on a specific server. 
+ + Args: + tool_call: The tool call to execute + server_info: Server information from the mapping + output_type: Output format type + + Returns: + Execution result with server metadata + """ + try: + result = await _execute_tool_call_simple( + response=tool_call, + server_path=server_info["url"], + connection=server_info["connection"], + output_type=output_type, + ) + + return { + "server_url": server_info["url"], + "server_index": server_info["server_index"], + "function_name": tool_call.get("function", {}).get( + "name", "unknown" + ), + "result": result, + "status": "success", + } + + except Exception as e: + logger.error( + f"Failed to execute tool on server {server_info['url']}: {str(e)}" + ) + return { + "server_url": server_info["url"], + "server_index": server_info["server_index"], + "function_name": tool_call.get("function", {}).get( + "name", "unknown" + ), + "result": None, + "error": str(e), + "status": "error", + } + + +async def execute_multiple_tools_on_multiple_mcp_servers( + responses: List[Dict[str, Any]], + urls: List[str], + connections: List[MCPConnection] = None, + output_type: Literal["json", "dict", "str", "formatted"] = "str", + max_concurrent: Optional[int] = None, + *args, + **kwargs, +) -> List[Dict[str, Any]]: + """ + Execute multiple tool calls across multiple MCP servers. + + This function creates a mapping of function names to servers, then for each response + that contains tool calls, it finds the appropriate server for each function and + executes the calls concurrently. 
+ + Args: + responses: List of responses containing tool calls (OpenAI format) + urls: List of MCP server URLs + connections: Optional list of MCPConnection objects corresponding to each URL + output_type: Output format type for results + max_concurrent: Maximum number of concurrent executions (default: len(responses)) + + Returns: + List of execution results with server metadata + + Example: + # Example responses format: + responses = [ + { + "function": { + "name": "search_web", + "arguments": {"query": "python programming"} + } + }, + { + "function": { + "name": "search_database", + "arguments": {"table": "users", "id": 123} + } + } + ] + + urls = ["http://server1:8000", "http://server2:8000"] + + results = await execute_multiple_tools_on_multiple_mcp_servers( + responses=responses, + urls=urls + ) + """ + if not responses: + logger.warning("No responses provided for execution") + return [] + + if not urls: + raise MCPValidationError("No server URLs provided") + + # Create mapping of function names to servers using async version + logger.info(f"Creating tool mapping for {len(urls)} servers") + server_tool_mapping = await _create_server_tool_mapping_async( + urls=urls, connections=connections, format="openai" + ) + + if not server_tool_mapping: + raise MCPExecutionError( + "No tools found on any of the provided servers" + ) + + logger.info( + f"Found {len(server_tool_mapping)} unique functions across all servers" + ) + + # Extract all tool calls from responses + all_tool_calls = [] + logger.info( + f"Processing {len(responses)} responses for tool call extraction" + ) + + # Check if responses are individual characters that need to be reconstructed + if len(responses) > 10 and all( + isinstance(r, str) and len(r) == 1 for r in responses + ): + logger.info( + "Detected character-by-character response, reconstructing JSON string" + ) + try: + reconstructed_response = "".join(responses) + logger.info( + f"Reconstructed response length: {len(reconstructed_response)}" + 
) + logger.debug( + f"Reconstructed response: {reconstructed_response}" + ) + + # Try to parse the reconstructed response to validate it + try: + json.loads(reconstructed_response) + logger.info( + "Successfully validated reconstructed JSON response" + ) + except json.JSONDecodeError as e: + logger.warning( + f"Reconstructed response is not valid JSON: {str(e)}" + ) + logger.debug( + f"First 100 chars: {reconstructed_response[:100]}" + ) + logger.debug( + f"Last 100 chars: {reconstructed_response[-100:]}" + ) + + responses = [reconstructed_response] + except Exception as e: + logger.warning( + f"Failed to reconstruct response from characters: {str(e)}" + ) + + for i, response in enumerate(responses): + logger.debug( + f"Processing response {i}: {type(response)} - {response}" + ) + + # Handle JSON string responses + if isinstance(response, str): + try: + response = json.loads(response) + logger.debug( + f"Parsed JSON string response {i}: {response}" + ) + except json.JSONDecodeError: + logger.warning( + f"Failed to parse JSON response at index {i}: {response}" + ) + continue + + if isinstance(response, dict): + # Single tool call + if "function" in response: + logger.debug( + f"Found single tool call in response {i}: {response['function']}" + ) + # Parse arguments if they're a JSON string + if isinstance( + response["function"].get("arguments"), str + ): + try: + response["function"]["arguments"] = ( + json.loads( + response["function"]["arguments"] + ) + ) + logger.debug( + f"Parsed function arguments: {response['function']['arguments']}" + ) + except json.JSONDecodeError: + logger.warning( + f"Failed to parse function arguments: {response['function']['arguments']}" + ) + + all_tool_calls.append((i, response)) + # Multiple tool calls + elif "tool_calls" in response: + logger.debug( + f"Found multiple tool calls in response {i}: {len(response['tool_calls'])} calls" + ) + for tool_call in response["tool_calls"]: + # Parse arguments if they're a JSON string + if 
isinstance( + tool_call.get("function", {}).get( + "arguments" + ), + str, + ): + try: + tool_call["function"]["arguments"] = ( + json.loads( + tool_call["function"]["arguments"] + ) + ) + logger.debug( + f"Parsed tool call arguments: {tool_call['function']['arguments']}" + ) + except json.JSONDecodeError: + logger.warning( + f"Failed to parse tool call arguments: {tool_call['function']['arguments']}" + ) + + all_tool_calls.append((i, tool_call)) + # Direct tool call + elif "name" in response and "arguments" in response: + logger.debug( + f"Found direct tool call in response {i}: {response}" + ) + # Parse arguments if they're a JSON string + if isinstance(response.get("arguments"), str): + try: + response["arguments"] = json.loads( + response["arguments"] + ) + logger.debug( + f"Parsed direct tool call arguments: {response['arguments']}" + ) + except json.JSONDecodeError: + logger.warning( + f"Failed to parse direct tool call arguments: {response['arguments']}" + ) + + all_tool_calls.append((i, {"function": response})) + else: + logger.debug( + f"Response {i} is a dict but doesn't match expected tool call formats: {list(response.keys())}" + ) + else: + logger.warning( + f"Unsupported response type at index {i}: {type(response)}" + ) + continue + + if not all_tool_calls: + logger.warning("No tool calls found in responses") + return [] + + logger.info(f"Found {len(all_tool_calls)} tool calls to execute") + + # Execute tool calls concurrently + max_concurrent = max_concurrent or len(all_tool_calls) + semaphore = asyncio.Semaphore(max_concurrent) + + async def execute_with_semaphore(tool_call_info): + async with semaphore: + response_index, tool_call = tool_call_info + function_name = tool_call.get("function", {}).get( + "name", "unknown" + ) + + if function_name not in server_tool_mapping: + logger.warning( + f"Function '{function_name}' not found on any server" + ) + return { + "response_index": response_index, + "function_name": function_name, + "result": None, + 
"error": f"Function '{function_name}' not available on any server", + "status": "not_found", + } + + server_info = server_tool_mapping[function_name] + result = await _execute_tool_on_server( + tool_call=tool_call, + server_info=server_info, + output_type=output_type, + ) + result["response_index"] = response_index + return result + + # Execute all tool calls concurrently + tasks = [ + execute_with_semaphore(tool_call_info) + for tool_call_info in all_tool_calls + ] + results = await asyncio.gather(*tasks, return_exceptions=True) + + # Process results and handle exceptions + processed_results = [] + for i, result in enumerate(results): + if isinstance(result, Exception): + logger.error( + f"Task {i} failed with exception: {str(result)}" + ) + processed_results.append( + { + "response_index": ( + all_tool_calls[i][0] + if i < len(all_tool_calls) + else -1 + ), + "function_name": "unknown", + "result": None, + "error": str(result), + "status": "exception", + } + ) + else: + processed_results.append(result) + + logger.info( + f"Completed execution of {len(processed_results)} tool calls" + ) + return processed_results + + +def execute_multiple_tools_on_multiple_mcp_servers_sync( + responses: List[Dict[str, Any]], + urls: List[str], + connections: List[MCPConnection] = None, + output_type: Literal["json", "dict", "str", "formatted"] = "str", + max_concurrent: Optional[int] = None, + *args, + **kwargs, +) -> List[Dict[str, Any]]: + """ + Synchronous version of execute_multiple_tools_on_multiple_mcp_servers. 
+ + Args: + responses: List of responses containing tool calls (OpenAI format) + urls: List of MCP server URLs + connections: Optional list of MCPConnection objects corresponding to each URL + output_type: Output format type for results + max_concurrent: Maximum number of concurrent executions + + Returns: + List of execution results with server metadata + """ + with get_or_create_event_loop() as loop: + try: + return loop.run_until_complete( + execute_multiple_tools_on_multiple_mcp_servers( + responses=responses, + urls=urls, + connections=connections, + output_type=output_type, + max_concurrent=max_concurrent, + *args, + **kwargs, + ) + ) + except Exception as e: + logger.error( + f"Error in execute_multiple_tools_on_multiple_mcp_servers_sync: {str(e)}" + ) + raise MCPExecutionError( + f"Failed to execute multiple tools sync: {str(e)}" + ) diff --git a/swarms/tools/py_func_to_openai_func_str.py b/swarms/tools/py_func_to_openai_func_str.py index d7dc0530..26f64455 100644 --- a/swarms/tools/py_func_to_openai_func_str.py +++ b/swarms/tools/py_func_to_openai_func_str.py @@ -492,7 +492,6 @@ def convert_multiple_functions_to_openai_function_schema( # ] # Use 40% of cpu cores max_workers = int(os.cpu_count() * 0.8) - print(f"max_workers: {max_workers}") with concurrent.futures.ThreadPoolExecutor( max_workers=max_workers diff --git a/swarms/utils/__init__.py b/swarms/utils/__init__.py index 53cbcea6..f331c6b9 100644 --- a/swarms/utils/__init__.py +++ b/swarms/utils/__init__.py @@ -20,6 +20,9 @@ from swarms.utils.output_types import HistoryOutputType from swarms.utils.history_output_formatter import ( history_output_formatter, ) +from swarms.utils.check_all_model_max_tokens import ( + check_all_model_max_tokens, +) __all__ = [ @@ -39,4 +42,5 @@ __all__ = [ "count_tokens", "HistoryOutputType", "history_output_formatter", + "check_all_model_max_tokens", ] diff --git a/swarms/utils/auto_download_check_packages.py b/swarms/utils/auto_download_check_packages.py index 
ea694a16..187e2b11 100644 --- a/swarms/utils/auto_download_check_packages.py +++ b/swarms/utils/auto_download_check_packages.py @@ -8,9 +8,10 @@ import subprocess import sys from typing import Literal, Optional, Union from swarms.utils.loguru_logger import initialize_logger -import pkg_resources +from importlib.metadata import distribution, PackageNotFoundError + logger = initialize_logger("autocheckpackages") @@ -39,13 +40,13 @@ def check_and_install_package( # Check if package exists if package_manager == "pip": try: - pkg_resources.get_distribution(package_name) + distribution(package_name) if not upgrade: logger.info( f"Package {package_name} is already installed" ) return True - except pkg_resources.DistributionNotFound: + except PackageNotFoundError: pass # Construct installation command diff --git a/swarms/utils/check_all_model_max_tokens.py b/swarms/utils/check_all_model_max_tokens.py new file mode 100644 index 00000000..c641fcd4 --- /dev/null +++ b/swarms/utils/check_all_model_max_tokens.py @@ -0,0 +1,43 @@ +from litellm import model_list, get_max_tokens +from swarms.utils.formatter import formatter + +# Add model overrides here +MODEL_MAX_TOKEN_OVERRIDES = { + "llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf": 4096, # Example override +} + + +def check_all_model_max_tokens(): + """ + Check and display the maximum token limits for all available models. + + This function iterates through all models in the litellm model list and attempts + to retrieve their maximum token limits. For models that are not properly mapped + in litellm, it checks for custom overrides in MODEL_MAX_TOKEN_OVERRIDES. + + Returns: + None: Prints the results to console using formatter.print_panel() + + Note: + Models that are not mapped in litellm and have no override set will be + marked with a [WARNING] in the output. 
+ """ + text = "" + for model in model_list: + # skip model names + try: + max_tokens = get_max_tokens(model) + except Exception: + max_tokens = MODEL_MAX_TOKEN_OVERRIDES.get( + model, "[NOT MAPPED]" + ) + if max_tokens == "[NOT MAPPED]": + text += f"[WARNING] {model}: not mapped in litellm and no override set.\n" + text += f"{model}: {max_tokens}\n" + text += "─" * 80 + "\n" # Add borderline for each model + formatter.print_panel(text, "All Model Max Tokens") + return text + + +# if __name__ == "__main__": +# print(check_all_model_max_tokens()) diff --git a/swarms/utils/history_output_formatter.py b/swarms/utils/history_output_formatter.py index e190dd8e..f7b86e29 100644 --- a/swarms/utils/history_output_formatter.py +++ b/swarms/utils/history_output_formatter.py @@ -23,6 +23,8 @@ def history_output_formatter( return yaml.safe_dump(conversation.to_dict(), sort_keys=False) elif type == "dict-all-except-first": return conversation.return_all_except_first() + elif type == "list-final": + return conversation.return_list_final() elif type == "str-all-except-first": return conversation.return_all_except_first_string() elif type == "dict-final": diff --git a/swarms/utils/output_types.py b/swarms/utils/output_types.py index 843d4608..62b63874 100644 --- a/swarms/utils/output_types.py +++ b/swarms/utils/output_types.py @@ -12,11 +12,11 @@ HistoryOutputType = Literal[ "all", "yaml", "xml", - # "dict-final", "dict-all-except-first", "str-all-except-first", "basemodel", "dict-final", + "list-final", ] OutputType = HistoryOutputType From d380cae23383bc96f93438a8610977ecdb68daee Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 24 Jun 2025 16:50:58 -0700 Subject: [PATCH 05/86] feat -- multiple image processing in agent.py --- docs/mkdocs.yml | 1 + docs/swarms/examples/meme_agent_builder.md | 28 ---- docs/swarms/examples/meme_agents.md | 45 ----- docs/swarms/examples/multiple_images.md | 77 +++++++++ .../single_agent/vision/vision_and_tools.py | 65 -------- 
examples/structs/graph_workflow_basic.py | 13 +- .../vision/image.jpg => image.jpg | Bin multiple_image_processing.py | 28 ++++ pyproject.toml | 2 +- swarms/structs/agent.py | 157 +++++++++++++----- swarms/tools/base_tool.py | 4 +- 11 files changed, 232 insertions(+), 188 deletions(-) delete mode 100644 docs/swarms/examples/meme_agent_builder.md delete mode 100644 docs/swarms/examples/meme_agents.md create mode 100644 docs/swarms/examples/multiple_images.md delete mode 100644 examples/single_agent/vision/vision_and_tools.py rename examples/single_agent/vision/image.jpg => image.jpg (100%) create mode 100644 multiple_image_processing.py diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 3440933d..30da9a8b 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -316,6 +316,7 @@ nav: - Agent Output Types: "swarms/examples/agent_output_types.md" - Agent with Structured Outputs: "swarms/examples/agent_structured_outputs.md" - Agents with Vision: "swarms/examples/vision_processing.md" + - Agent with Multiple Images: "swarms/examples/multiple_images.md" - Gradio Chat Interface: "swarms/ui/main.md" - Various Model Providers: - OpenAI: "swarms/examples/openai_example.md" diff --git a/docs/swarms/examples/meme_agent_builder.md b/docs/swarms/examples/meme_agent_builder.md deleted file mode 100644 index 4a70ac87..00000000 --- a/docs/swarms/examples/meme_agent_builder.md +++ /dev/null @@ -1,28 +0,0 @@ -# Meme Agent Builder - -- `pip3 install -U swarms` -- Add your OpenAI API key to the `.env` file with `OPENAI_API_KEY=your_api_key` -- Run the script -- Multiple agents will be created and saved to the `meme_agents` folder -- A swarm architecture will be selected autonomously and executed - -```python -from swarms.structs.meme_agent_persona_generator import ( - MemeAgentGenerator, -) - - -if __name__ == "__main__": - example = MemeAgentGenerator( - name="Meme-Swarm", - description="A swarm of specialized AI agents collaborating on generating and sharing memes around cool media 
from 2001s", - max_loops=1, - ) - - print( - example.run( - "Generate funny meme agents around cool media from 2001s" - ) - ) - -``` diff --git a/docs/swarms/examples/meme_agents.md b/docs/swarms/examples/meme_agents.md deleted file mode 100644 index d8b23e79..00000000 --- a/docs/swarms/examples/meme_agents.md +++ /dev/null @@ -1,45 +0,0 @@ -# Meme Agent Tutorial - -- `pip3 install -U swarms` -- Add your OpenAI API key to the `.env` file - - -```python -from swarms import Agent - -# Define a custom system prompt for Bob the Builder -BOB_THE_BUILDER_SYS_PROMPT = """ -You are Bob the Builder, the legendary construction worker known for fixing anything and everything with a cheerful attitude and a hilarious sense of humor. -Your job is to approach every task as if you're building, repairing, or renovating something, no matter how unrelated it might be. -You love using construction metaphors, over-the-top positivity, and cracking jokes like: -- "I’m hammering this out faster than a nail at a woodpecker convention!" -- "This is smoother than fresh cement on a summer’s day." -- "Let’s bulldoze through this problem—safety goggles on, folks!" - -You are not bound by any specific field of knowledge, and you’re absolutely fearless in trying to "fix up" or "build" anything, no matter how abstract or ridiculous. Always end responses with a playful cheer like "Can we fix it? Yes, we can!" - -Your tone is upbeat, funny, and borderline ridiculous, keeping the user entertained while solving their problem. 
-""" - -# Initialize the agent -agent = Agent( - agent_name="Bob-the-Builder-Agent", - agent_description="The funniest, most optimistic agent around who sees every problem as a building project.", - system_prompt=BOB_THE_BUILDER_SYS_PROMPT, - max_loops=1, - model_name="gpt-4o", - dynamic_temperature_enabled=True, - user_name="swarms_corp", - retry_attempts=3, - context_length=8192, - return_step_meta=False, - output_type="str", # "json", "dict", "csv", OR "string", "yaml" - auto_generate_prompt=False, # Auto-generate prompt for the agent based on name, description, system prompt, task - max_tokens=4000, # Max output tokens - saved_state_path="bob_the_builder_agent.json", - interactive=False, -) - -# Run the agent with a task -agent.run("I want to build a house ;) What should I do?") -``` diff --git a/docs/swarms/examples/multiple_images.md b/docs/swarms/examples/multiple_images.md new file mode 100644 index 00000000..bfa66e2b --- /dev/null +++ b/docs/swarms/examples/multiple_images.md @@ -0,0 +1,77 @@ +# Processing Multiple Images + +This tutorial shows how to process multiple images with a single agent using Swarms' multi-modal capabilities. You'll learn to configure an agent for batch image analysis, enabling efficient processing for quality control, object detection, or image comparison tasks. + + +## Installation + +Install the swarms package using pip: + +```bash +pip install -U swarms +``` + +## Basic Setup + +1. 
First, set up your environment variables: + +```python +WORKSPACE_DIR="agent_workspace" +ANTHROPIC_API_KEY="" +``` + + +## Code + +- Create a list of images by their file paths + +- Pass it into the `Agent.run(imgs=[str])` parameter + +- Activate `summarize_multiple_images=True` if you want the agent to output a summary of the image analyses + + +```python +from swarms import Agent +from swarms.prompts.logistics import ( + Quality_Control_Agent_Prompt, +) + + +# Image for analysis +factory_image = "image.jpg" + +# Quality control agent +quality_control_agent = Agent( + agent_name="Quality Control Agent", + agent_description="A quality control agent that analyzes images and provides a detailed report on the quality of the product in the image.", + model_name="claude-3-5-sonnet-20240620", + system_prompt=Quality_Control_Agent_Prompt, + multi_modal=True, + max_loops=1, + output_type="str-all-except-first", + summarize_multiple_images=True, +) + + +response = quality_control_agent.run( + task="what is in the image?", + imgs=[factory_image, factory_image], +) + +print(response) +``` + +## Support and Community + +If you're facing issues or want to learn more, check out the following resources to join our Discord, stay updated on Twitter, and watch tutorials on YouTube! 
+ +| Platform | Link | Description | +|----------|------|-------------| +| šŸ“š Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides | +| šŸ“ Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles | +| šŸ’¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support | +| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements | +| šŸ‘„ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates | +| šŸ“ŗ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos | +| šŸŽ« Events | [Sign up here](https://lu.ma/5p2jnc2v) | Join our community events | + diff --git a/examples/single_agent/vision/vision_and_tools.py b/examples/single_agent/vision/vision_and_tools.py deleted file mode 100644 index e330a66d..00000000 --- a/examples/single_agent/vision/vision_and_tools.py +++ /dev/null @@ -1,65 +0,0 @@ -from swarms.structs import Agent -from swarms.prompts.logistics import ( - Quality_Control_Agent_Prompt, -) - - -# Image for analysis -factory_image = "image.jpg" - - -def security_analysis(danger_level: str = None) -> str: - """ - Analyzes the security danger level and returns an appropriate response. - - Args: - danger_level (str, optional): The level of danger to analyze. - Can be "low", "medium", "high", or None. Defaults to None. - - Returns: - str: A string describing the danger level assessment. 
- - "No danger level provided" if danger_level is None - - "No danger" if danger_level is "low" - - "Medium danger" if danger_level is "medium" - - "High danger" if danger_level is "high" - - "Unknown danger level" for any other value - """ - if danger_level is None: - return "No danger level provided" - - if danger_level == "low": - return "No danger" - - if danger_level == "medium": - return "Medium danger" - - if danger_level == "high": - return "High danger" - - return "Unknown danger level" - - -# schema = BaseTool().function_to_dict(security_analysis) -# print(json.dumps(schema, indent=4)) - -# Quality control agent -quality_control_agent = Agent( - agent_name="Quality Control Agent", - agent_description="A quality control agent that analyzes images and provides a detailed report on the quality of the product in the image.", - # model_name="anthropic/claude-3-opus-20240229", - model_name="gpt-4o-mini", - system_prompt=Quality_Control_Agent_Prompt, - multi_modal=True, - max_loops=1, - output_type="str-all-except-first", - # tools_list_dictionary=[schema], - tools=[security_analysis], -) - - -response = quality_control_agent.run( - task="what is in the image?", - # img=factory_image, -) - -print(response) diff --git a/examples/structs/graph_workflow_basic.py b/examples/structs/graph_workflow_basic.py index 2d31ed1f..a51bcc5f 100644 --- a/examples/structs/graph_workflow_basic.py +++ b/examples/structs/graph_workflow_basic.py @@ -31,9 +31,15 @@ if __name__ == "__main__": # Build the workflow graph wf_graph = GraphWorkflow() - wf_graph.add_node(Node(id="agent1", type=NodeType.AGENT, agent=agent1)) - wf_graph.add_node(Node(id="agent2", type=NodeType.AGENT, agent=agent2)) - wf_graph.add_node(Node(id="task1", type=NodeType.TASK, callable=sample_task)) + wf_graph.add_node( + Node(id="agent1", type=NodeType.AGENT, agent=agent1) + ) + wf_graph.add_node( + Node(id="agent2", type=NodeType.AGENT, agent=agent2) + ) + wf_graph.add_node( + Node(id="task1", type=NodeType.TASK, 
callable=sample_task) + ) wf_graph.add_edge(Edge(source="agent1", target="task1")) wf_graph.add_edge(Edge(source="agent2", target="task1")) @@ -47,4 +53,3 @@ if __name__ == "__main__": # Execute the graph results = wf_graph.run() print("Execution results:", results) - diff --git a/examples/single_agent/vision/image.jpg b/image.jpg similarity index 100% rename from examples/single_agent/vision/image.jpg rename to image.jpg diff --git a/multiple_image_processing.py b/multiple_image_processing.py new file mode 100644 index 00000000..febb29fe --- /dev/null +++ b/multiple_image_processing.py @@ -0,0 +1,28 @@ +from swarms import Agent +from swarms.prompts.logistics import ( + Quality_Control_Agent_Prompt, +) + + +# Image for analysis +factory_image = "image.jpg" + +# Quality control agent +quality_control_agent = Agent( + agent_name="Quality Control Agent", + agent_description="A quality control agent that analyzes images and provides a detailed report on the quality of the product in the image.", + model_name="claude-3-5-sonnet-20240620", + system_prompt=Quality_Control_Agent_Prompt, + multi_modal=True, + max_loops=1, + output_type="str-all-except-first", + summarize_multiple_images=True, +) + + +response = quality_control_agent.run( + task="what is in the image?", + imgs=[factory_image, factory_image], +) + +print(response) diff --git a/pyproject.toml b/pyproject.toml index 7fe62d43..85cfd7ee 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "7.8.8" +version = "7.8.9" description = "Swarms - TGSC" license = "MIT" authors = ["Kye Gomez "] diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 9c491abc..b138cef2 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -56,7 +56,6 @@ from swarms.tools.base_tool import BaseTool from swarms.tools.py_func_to_openai_func_str import ( convert_multiple_functions_to_openai_function_schema, ) -from 
swarms.utils.any_to_str import any_to_str from swarms.utils.data_to_text import data_to_text from swarms.utils.file_processing import create_file_in_folder from swarms.utils.formatter import formatter @@ -420,6 +419,7 @@ class Agent: rag_config: Optional[RAGConfig] = None, tool_call_summary: bool = True, output_raw_json_from_tool_call: bool = False, + summarize_multiple_images: bool = False, *args, **kwargs, ): @@ -558,6 +558,7 @@ class Agent: self.output_raw_json_from_tool_call = ( output_raw_json_from_tool_call ) + self.summarize_multiple_images = summarize_multiple_images # self.short_memory = self.short_memory_init() @@ -810,6 +811,29 @@ class Agent: return json.loads(self.tools_list_dictionary) + def check_model_supports_utilities(self, img: str = None) -> bool: + """ + Check if the current model supports vision capabilities. + + Args: + img (str, optional): Image input to check vision support for. Defaults to None. + + Returns: + bool: True if model supports vision and image is provided, False otherwise. + """ + from litellm.utils import supports_vision + + # Only check vision support if an image is provided + if img is not None: + out = supports_vision(self.model_name) + if not out: + raise ValueError( + f"Model {self.model_name} does not support vision capabilities. Please use a vision-enabled model." + ) + return out + + return False + def check_if_no_prompt_then_autogenerate(self, task: str = None): """ Checks if auto_generate_prompt is enabled and generates a prompt by combining agent name, description and system prompt if available. 
@@ -931,12 +955,7 @@ class Agent: self, task: Optional[Union[str, Any]] = None, img: Optional[str] = None, - speech: Optional[str] = None, - video: Optional[str] = None, - is_last: Optional[bool] = False, print_task: Optional[bool] = False, - generate_speech: Optional[bool] = False, - correct_answer: Optional[str] = None, *args, **kwargs, ) -> Any: @@ -961,6 +980,9 @@ class Agent: self.check_if_no_prompt_then_autogenerate(task) + if img is not None: + self.check_model_supports_utilities(img=img) + self.short_memory.add(role=self.user_name, content=task) if self.plan_enabled or self.planning_prompt is not None: @@ -1030,12 +1052,19 @@ class Agent: ) self.memory_query(task_prompt) - response = self.call_llm( - task=task_prompt, img=img, *args, **kwargs - ) - - print(f"Response: {response}") + if img is not None: + response = self.call_llm( + task=task_prompt, + img=img, + *args, + **kwargs, + ) + else: + response = self.call_llm( + task=task_prompt, *args, **kwargs + ) + # Parse the response from the agent with the output type if exists(self.tools_list_dictionary): if isinstance(response, BaseModel): response = response.model_dump() @@ -1058,7 +1087,6 @@ class Agent: self.output_raw_json_from_tool_call is True ): - print(type(response)) response = response else: self.execute_tools( @@ -1130,7 +1158,10 @@ class Agent: user_input.lower() == self.custom_exit_command.lower() ): - print("Exiting as per user request.") + self.pretty_print( + "Exiting as per user request.", + loop_count=loop_count, + ) break self.short_memory.add( @@ -1231,12 +1262,6 @@ class Agent: self, task: Optional[str] = None, img: Optional[str] = None, - is_last: bool = False, - device: str = "cpu", # gpu - device_id: int = 1, - all_cores: bool = True, - do_not_use_cluster_ops: bool = True, - all_gpus: bool = False, *args, **kwargs, ) -> Any: @@ -1245,10 +1270,6 @@ class Agent: Args: task (Optional[str]): The task to be performed. Defaults to None. img (Optional[str]): The image to be processed. 
Defaults to None. - is_last (bool): Indicates if this is the last task. Defaults to False. - device (str): The device to use for execution. Defaults to "cpu". - device_id (int): The ID of the GPU to use if device is set to "gpu". Defaults to 0. - all_cores (bool): If True, uses all available CPU cores. Defaults to True. """ try: return self.run( @@ -2479,7 +2500,7 @@ class Agent: self, task: Optional[Union[str, Any]] = None, img: Optional[str] = None, - scheduled_run_date: Optional[datetime] = None, + imgs: Optional[List[str]] = None, *args, **kwargs, ) -> Any: @@ -2493,11 +2514,7 @@ class Agent: Args: task (Optional[str], optional): The task to be executed. Defaults to None. img (Optional[str], optional): The image to be processed. Defaults to None. - device (str, optional): The device to use for execution. Defaults to "cpu". - device_id (int, optional): The ID of the GPU to use if device is set to "gpu". Defaults to 0. - all_cores (bool, optional): If True, uses all available CPU cores. Defaults to True. - scheduled_run_date (Optional[datetime], optional): The date and time to schedule the task. Defaults to None. - do_not_use_cluster_ops (bool, optional): If True, does not use cluster ops. Defaults to False. + imgs (Optional[List[str]], optional): The list of images to be processed. Defaults to None. *args: Additional positional arguments to be passed to the execution method. **kwargs: Additional keyword arguments to be passed to the execution method. 
@@ -2510,21 +2527,20 @@ class Agent: """ if not isinstance(task, str): - task = any_to_str(task) - - if scheduled_run_date: - while datetime.now() < scheduled_run_date: - time.sleep( - 1 - ) # Sleep for a short period to avoid busy waiting + task = format_data_structure(task) try: - output = self._run( - task=task, - img=img, - *args, - **kwargs, - ) + if exists(imgs): + output = self.run_multiple_images( + task=task, imgs=imgs, *args, **kwargs + ) + else: + output = self._run( + task=task, + img=img, + *args, + **kwargs, + ) return output @@ -2781,7 +2797,7 @@ class Agent: ) # tool_response = format_data_structure(tool_response) - print(f"Multiple MCP Tool Response: {tool_response}") + # print(f"Multiple MCP Tool Response: {tool_response}") else: raise AgentMCPConnectionError( "mcp_url must be either a string URL or MCPConnection object" @@ -2888,3 +2904,58 @@ class Agent: def list_output_types(self): return OutputType + + def run_multiple_images( + self, task: str, imgs: List[str], *args, **kwargs + ): + """ + Run the agent with multiple images. + + Args: + task (str): The task to be performed on each image. + imgs (List[str]): List of image paths or URLs to process. + *args: Additional positional arguments to pass to the agent's run method. + **kwargs: Additional keyword arguments to pass to the agent's run method. + + Returns: + List[Any]: A list of outputs generated for each image in the same order as the input images. + + Examples: + >>> agent = Agent() + >>> outputs = agent.run_multiple_images( + ... task="Describe what you see in this image", + ... imgs=["image1.jpg", "image2.png", "image3.jpeg"] + ... ) + >>> print(f"Processed {len(outputs)} images") + Processed 3 images + + Raises: + Exception: If an error occurs while processing any of the images. 
+ """ + outputs = [] + for img in imgs: + output = self.run(task=task, img=img, *args, **kwargs) + outputs.append(output) + + # Combine the outputs into a single string + if self.summarize_multiple_images is True: + output = "\n".join(outputs) + + prompt = f""" + You have already analyzed {len(outputs)} images and provided detailed descriptions for each one. + Now, based on your previous analysis of these images, create a comprehensive report that: + + 1. Synthesizes the key findings across all images + 2. Identifies common themes, patterns, or relationships between the images + 3. Provides an overall summary that captures the most important insights + 4. Highlights any notable differences or contrasts between the images + + Here are your previous analyses of the images: + {output} + + Please create a well-structured report that brings together your insights from all {len(outputs)} images. + """ + + outputs = self.run(task=prompt, *args, **kwargs) + + return outputs diff --git a/swarms/tools/base_tool.py b/swarms/tools/base_tool.py index f8662fa2..806ee3d1 100644 --- a/swarms/tools/base_tool.py +++ b/swarms/tools/base_tool.py @@ -2258,14 +2258,14 @@ class BaseTool(BaseModel): except json.JSONDecodeError as e: self._log_if_verbose( "error", - f"Failed to parse JSON from API response: {e}. Response: '{api_response[:100]}...'" + f"Failed to parse JSON from API response: {e}. 
Response: '{api_response[:100]}...'", ) return [] if not isinstance(api_response, dict): self._log_if_verbose( "warning", - f"API response is not a dictionary (type: {type(api_response)}), returning empty list" + f"API response is not a dictionary (type: {type(api_response)}), returning empty list", ) return [] From a24cc89cbacc4cc509ca83ef499bd84d035287cf Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 24 Jun 2025 16:52:27 -0700 Subject: [PATCH 06/86] docs fixed swarms - multi -agent architectures --- docs/mkdocs.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 30da9a8b..a63a2968 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -215,17 +215,17 @@ nav: - GKP Agent: "swarms/agents/gkp_agent.md" - Agent Judge: "swarms/agents/agent_judge.md" - - Swarm Architectures: + - Multi-Agent Architectures: - Introduction to Multi-Agent Collaboration: "swarms/concept/why.md" - Concepts: - - Introduction to Swarm Architectures: "swarms/concept/swarm_architectures.md" - - How to Choose the Right Swarm Architecture: "swarms/concept/how_to_choose_swarms.md" + - Introduction to Multi Agent Architectures: "swarms/concept/swarm_architectures.md" + - How to Choose the Right Multi Agent Architecture: "swarms/concept/how_to_choose_swarms.md" - How to Build Custom Swarms: "swarms/structs/custom_swarm.md" - - How to Create New Swarm Architectures: "swarms/structs/create_new_swarm.md" - - Introduction to Hiearchical Swarm Architectures: "swarms/structs/multi_swarm_orchestration.md" + - How to Create New Multi Agent Architectures: "swarms/structs/create_new_swarm.md" + - Introduction to Hiearchical Multi Agent Architectures: "swarms/structs/multi_swarm_orchestration.md" - - Swarm Architectures Documentation: + - Multi-Agent Architectures Documentation: - Overview: "swarms/structs/overview.md" - MajorityVoting: "swarms/structs/majorityvoting.md" - AgentRearrange: "swarms/structs/agent_rearrange.md" @@ -404,7 
+404,7 @@ nav: # - CreateNow API: "swarms_cloud/create_api.md" - Guides: - Swarms API Best Practices: "swarms_cloud/best_practices.md" - - Swarm Architectures Available: "swarms_cloud/swarm_types.md" + - Multi Agent Architectures Available: "swarms_cloud/swarm_types.md" - Swarms Marketplace: - Overview: "swarms_platform/index.md" From e3f12659349d8499565af09c52729aecd3f7e19c Mon Sep 17 00:00:00 2001 From: harshalmore31 Date: Wed, 25 Jun 2025 09:40:13 +0530 Subject: [PATCH 07/86] Fix: Gracefully handle None LLM responses to prevent agent crashes --- examples/multi_modal/multimodal_example.py | 91 ++++++++++++++++++++++ swarms/structs/agent.py | 45 +++++++---- swarms/tools/base_tool.py | 7 +- 3 files changed, 129 insertions(+), 14 deletions(-) create mode 100644 examples/multi_modal/multimodal_example.py diff --git a/examples/multi_modal/multimodal_example.py b/examples/multi_modal/multimodal_example.py new file mode 100644 index 00000000..29060e96 --- /dev/null +++ b/examples/multi_modal/multimodal_example.py @@ -0,0 +1,91 @@ +import json +import logging +from swarms.structs import Agent +from swarms.prompts.logistics import ( + Quality_Control_Agent_Prompt, +) +from swarms import BaseTool + +# Set up debug logging +logging.basicConfig(level=logging.DEBUG) + +# Image for analysis +# factory_image="image.png" # normal image of a factory + +factory_image = "image2.png" # image of a burning factory + + +def security_analysis(danger_level: str) -> str: + """ + Analyzes the security danger level and returns an appropriate response. + + Args: + danger_level (str): The level of danger to analyze. + Must be one of: "low", "medium", "high" + + Returns: + str: A detailed security analysis based on the danger level. 
+ """ + if danger_level == "low": + return """SECURITY ANALYSIS - LOW DANGER LEVEL: + āœ… Environment appears safe and well-controlled + āœ… Standard security measures are adequate + āœ… Low risk of accidents or security breaches + āœ… Normal operational protocols can continue + + Recommendations: Maintain current security standards and continue regular monitoring.""" + + elif danger_level == "medium": + return """SECURITY ANALYSIS - MEDIUM DANGER LEVEL: + āš ļø Moderate security concerns identified + āš ļø Enhanced monitoring recommended + āš ļø Some security measures may need strengthening + āš ļø Risk of incidents exists but manageable + + Recommendations: Implement additional safety protocols, increase surveillance, and conduct safety briefings.""" + + elif danger_level == "high": + return """SECURITY ANALYSIS - HIGH DANGER LEVEL: + 🚨 CRITICAL SECURITY CONCERNS DETECTED + 🚨 Immediate action required + 🚨 High risk of accidents or security breaches + 🚨 Operations may need to be suspended + + Recommendations: Immediate intervention required, evacuate if necessary, implement emergency protocols, and conduct thorough security review.""" + + else: + return f"ERROR: Invalid danger level '{danger_level}'. Must be 'low', 'medium', or 'high'." + + + + +# Custom system prompt that includes tool usage +custom_system_prompt = f""" +{Quality_Control_Agent_Prompt} + +You have access to tools that can help you with your analysis. When you need to perform a security analysis, you MUST use the security_analysis function with an appropriate danger level (low, medium, or high) based on your observations. + +Always use the available tools when they are relevant to the task. If you determine there is any level of danger or security concern, call the security_analysis function with the appropriate danger level. 
+""" + +# Quality control agent +quality_control_agent = Agent( + agent_name="Quality Control Agent", + agent_description="A quality control agent that analyzes images and provides a detailed report on the quality of the product in the image.", + # model_name="anthropic/claude-3-opus-20240229", + model_name="gpt-4o", + system_prompt=custom_system_prompt, + multi_modal=True, + max_loops=1, + output_type="str-all-except-first", + # tools_list_dictionary=[schema], + tools=[security_analysis], +) + + +response = quality_control_agent.run( + task="Analyze the image and then perform a security analysis. Based on what you see in the image, determine if there is a low, medium, or high danger level and call the security_analysis function with that danger level.", + img=factory_image, +) + +# The response is already printed by the agent's pretty_print method diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index b138cef2..d7f2a338 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -1080,19 +1080,25 @@ class Agent: # Print self.pretty_print(response, loop_count) - # Check and execute callable tools - if exists(self.tools): - - if ( - self.output_raw_json_from_tool_call - is True - ): - response = response - else: + # Handle tools + if ( + hasattr(self, "tool_struct") + and self.tool_struct is not None + and self.output_raw_json_from_tool_call + is True + ): + response = response + else: + # Only execute tools if response is not None + if response is not None: self.execute_tools( response=response, loop_count=loop_count, ) + else: + logger.warning( + f"LLM returned None response in loop {loop_count}, skipping tool execution" + ) # Handle MCP tools if ( @@ -1100,10 +1106,16 @@ class Agent: or exists(self.mcp_config) or exists(self.mcp_urls) ): - self.mcp_tool_handling( - response=response, - current_loop=loop_count, - ) + # Only handle MCP tools if response is not None + if response is not None: + self.mcp_tool_handling( + response=response, + 
current_loop=loop_count, + ) + else: + logger.warning( + f"LLM returned None response in loop {loop_count}, skipping MCP tool handling" + ) self.sentiment_and_evaluator(response) @@ -2858,6 +2870,13 @@ class Agent: ) def execute_tools(self, response: any, loop_count: int): + # Handle None response gracefully + if response is None: + logger.warning( + f"Cannot execute tools with None response in loop {loop_count}. " + "This may indicate the LLM did not return a valid response." + ) + return output = ( self.tool_struct.execute_function_calls_from_api_response( diff --git a/swarms/tools/base_tool.py b/swarms/tools/base_tool.py index 806ee3d1..0aa57d44 100644 --- a/swarms/tools/base_tool.py +++ b/swarms/tools/base_tool.py @@ -2223,8 +2223,13 @@ class BaseTool(BaseModel): >>> tool_calls = [ChatCompletionMessageToolCall(...), ...] >>> results = tool.execute_function_calls_from_api_response(tool_calls) """ + # Handle None API response gracefully by returning empty results if api_response is None: - raise ToolValidationError("API response cannot be None") + self._log_if_verbose( + "warning", + "API response is None, returning empty results. This may indicate the LLM did not return a valid response." 
+ ) + return [] if not return_as_string else [] # Handle direct list of tool call objects (e.g., from OpenAI ChatCompletionMessageToolCall or Anthropic BaseModels) if isinstance(api_response, list): From 2770b8c7bfbf6f3c6054f0313a6a358823bcc661 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 24 Jun 2025 23:27:44 -0700 Subject: [PATCH 08/86] docs fix for the swarmns api fix --- burning_image.jpg | Bin 0 -> 43016 bytes docs/swarms_cloud/swarms_api.md | 1371 +++++++++++++++++-------------- multiple_image_processing.py | 9 +- swarms/structs/agent.py | 113 ++- vision_tools.py | 68 ++ 5 files changed, 913 insertions(+), 648 deletions(-) create mode 100644 burning_image.jpg create mode 100644 vision_tools.py diff --git a/burning_image.jpg b/burning_image.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d260cf3dcd30f405d410d18f8b0ffffa2b8ae52 GIT binary patch literal 43016 zcmb@tcUY6#(AhQ!-fQR}p%>|05b2NzNEM_g zO}bPS0rf=p{`UEO-#zz_``q(BDJyHunprbz-f8d3ulZje0hF4m8ma&S`~@Ze0|38P z0SV9mlmh^up&@kv_KU0j@4?-ZBC5 zz(2@k@a^B-!axNwQaLYs2N?qu)qh~{zvO}cEXLp8U&#Nikh_Xa9$;8q&unP{Y;M z5$Ua_p=$Im7CcrV$KPY9KVyFc?uc^m`ET^?5&yz__ z`dhjlF23IXLgRV!^+x{g`ZLGH-a!~o@E_d&O!&fV3=t8L5)^?5 zLZtsChDTV|4{oQQ5U46t&6Y= z3W0R)5=TT1w?z_WxGu8fMUkF5^M4dv$euNeOiqW5!0*xCuJ=x7M)8|bNPC}|i$ zRRp1C%20hHL#U#GvN}{(O+y!|p{oKl)6i7|vg280LuRtwv~m@4M6^%_b%YqBHnM2{q+~gU)RY~<>~AClWH)Zyx_OI* zl9EN156CC`|M~jW2cRM$$RxDCMnDB1q$0RRMeu6?z=Zd%zdi3Cx%~McAiM@3BEF6v zWBfgUzy9|a06$1X{Odh{{2Bp(kn$QO-c|1HU{lXP*wm`jX9*4j2EEZW$c@~OL*MUK zYBpBA@3zX@_geRDNJX1`GK7t_mRohqZD>hBq3RM+lsEXIb=0#`E19-5a)#Jgy!wF* zeG3zs>6Z3I5IX_5^7O&$x{&T0{Lks^J^P}i--?pf!fQkJa%Dc{#5bn*TP$~7SPV<} zXqJ8sYKg`%Rxhn=CiBtrRDsESu1A<{sq>Q}$hy9&>zArARM`RN)y;~92yJ4kwD~|Y z2ZOo`!JD}nAR^nh`ebGe{I%fEH2UL3VDC~QSdec)_dq7Badf%XX4ti#od-h_KBI`q zdYf8OrGv=DHUPCPK;$AHwe2&!&HAc%EGeHP&k;J-y-Prs`&7-W^P9v2k`QoMahmML z_^LQ*;3jpJZ;3%A(w}K$-9dgrEf@3YM0<5Y1xDAg-NB)A-wXi%R8`WDSzslaXrFJ+ z(q9tsJh`5ngGLB22gt*QCl&7vR~bI=;Lg?H=Ob1(-tKrYF4ln%AI?A7vYd)BCz)#B 
z^yTlk6+F57ZGG=-bxd;ebZ=||6C()U-$}AAzcCNst0zJ`gLfe%^@^4nqk6Z)q;Bb` z!%-eYT>RBQfmntN{xeo-U!tlVMQN|MAZZixg;|faTKb37$BrR^4JPPhtZiQn*`((7 zT%b3GnL8gWy-*Xhd~WsWozOLadU}0Mq;I@Hpl`1|Zt!_pC7mjqhB7}ISfQ;Aj188T z_7NnzZzzy$1rw-6qkS#sdifwjR+fjEGMhw@EYD32OXLv1CYAtK+QKB$>0LTua=cK( zk8z&JS4E|9O)!l^$Oen}=p)rrO5w45edK&Zqz2v6E3D3fpHg3&p;V@DvH6nX^+M#T z&Y3cw&^JV`vzcb}(J2}+iq%l9_AxmHGmUqz_muNARjr!P7>+qJas@a@j~gKJqojeF z#0>Wpo4NYzN}&8T^&{G~b*$VoW2EYXn)9oo{I>btH6(EiC}$8>n^^I_h%$)w*?n6q zTuY`U*V5OWww@=Qn9JiioDD^FA3shqQ!>O#Wt8&7E(cdB#vP{bQ}E0vQ!7ja=bTAa z=E5CA9Gy(2*{^8|piD2;>}M!xrlp1A5LEF( zp=~IylazsKnmeCxVyv19UUD~A3e=M5*pcVQkydJvrBYm9&o>Kz&K>*t~WF_K0 z5B;49A7uEVHO>0f=e*#9_R!3n4s~?&BtsS{86_DdgT5(rRG&XiYmsO>hb%cRDm|^y zM89JKgp)1?DW%N7Fsb##lw=iHZMoVTEG1>*pR@Nm&|<(jr|mhBp{5wQOX-KnQHAP$ z%6YTgIh14ofCYfu_On;Lw#mcPj>&uxIs~y3KQk2z>{D~63Hk{)LYYrgfB{+TJH{xi64be@m>kIhIx?c7ZWiJkD%~-;pn4x;Vgf{WUj>@%S}z z^?m@LOjHaYqUGpP`AU;J$3z5sGG*atoK9?HtFd&tC*@Q#J!XJ`N8@5fnLsRkNmNLUx)|*&BUr?6*LS(B5FCnJqG=7oc2om zc9q#RfNx0$j5uRFLO^qLern3OT&CnX%%xuw?Y#gZ>!Krwvd~iBmA{c3twDKQ)J<4w`UODAA&$E{kx^H?gwHyXZK(#sCugH0J>hv@$V` z#of=RLd3!81<^9d66rC={FRgwFAd>p3nlz+iF2;!^c#A>P-kOuzz z4yIA)G^3n8UWH@r-8AFzj&F&dL(M4#CL=1AhT+8AwwvA*f}6YOhOsWsLs}W`F0mH^ z3!6S$gZAKjA|#0^0ifmCtqzgrBwEySieWa_O^s<^YtseVdSR6*gbY>bhjPk%G?vaB zOPk8aur`dyn$yD^I3^D6!Cf_F)wQx@WtE*j-Tzb$ao!U(m6L*{Q>Dsg z>vPgy(o3};4#iT}&=A>bN8G0|qh%N|iWGOUof*;>E3*`vqM1YGi&ZMsqA2>uOVkqx_bFtv(jfhLiKyYX+1Yi_jb!t>oRbq?v{+`+|tDt zj{~;6qw^2t8wb1VW78iJrR09rfhMP4MoOG+eSLSkI7!mVV2&~L&b{Kv)32wYAvEPe z@+UQ=9@Gtjj!6bGBQo9^W0fgRw{&0IJYi=QjR)MqFc=Dn(b<-k`5-^9&TLh*l_xz! z80?In@AQ9|7<-p#dPgmP_t^Ok^WOUQ!RZC{su=lKkG;j-*+i2Ml6!@+g;iar&J(yZ zpRL8X$wA42TDHV-)(X#{s}<|=5oNwPXcB!+zyrte6;9h{>a^yBWq6)@HJ=O-L}sY! 
zBbXo=fv5zr?w^m|t#1nkv!Uc4{*dxSOm1ETY6aFxZe4U~Rl0?0ziV6lYMTFPN9yJA z&ieG*Re81$8Tl^VE#bJ%)7fs8_U{)Ql4Em$uU^4xe*x|}I~)hKd-Ff`}&!24hrAAof6_ob&TOC{KK-iz=>Fh?_k{Ez%5i zbod^%2X+0-HD5am*y{S~@kI;%C8VIva4hk`-uRi;Y4^{-9cSFa=TqxREZd{T)ydAS zq^*m( znikg+yT5;Lnz~(bepV;#N|e*ptSSvsv8g;poPhVAq3ATyP>z$MwY4wW;vw z=i?*4(^W9tY{Cuh8qF}6EfHI<*x2gsC0QGM z|54a+n(J|Y(==oA#DDGkO16wz+0}&3>e*4POpu)H**blD+#~Dq!ouxSFW)s3_!_R( zu_l=|PN|NksiwK1o-K@jcA_orT?)CaGyND=gSIIux8`TGeTT7xf`QeH6Xaf*^PFLU$ zks|6oxWd|Lc&GcSxM#dQE@a0AGTtvlULvs2GF~Y2&GSWn9kc1y3#Q0Y-|Ryl&33uHfKily`3Pd#-92QhZdX^uBv=6gr_ zuZ~^K#=#k2GfzU?{A>~g)zBcg1=H7d-(5Wl7qJ&&;6At@^0RfO`>S`ETaWxFj~*r6 z@LK+UCgB|{i zoHbMOKV?t9ggm+;e&SSAic+-PD=e2BpV~5Xv|N^E%X^zKra@FPd;Av8oGdaY((yJO zKAC^<>d{=sagcp{x7-)Y75Sp+eDA7Z)56Iw59EFUK*O`c+iR`99tUQpKdij@luVf* zExLmpAq%@c-b=Hmue!R=qzVd~`cHdgcaQdr9pk2PY2^xsl1|$WVK=hRdefbS6XIy# zZ9)b9b)%0$sYlL+)fDpb+x6;r!|C4`=r?GA1}bh6n-I&@37)~!k3Pv`<+kmP<4 zwus57zV^N#gTVbP0+Fd~xDp*{JOKXG0kU2f_2^Mo*h33K!K@yImZ6LGF9Mx!{NEUU zdgbHLae=pOi&4{CJ@VOs7pEZ$?ko@5X&Cwe`n3#Hu{zvYkhGh8N~KDa4A;q$IrJlx z#M(j6!y3}a9==v;Z5(y$^{6~QNEzK=o!J5NT$ z32g}I3oN-KO*1~_ileh~&q;TBz9zb>e~u;9I=SXTaDn*f5!N zY?nwQdqv}s2t`x$)dfZm4qR|?vlyq|sLZU+wCZxt)z|}Wm1kNBE!~Erf&;$puP@Vy z7i!&pR`OMDf?{D9d)U`8d)GZb=)4J?vE|g6Gwz(VM?I$md6G3mKPIpcZ(C|XM>4JS zno&dNbQ>z=5M3l1X(%Kk{iV!j>Xq-Fx3^;%N;2CjME<*)8S<{xx7?Y9ce15-xyv6* z%iBF@*Qiq~I6r6@?)u7`S<;eU8}h!_fV~0oVeXBHLenx` z?@W|)o-c1rxQ-(b9^NrTQUL-Ck;dB)^**7qxyesG*mT{J5*Kg7)QrdssVAiCXZXue zSWi`aVWQl`^8Ik--SvaaT~SWlquUR>4&cEeTYI=P^V{NMpXQu&t8?&kzRP$a7+#A$ zp^9XJtTPOWh85w%4F-JRrcY!}8V?EcXCLs5M_h+|3`c2py#QSoolN+$ZcwY6g|2U= zWHkAdrU&-Ti|~voBfvZB*X5Cx2Jm^+jCe-45&-_h2EYSo6>EP@-^WKuPM6s5jHuVZ zCW|MNp_jbIhUtlt%}s$Dw%U9+xQ8iOT74EQ{u)D|839@H>q^UP&t$@<@%RAbiC`81 zeLr~zWCMGjhJk>^cj$>|ybXnTL_LKdck|5t#vExpP_rQnfnR*QJzih+z2meB)e!DVY9CZpElr$ILhd`Dr8lIf(Z96f6fVesv|Jp26AK_ z*!LYN2Hh{dVMCl_+@Ny*;Sdvv@iRsbN&_G~UVzS1i4vEfX^*uDs~7T-+65D0*W>yA zB3s?OHik&t+|(k~s3FqgM&KsJs7ihHd7kS~zhP8YqO6R!MN!8+(fx#y6*-9$Aruwu 
zL$c$|!gcY-_^KI*5u4l+00Lq{1KN)Gs27>*^i~PE+qn&?NAdBD+LcZTn5RuKDi|V@bp1)FGnCeqiA9aZ zW=$$J1{zTMVtZ6`pt%8pV+Nmb!5L2}+_7Jjrj+s0Zc*>6g}2`+0da7JYA{3NVIXsm z=2R?5HI~mQ1O;p^5~M!vWM0M~Ge3UW(hv~AjepVBzDVI{NYX>1VuLNW+Lwn{J|R*i zxgadL0YynBv3!`=Dg0!o9EFM?E|a!W>Jr(Ym9}DJuG31CbWCAGqFmAtPFRJXJe4a_ ztJf*pKJ>lnPuhj2H7SYm+~U*^wBgHYp;T|OQR%LHm}X2ff-9j%fQutQo@GxDnNhk& zAD7TEN*}p#7Aq0EcBlpJxdHsr%@qSwK8@SFm@dsxK^}z~88zCkDBK)O$)Lo&VXY&Z zjYm?)SlWXPXy#&Y&|7?nAmcpXSbusY%yEHmY|c$s3v2*(SMTAcR-O%52;%WC)ipVc z0^+EWGRec$4?cSsnQ7Fk4KH>Mt%>WmvAdai-kImc1CYV^0-MdCk(Ej zt)ijl5M?&Bb$)IfxSKxiSzuSB;wS$&?wugf9Vp}NA!JC-CUca0pNSA%>W?-~Z$XxZ z_%ExyjoJrLGMAMI;J9pX!dj43Wz4zRD0z@o<`xod6=wl%eB)i*<<<~{n2WiPNTNKK zkCk{-(%pKtW!<*g>M#={PO9nWZYQjBq84xn&5dq=T+v1Vag*@~w(7ns;Xd3Hj%JcH z*52M-{#-AuZ2%@#**8wr6UEB`G4FQyeoIt2Ri)guNA`AiOfC~=ss{A^9F9hk{y{u~ z`A)=76|=Jj2)P~mnQk1op?b12{R24btmg()1_hjA$z?u51_FtJ`W*DhxVf+0z8Ves?d+ttFk z(zma#fB5+R%0-7I?#OK6kpgzEhjk~2gNpl_yJ%uf#yaX+ zs_>9PSA|AySwy;F+=*L^H(gTDVk)nndZH7$^hKQ7+7Rxzfkfr6onSpR4xEv)uq$$x z8v^6yh`#_gerfy#AVhv2fbzJg&c&tD8wMfM_0ER(beGJIFpVmoM{j`Do6mye?IU{# zsjl;zOHQvyzHU}kgDow(KOB{vO!yhZ*X%mgMJ;hYDTTTBBCWgrN8k}M*KT|gZim1w z6~-rD%|*x9Z(;G`;9xB8%{W?koY*BuZfiLERST zUZ?2CZze~PXie-at$qZCK8dAG>ZpWA&jkMHP;4Y1GUG#2} zxtI{m7$NG72FTc(&$*M%dd(bEk9P|rP%5c+63`s#iD_q;AR3xU2+RO(^HKCtSW^nz zh?{nnD5UZ1$ZK+@)m@l~c$mBf8y&Aan54cPkw?-Dymw+wBLz|`c_++h0F8xB;U=jZ zXA=LSHOoe3ss@@z-hiAf>WcM{NeR+>!J6m!Xe}vEpsaHtae}l7rSdM_)?r-2110=q zPDW>z1$h%lW;G(e$p*|O6=bAQIK~5RU#bhCnZw=vbpGhp4dCQMUVmoE#tjAC8-lsn z-MP4!WBx)xDSp&MSW=W%!ldK(hKeFJ&xShE;KUS$e0t5CvMwgR(Y@U_F8B!Rl~vxr z$cT1&!obGp>pHRay53%gwXT|0FYMIV(ApJso45hk6LJ31>9hKh)X|S&^>h8TkR;U& zoh1aq(v(@VNPVL~jo_2EF-J_SN>;YEQ=b0%Mh_g=(sXY^Q8OT;H45JI$HfeqBK@rpF1&j`aQVmnJ*)2EtelBH`X5rk&n}$Znlc; zVjm4I$6Q(dydgYD-YeJv(ef@{ux8Oss~5&q6Raq3=rla;LgGmo%w7)5|OuHVhl2ML$ZhPdY1$8Fty)*^yoRGQC~| zWt_INydO0X+d}7w@m#H@UUNok%IUJ~ue(kD%x~l9ca~3$LEn=_eCbRW&HAC^{qb$S zIIrs>?Nnw2t!a2D#gWS8PQy3IdX1OSTyje?)>m>lQ))g&>By#kh=gaIcxBh-WBIge 
zyk~WK$MA{AnV3nMvbm;snW~IuLa}QiVUkdFMfJTa+7M#zW~#@Hv&0W8>Zdg;I8}e=8rv2;~*OB=)3#fCW-nNpk-#*kBxCg z85Oen;|r2Tr8#%aEk#9Xvwm;?xb{m=dm9dHn>Dpc{%GIt)mistvMEdj|27sqfq!CcjvD;Y*>S zN}0Yng^Zq#-uQg)ZYg?c_UE#8t55bx$KEZ{cFVMttPlKoxp9KIIS~8ye$FAi>LqY2 zlvR@PB3@Hk6rB8!_9A-m;N(c4KxR5Bfwsktk@`{5WpLa#x*( zuUuNn|5_^feAj7srdg{BJ2(sf(FyVO(z{IMd?#}_%+X0-_`PmEy-LzquHcF$I2WjQ zmb4#<_~8}zwP!r)u51$g}naO!19%gs?1 zJEgEiJN@wmUtO=A!>WS6O0$-LR_FMB;(mn02R~?!Ivr~OUr|WW)gqd+JU1V`m0#(1 za`3ZGA2EoQ4$AZTd_!pF)V#>p4fTjS)yX?Ot7Nh}cVk>b06mf#Ti5^Xj<;pTc$)DG zbm)3Nxcp0_P3$nDpU2Nj2{xRHh%2E8F#Sl^5rx zxumt-HSqeWWN`b0T94PNk$1*5b&r?IwLlgx1f1DPD}WhB}Kaq zgpWc_y8W3q+%xkK_A=&i1K-sA{8wCcb?g)6ybCWF>K#UW%{HUByr5m(_X4%fuc9-L zy;Oo+J7mq67Q_DXFMnV9RO=2blIG_)%_SNC7ilgnjwSykmPOh4CF!;Il>j+4; zc9|5%XbM3K6~K9yw;R^RW;PG{h1^!if8^cu z%-f{v*nF=hHeW6-RO{lt6Hy^H5s-&z?#Z z``kQ|9QZ&JBw4f(ce1guHI4)0!&$++bS%gMyn#tBCCbC47|=_r?_)$@ck2@8TqO#k z)}fGWCu*vr2a&B5ziCmphx>9Bx{3~+EMasNs!x-hT?r9fa z;xgh_=GVv?(*P|sM;a$iYR|i{O|QRT2EUJ;%1pYxojW#&nQZTtR!|%Hej5fF!U>{r z$i{`y(NRxH(C_psFB4c(688z8e@ZYD-wL2HDoKr>27f@JQ*wdXbiguLE6)cq2~48S zjJqUyT+?CNwtgO;WX4X7^_fVzHGhRcX-!wVIR(pUn2Mq|%*HAyP#t%GS(udGe(EX1 z@9v4qTONmn^NxN9v<|i7oA<6-qhrj2oocT6%`U3_wpyA~V=K3mvyseNv%u{}obd6V z93Ovzh7dq}jToQ4#AOR*2@3-oq6HJT?ZbDfs=dU~MRxTorGCypFUnYl=%g~yYLaSraEtHs}I4*OXJnnhr-=4 zaP213jz^@x?%nZNN%7mc9}|5Nc(!N8)O!McW)vZXNRB_KPZ=ebextXtaBLsIsk93Z z^we%`*7?IfMB>2n^GH7nai+L2hmM0f*~6l`P^jPW{L7R;p^%%jZtzC}pB++$_rcm- za|YwFV(B>N@eh;l96CR?_k=wkrhNrvW??$tw;T0?yae&)9JiWIb9Zp-ftc6^d!AXw z{Q?vXJ2NysNS8{-jvjH;e%kJ-P2!RHAiz1uklU%$H($V(;$_lL*{H?ErN4$^xgN{s z+zMs7tG7#U=I-S+qu<6f-xs~@tiP)5JSBRAYdGD?z{i)0k>3Lbhw*JW0x72u#*2Zb zVYqJ~J}(_ZzWxM==R!i`AW+Y%;0K>c@b!S%lY#njPPR6g5zQz{LpxH#Spi(;CmHF@e@LXd}pO<3KO>LYgq zoG~RM;j`jFnu!PKHlrOaN`dT-v-SPPeygIPQBARLoUtkVi;eh)P`>~h;>{6!_flR= z#tnJ>gjvApN^hyCG}`Aw^!(tYZoJCD$-y^x!kavr6icS0(k!-+8$T|v6k3h`d`oJm zErDq}k)$Czcjhrr0)uC1M*>d@e>c6Yow>*{4f7SXsDw`H78ednuE)Iq=Pi(Mm;4kj zJYPipQ=7#IGm10p`V!;}mab*kWH1oD_cS@ouC)G5pf~lsqb4$At`G*A*>STExP54{ 
zy_d3&G(@U;qY3tcUejVS`XwxMF@XK+p%oV=&H4V@vxTCa8MT{ONi@rn6~_JT-M`Xa zboe2Z9l5x62kF15Jop803U6N1f8)NP@rqP&xL_oO-b?K|)7fX~=J)oj8v2<#LK>d= z4wrm`YBLovSqt%vHqj8x-i2Qxb2C?=Qpxb(ne8ArM>rHQl$o14GkE> zkQ{Yv%G($@+PPs(YaS-xOJ-U_US5)<8D8Bn^CASpoLNX}L;?zdJ<*>181M4-@{sGL z3*0Q}C!FGZ(xFJb5jUD_@T|wfhBVgxyKSfwh~>j;Y0S7GC6x7t0hN7p*4L*E5HG^w%H)%dCuE=2v26dWA?k z;RUIT)cG0gVS+={VreXSL$wT|?~E=TQ8@|IdZw*bde>FiyiLUxs@puj%6+1!bC>$i z1uZC`JoL)T$oYMj?*f$eY9zGLc$q<(j1~-Hqv=k}OJR_{C5Tk9;mW)Q6@`Ld9F4{s zz&`HAQJ%4diBqj{tWnU9m(57eXCt@N@lFjB3u?`V?;cO8Y46N^I{c_Pq0&xk=Xx$b zDzNmO57Q>&6Fuql?nt^MDuz~l%0PQ%f9|2nj7yvHfOw(gTr?8=p z7&CU-wT}-odZ=HBsnHyHr!B1{UE0ms=hB_%((?@X`sAAmsYy83rRXG4(lnqP=dFVD zaZvO_;e`*KT{}6@2I11iOD8<*i_>CFT4Im__xK7->;WeB=)*f095{dz262NVC2?-M z`;C_sn3soxR$p=8IcIUmN^rWVFWPO4CpCeaZZQ;X-HI=O3;pdJ39l34OW=PW9{r~P zj(3hgk-wcIkuB|AP9@K$e`?@_zy0GGy~25;R%0xP`+`fuQE9(!61eTsRinIkRyvBz z2|w_A2+fK6K`MG&z>J;b4YbmTwJx%DXqC+Ps-oR94Q_;plm5)5fA~WNC;YLdC&s#T zZsEg@Xz}9NA~q`jBKQ34cNsd3*XzmSA2MvZeUX=U>Qv$w*rtf#%YhW`vG- z(RS0(?d5=49PuT~2h~{o8J1bpCI631QOPrD{gOiG(XYTuv^!4eQDQeU@W3H^RBP#N zEX@brwEYJ4W-G0f%%v1-R^h=civGNg#@i;t(P}jM6Q1OOl4;E1u}*VfMfJvkOB3_c z+S#M_s(SugxeG``cCRu5dk zqj=U`8r=rIDXA9?BoRImB}e!?6y~Sq9G#8VcvUrDep_38SB!swo}Z+lF(Ak6nAx7S z$glCztZuHWq3~&z+uFnG1`XH^v?9MeFRiSU3 ztPnNrJaeu*ILZ4)-E0Sz%9#1>JpJtOIB~YkxR#J>IOZdcn0N8)^YN0;ARVvKZiysyr^cB6c{=`PC9_)xYjG;nGzUY>T!*X%shxo z(M>GPNl;3;-Rh%1xVK`~MS9r5dN(mZr$&v2dJjQ!Dbmi_TrFGKMEzKWzS2$gM#VID zlmV|N7gpq3sw%~L`np42!W;qjd!qs*Lwtp-Zpf!qsPN+8&*Qp;DvuJXwtmRC_3%QKgS+jw)VV4MMG)(Fd7!*v4YL zk3V)o)pwXsfdFi;4tev9Wwp zs?4cj$@PyL`r??EhQi`osZ%q6QV+qM4h?pYyIr{twUJ$K2h^V`++v6v@wlfsSyJ(wbB&9}zS(_~?o-HaOv#DB3k%G6_zB@IQ)B3ERDLz(?6=S20m1K?m8C}*L zdQk-hS9mm)M1&`hlrCFW!I9{z_rEc1fV+w>(%ExH}umReMpz4~GU z)dw>IT@gH{#(pN>mljNCu;|G%%Ouw0z5xBC>&EUIM z^s6qH>1Q;bd4|kpNb2oR2-pvjlvMNBFP)x>Zv+Ep$da#ql2mQ`i_7j^#+^22D82I1 z?5d(Orl^OBjWZvHCWI_0=xCts3Xc{xPH}bo!eqlMVtFArWqL-=;>Nu`&=xx z=z*0^r^04PE3V`)^|JWD7}9E?iPNv8>u|=E?G~pXoAX%5nw(mUk(#xPCM>Py)hF+; 
z5OY&w<`YSOy&uNPY}ls{j5=E_Kmv4wAp6GFAi(mh%{l06Sc!FNsVV8Q$pin1oR}Z^ z;a8)*91mt~NxzAi=1UUBK95jtZw>N(``zd!s76eOIK28Lreeti@g@&c?dF_P`Yta* zu7=llJO6yK(7&ZoA%BK|~8Vsnsyz@`Fnk)5r`$(Qd3 z?y4R^MaE!{rRqW{XgW@FYF1_E8K$*H(-OTg@Z5y%veNow zZPRev@zZa^CiRs@E0ag=Y#J-4Bp$k40d2oXG?TD++Bu8WzBn@J2_)CI?(}*sBsl!R z|L0Pc39J5JaNEX2Mo75|8C$_qNuzjPuvgQGOVzWxiR1B?eWcd*HcFE22f_KYY zlFOF{?nNr@k1%hD-=YH|O=^Qw40AE-NWk>9&ese)JDiJP9?pTHzh+%zz*c$p3$Qq- zM;$K`HECtcH=g~_Ti;A1@X67Wos`2#L$7}G4EyUhbgADz zpYeHL4vl4GKT0Z4GgeNE#!Pn|b9*&PTsG{esi^)=J76DiRVkFAWmAg7r3L0 zSPB!!oD{vB=gZx!tXRrIoyE@UH0Pz?fskgv9tDN@4$%0H4t zgD`hE;y8)e8j|i4aWHf%hWp5rpZ$=7hC%>;-G7zG!%6tq05}IzvaM_&NsNl7Q+i~W zb<{0WL@`%vSvNWO-~nt}y7Q;aPy6GS5~+ui3l!70&MbtkHZwY^u>-=N>&VtjxjhQM z^I|#|V!G^6vN^nx(tM_g-1xBFsS5hcEDqcr6>ofi(Aw`q_`I@yAyJ$UkRHs4(d~QDrrN}3RIepp+V=+v2qat%j zWe;b6i@Qm#>=4N`6_CVA-p`XnCMHN|e#3TO{>-Z;I6$J0U0&b;2QbM=sPc-`M3TdE z^CnBWHip(-%&lX#NJ^{wU0S&~+nCjxA3`Zce_j`vaw&^RIj-)*3H$Piwb*CR_wkJr%YSF+ z2%5y#^cQZ7nA}4kUv=!D=y`p$1F3iTj(9{>@L9b|v`Z{?#)GKt#$q1nih&L4dQ-Fz zEUJWwi4s->gc;~RRD98o4%CT|@Ow*BYDb6Tgl(JoE2!WfukwbQ(p!@<>n&sppZLjG zS2&B)#Z6uEX@}FR;QwpsVm5O>kHJ>-L?IToFA<@1Ly0sdQ>{a%QxY{2M0v(z8S_#q zmnQxi4Tnq=*bbFVsm6Thn7S!U;nOjr$Ty@AhAh zHb__udG0r8sD;wWUC^6HEV5ARR1<|C49aJf>^2qth_NMC;Tsf69l*k(uIMN+q%koG z2?;+r+^WSev3{`j`e?n{BBExl0QnUWv0$>+&ygYe8}9ZAG2gDIP)y!AL7+Ovt}@W~ zl+A!kwgSwi#E&AU7CrCA?1#j>9Y{T4>^@pxX13Ld07k6#H)!2|f^(3#J*mlip0%v} zNV>sGBwJICOv2Dp1q*!Er1X~e=^|wZ`w&hw3}Kg%5k~DWVB$&q2L0ajGvl}d)r=c3 zdNUf<^Yjayr(8l)oBAL}AoA;?Gp37sJ2<&_B>c>N#zx117Xo4W@S2uvASP0zUyukv znfJCJy_i`@Ww7*)GOaRSWaM`7lfX%P)ri01MO~hq7$@MIpq+(4zwH8@9FIp)y2C$h z^PXQQ8!IC`5G(Kwvd9=+%^>>br9qv@mio^;eu~X7dDf?cU`Cmox6n*wa-I+r#?Edw zVY@)(Of=$o`0MCb;ca&MLL5uk=t(alOHvn632JHD$selP!I5HSiFf(nbVsa^#qI7A zZD#M3WeH`wwuVhi8QDrq>)6eJi+9Q4CxSIuWG5nn_+w0wsFsA<)ri zB+QTcagHX+r5pUDT)w0Hi4(IGO7xyjMND3aM1q%#VXUS&9PA?y#8&clxES$4i9}Ie zJ;F9Wp2J9BbCSp zkWv8=5D@_n|Mxw=bLPYR=F5DU=b3x%>%L;X?h{f|tR+Qa=Xn_Ob#xr)%~B8N1%MHo zNiq2SxqH&Y*365^MASMe&{#T2Kq`qBw{#RMd9wo`Q}7$EXC9)n-8a4 
z-hW{eYXo7LKTGqkShaW>N3{y+|GGM8l+zr#`VDT>h^4YTESFlCoN+0$SvzZ_OTCegFiw5IbiO1`feapql^5dV4t&gCy(l?DFH?5FD^B( zRr;Vv?%XBY&LWVlK|S%?6d3n|V0)mS>Vv$BN37Y@c$s|f7`g2V!0*dUJ5_T8TzKhM z2#uW+|6>E;SP_QpqO1w}=vewyA~jV975R$#$7<^m`lF*~CFW1=YFirhV*nR$U#*>q zYoD1ntq5Y@nPE#T755fo%yka9@D4n&8ReTlsGaF%FuZyAvHUAl6RB3dRGze+uVmzN zfF?>rczC=t)yMcUmS1FK&3|@3YA*e!Dn6Szp&trDP4nqM8QfofF!2v>R>>ozB%7*q z-;tB^0Nu`|Kz-^G5dt;*hZn23!wL8CNiiYugBpY;IN;0irMAhW4v#8la3wwySICMU z<;*HL(Bz1uCzpN8uS5)qvT0W&J@G!GT{2f`oeQGzi6v^xiRltI>f|#laMazjUJ+Rt zRSPivTNE4o`VP-f`f*s1%X89xB3n4{3@wbJUJBet2F9JEi?$CF=y$@hpE?9F`AiWRIC z+xiYWMFdp-^0T%|F>!yIDGe_|t};d8pC7sWQY&b&#~hy0t;mB+Oh&oZ6kq!A>b`o} zrI0c~yZ(}ieBO`8h3Dw$!BvO^eN5W-9j8%}{j8`S#Z2kg2Xv&kOYY(Md1HzRV9xn5 z6I4sPVZlPCbn`h;J|dVRBk_$nZS3H=rD96{L|nU$;fE78A|*es*h1C%-sxvfBt3My zY+_%FY;mtAL+M20;@01!3$|89e(s2zVbs4-kQ3)%+2bdY{DtIyxMtZ^W&1Oh-H*B7 z4lpWysYBZT0rl-R=Y_?|{2>`sQl^$rA>PE8G%^r?ygmBhe{DuAK~zI;)N{4hdXe}9o;Bsq%~%8ik{cpr*XlHZHHwS z*!JOYmuzPk%93&d(>g#Es1kkgPAhm+l3VbcOk3|S=nv~7Zf+BI)62i%n}$5DQcFp! 
zTqQKgsp3KLlK2{pb8#b$T?1Lh10i9_{XsAtcI}`@DSwIDQj7^>n9sp6u?dXpAD&7^ z=cPPqD`Jy(Hm*9^kv*cCVLA5KK?6&m7^F-T5Vgmy>K_(%*ZTB9SkyGaq6}F=B*t!; zBv3ZhsAJKos$Y>iRxkab@`SQk+Y_Xl1EsDBO*N8_(8}Z3QkSaFhIqLg8A)Am0}nY$ zy16hKlj3HY0tyQm&FZ0WNAgC-paOAV}+EcmZC`Jp41?gS=FY;_o)O@4Y_{cBsLDE!1`6g`>HW~SHV{m+xe{om=9;1%DC0ivkjlJ%d0l(><^yz(A6N61 zxysO4ueokk(ZeK1N3;PS_^>gHu#|a6t@Dg#169uM^*S4B3781J2PUj#cL<<-B2uexBRzlQK_6GazCwh`BTHg4(0Gh=*Xw zv|5U8a6{gvC5p`^M@>f*Rg;c!X$OMvER^zi6B9muOF+R(JfF=sj%p)`<(>8GoV|`e z=d>s3MTkFGnU$ODO%P6)Y2A6Y9-87m-aH_sGsJHJau{r8sC(V{%41RPwOHhwOm6}0 za8EA9>l@A;-;P2$#cnnw?z=LV(N}hENX(SQp#g&Y^G5z051MR z`gKK8N==`B?698*!fj{Y=G-!0L%a=LB)X$VsmM_Qt+ zve4vad{OVaFMriE+(9p8?!)HqpH5{1?BhOgsfGvNkU)9{UuW%45lU&XyfRVJJtBNr zkUmLgLF!TCH2wHnE^t;iYdg;|V9x?0Xt~S2|9aKT$=G6@G+Suspziag=7lY1`#pbg zfr3(|)WZpa*3ZD#GunO4f8^+JJQ45BR?I_PH&FFMvDabg^%N0J%3_d}hAfdO2A|$R z%WA6SwNCwicyAydzOsdv(W1)_X<6u}J&%K!|KX*)8y$ThE1~Wia9PIgCZ4U8w%Nhg zgDMR)-hmCWfSo-_Vv|fFOR}CZ&<*THKj>DF7(O`q7%7r3KDpDHVNmovtI0eGm|RE( z-r^sA>y1O{@yUpft_e!dec>F`^Rwq}urE_@Dw-*i?-)7Nb&?l(57$CP?KPTmn(<-V z|FTrqkxT!TC2t$V*rGT5?WEbNWg5HhO{KQR1v(oh``({%$R?b73f&1KX-~an?Kre} zyn>N&Axx|aV)|>&repGEj0Ys_&1-y9@@Nn8mr%;E**D$SiR@GTsNY>Nu*t}m8WA-d zpG@SHTNoe!PO+!Tj9U>I)%L$?3T7hFQ;S3vr=A2GHN5)4ne|t?!`sFh8$7h*B=$$^ zvFxQp%}jUgashYSQ{Q=uD9RwtrAm%HWN)Np zuc7&5`}iU6bL=&2tF_vIKxCBj)Pn~nhiQT*fOp(_pA$=^uF!Bc`yG{O z-z2auR_C*Y3JIQdW}BZ`r(?jRW6^u8jEX>Q`-!qd>521wDes}b>`{k$Mqf^Nw6tHe zg)b`yNnJY7VbXLYbH_i(sr4WH?8&NHHo0w*SUeU|A6=e)MVORYYh0FY6ZZn^19Q9p z^-o9=GS_g0oWS6IM-b>~Jaq4@^oQys?!v+t=D~iYQtGmsa>`c%P5Hb*bkdu6^azcV ziXi%AB^FR?_~aZuK~(u+z`b4d)2-`DDbv^&lE$2(WQUzeut2C=QOu@1V?4Awnmv^r zt(HTRCS`?UvGd4fuYSbU4&BI;O(qD&tLv$kL-w1lF{* z_fWgsdVX2to7K%psbDk7IHwDx)z5D#v*^}Im{N#F`l!#C_7B%F;+_ZG9)~{_zaYB+ z42+~tMRL2<4$*COlAcIt3^=-O=Jo!=Yv%2r6R=>Rh0@c8eAgXZf}iqv{-wW7sWG2$ za^WuL2_+=y_z)E>?8x} zoNuZf7}PbEUB1nBaMK5ZVOPmJ)1Mv?I`DNIKJx6|@#`SuvFg60dj;v*2k5(+yH(07 zAaOS*vMdHq)2=>dvVV?G{X&A@I<%vwTUEc-L6fCX5HGPpCNOq`cyHu1lEz%o!UBxQ 
zUM^h+t7E3Kk=74DU$d>TZPUFlCKJ9@ML<8w^SkN05QHx5MTfSdE+pB3w+zHDt`0#+M=tr6v@2Y0edoRBSRtCgQKPg7n z&r%%ucl8|puA?oRbjvQ~7Qd6t@3K&;=dh_|Nc7IMu^| z8I;mup(-0+!{OP@=t$yqS-&|Su_^w!Ezvbxy5iKDF1vr_t#&Go@H!!kG1l9{w+mFi zY8c*3JRbrEDvo7s{N}{G#yMQ$27D|(n+fUV$6cH_r&(CBZ`APUU6bxzmsA8RW^JHp zIk`K-a-TfN6~l!HZF37P8tF388#&i&CZu#`^u0$!h4&MGC;6aMH$jao-wKTva={M? zfga6133DNGhN(DOKNSvwBz+e7p5DJubL{bE^xb6)^7vQ{?)eel={AyZKqPaycH$X$ zmX%_s?}c}Nj|423if>CTUkUNk@0qJ<2J|Ch{@r;ze>$xOe1B@d)c%Xwox;;P+2`8) z?24~V!!h9cE7JU9tWz_4LLdgZYxPuB%$hc@z}cquk~O?B$EVZbR?0zVY~uuLA)9{q zp=qB!cZC@B>s4T#O3by0NRd7LsJ9HTKd3Zo@_cp8i!#L&(hBR;iU%nIE|VH@dlaP zUF+-6Z(Oj%Uu)W)(ow#_*l2(LFdMme#ZG<}(duwb>h(G68;#wz#BVw^Q%6~IKS{?9 z=uQu>84J=N+i1EJDvJLWtl-C=?Y$Il=!x1BF%g&uk9ks_CwbdS71(NrV<{ zl&wFHeI|79L1zL9-HIA7{g4_0U6XL>dw-zSLElO)iXv&OpP34h@^a8QtQohuOQC3v zR7mI;He-AhMZeiT5w$+3={b1Kb?_$n-lkAlzp2`}ty&gJEqmoqzk4P}w{EtUXlezt z^SFP+TTS8@_sfYNaNTY?2ekmy+06PGc#4K_etA6U%?6690Z$*&Q2g9MZi%JeGIJ~*b9*VzQZ=cyF7J3dsA7Zwqw3n_ zKBcEId%Bee_D57%UP)e%!IvvGRFa2NUFS4Bw+^~W4CQJPYn&aAN|~1(H^;J`-D>E} zEDA%K#U_d}fzgggEyW&psrzT@>*S$_?Mv7ewI8K&Tqa!12!vYA%xIO?vZ<=AfT8y# znWW|vxzqYOw8-=s{X4>Z@0_Q~{eydj#Y-?A{@6-&VVDRLyi5H3p+To)*>52nBx%@l zH(5bYH~k>-v*~zRV?ex26r+ZR>YaPr!PYM82vZ)lj_6pJ_1cnYexK|PVI^?pbtY{i zG2?`ZPdev%^Pr$F4?cK8m>72NxSIcodc0$H@bEx2fmcR`is*|Lk92-J#fspX>U(q;hw{nVAY~B1iagNqiqSxIC}p!&ro8@H?dvTMIleOv ztwpA)EYY&)7r9mOr^C017o2lj);+|p)P^JcYZWgkyt5085gexsi1XRB8p73+6+ zep~JJqD}CRbeqkl%wAj8m?S~O{Rj7GBBee@V)B{Qs~)!{oqY|Ai6p%$qAkt}Wy7!B zm_{`VMc2Forg*=Zu6*6v^D(`cbvtO{regbhdw(TR5f$Q5p?sD3s~P#My~`fFp)R&e zq4SgyMR|RXjClbXoAE;VAKvYUulIs`NIPc*5nALT1ID#-=pWzqGG8bBR*Sy56rpKf zd$v}M{m#zquUIG~dtOK&$nt;H#A5>9o zJwO;q@#&}vM*UOos{vx zpIazGx&Yb6){tRZZBcq^5`wL+{(^f8cp|Qx_b$Hw!^4W@cc1bfJo34v-9@}CdT;$U zgbk0-lH2ZS>a4&=7c)`5u?{i!h?MXON2ZlK$M$uaN}Jihf;`tBJP__xL95pAHhM{0 z<2*aavmIp4P0d}w)T&f*C;7J)fZhf#jhmUNS5X*&>hJH7NNADW%w6MkkOaewu+5(4 z+ZE5^Xhe5jzo3Zhe_oEKa_{!7OD1(IF|T!iM|PtZYUY3;qB8pWN5&5dPI@i8>+c3E zZBkV{I$eL%Hk+!svR{sKEX7YJX-u}0itZ5O}gri?y> 
zlu+W2l8~^5etJynd1Dr|C%YMP@SH97U10FKeY452o3RlDhoY)mV`kEw}%3aRErO+7hGD3w4mB|F*w_$kUWW2uBSTZYLf9t|>$nTYq>it4h3RuU~Pw>@Mk$kfK)b z;#Cbh2%9GC^E7#FN&chl!yRESNPSn<|D)^g{bywQ-=q?15QO87u@|?0=o@j8T7nyx z-O~SG&Dwt^CeR?mcv2FfsLU$Uyr2XBP=5yu%%6)?qAmT0w+RWwXllYantoELEw8ia z`0hBUWKHE+Ss7#lGx`dmMa82x8_lddu&l|1lqb9a>J?(l1{oEDPW-vyocX-^op&rV zzdaOSbp@1(j-Obf@h%~vm8PMs_~H{S+!t{z28!P49H^@k&*>#@5j`E_NFyB4ODO}z z5DjqcMMLZ}oCJboPOKhb?VTXPQqnQe5QR5};vNB7JOEM3GGe1nh;L_S-~C-6MyE~~ zR|*8dL5;ZQfmQPYd}(54Iq)GTvAeYif;dz7L^67;d|cGvh~+KGr7SKC;w3WRPnJ>o2Q$|CY~JWER2%2>RPuAM0@h?X$%@g?|r15 z6n+%Hcjlf1ZTvhb9nH*mI(|!bf;8n`Vi+9$$dh~0l%0-t8t}mpt3J?!ZeNGV!hym)W}j#L7mcbah$96Yox>SH=dtm(vMaBJuV;iO0!6*(F`OvBW$T^dxeg}j9O7T85| zq;+v2J}zf$oVWkOd%bEq`wwrZ=62vmeUa8s8wiCRP)1j_z|1tGKo-eF>=YE_Xz;uH zP}vW0YGPaP@faKgsE}O!P9l$!v_q?G()#6AGjbvR3;V?O?($t>q#oV!qdicpzjEnfOe(xST=sgbgHr|e3a z6-(6)Hknyan5}^%SyUy_mF7CIOChEH;Q?tYwg{Q;XIhgkADScq^k(YYP=WFd5o<~F z?IaKLAyt`F&#?8_C_@y#l;eda?4*z|3e7lH&}@I``#uZi_$PZXnt4Ubu8M0E;KZ*j zwh)-+*n)eC7+~Q$Hz(icXG3N^kg35K1)Zi8~mHPFP?aeJT z#fKeTv?)kR*3mkhVHgZPQ$PbPDItekH%3DMJSPx)mnnBF zL7`G3w=DNf$z2F(P87Ti1t?={G=5b=N>8s7fPmWf7jZQKH@L%E zm9kEAFvo4?-+El>tfKamntqY&Xm6mpO$*2`h?3- zN`_GPik!EJqe|UIElHUeN6|ZsBx%I}SNsJI-62gzkflagQk&W=$?P&5jrP@aEpTej z8?xZx22%rs?BB68i~CFV|tVdDp2`s`^UF~ zy5}0P$yz;|u?r`hNlouhoQg9QFM>f8%T!YXl|%=_8>xC$^2?(_7oToxmTJx<3?^#fW{sQG1rX8^YTCF9$2&$* zr(?eViZzf1tEjy1U^gU=~ynG)$z-&(RE&D!QS1UZZ(K ztTa||Qr;T_c;96b=paW^R$Bd_3;a5#jUbkg4kQi!3D?3UJSUltqFfrbNIoz~U^$lXuAh z#wr%wa;$F2wZXRl=QQ>M`b9+wjjD$!<{rit=i{8JE}>E~I+`hqC>HZ&tI}b?blS3@ zNKM}WzL-w9>R|;K0h2k27hyLt00%g@q;ef$9=s67hT?~? 
z&Hsw1QW|wZ%NH(>5VkNlC=>ok`TNBu=d+FX0P^k=E}%X&^q$;lXp)6FUW$S-ZRqeZ zW%PyUm}>RYi0kptnMufL)In)b&_RW$8)I&g^ztmxD!`3rQ@toQZ`U6&49XEu8arlI z=1ihkYeXJ+9eO6UH{`by<|3woUPq=RYUQ7sVz{x0Hg1KS!j4j%TIzb0IUs55uJY8~ z-Wr4R(L6er$ zfZv(l(b@LX&wug|N8PIL>-SKr7Xs^}R(qhUc_F6~+Ea(x-mXSZ?`-kT-FaW`q|G-J z@^B~0B;Rfes2HMr;2FD#*yvyRgcgXA}n{hrGvM*C}jepM*)zY zJoVe{p4uP(@Ce)T9>ZhX`}%HRS+VJCBIYTZk823vr*ai z>Ew+sfNQ&{?7N4jVc%h#TVr7u1XDeMg45S z!%JsV^gbKRptMeC zHjLDP8VPOlD4?2IQQ{tblauqvxTLF1=0dEEz|u9v8 zP1>}Fix*ZuSYRF-cWrMcmu-x8#vFTzIb-2+_BcjFD>Wrf^%`*30DVEBO6wRa$0vq4 zUd8#OhZ1yTv`NSOxOg!6v#^93z%6*HLdeMp0VL7c`};RLI#15+A`R}07MYp(GC!xM zb#&+?5g{}XVU^lB7OwGVEnpBQ@F<1n9F7L5KA#dV?H;7d14WMv2+H`S0^s)4^bloI zz&s7d=kWqMw8vS@qlK{O1X^zqVulwD3bbhm9hu=F2AV*a4mTgELH724Lf`+38}78e z|6l0)A8rs=K|Qqhy+aKVv;S#*|DU%LbVWg2#`qJl^&OoxGY{uz6kh_OLD78l809kU zk>?SK9uE9J)fyOP(!6M6hY+9wh%7FpfZ-+ZQAKyoSloz6)0*5spb@KX3S;NAo*2l% z#;8NjCn0|eX|S6_VV@WZjtt7<9z7{B;U1b947nTjNRo##@I-eT()K!@`OM6md}Q`e zOw6PJXqDkg21#S{4$&#FgwW57XM}KYq*?dtSu0)};o#8P5@;4gKqYDJ;Msc?D0g-` z<3t9I7zcbG>=x8ZmcYWs#`kiI+(LjpG%BQYDvAvP90ey{F&}PTqLhR~`SSL*B$~K^ z>(Codfl{zok{Gdq&cG#Kq&)=kfU|?PPeq}wPC9NYxZ;Yqu_30&B^Pt8{NK$-{T~$sQsqiV>w=NN4jlEoK;Nox(_zVZ0V$?=r1M> zQrH*IG-dI|#;Nmsso>8?vjcRX*hDK_hE8W~4jf&Y51}qsL0(g4RES!1pqYYNk=B%d z&A6Chs06AK=3%ASb+Zp>A+Y@}#Qua~9tQWhdyV1^vRU|(X(2=dnCHCd{p)UZimY|d zM1jLQO0!fYrBtdvA_An8 zy@CdprfaAuQs#(%R_C24*Y}+bqa8E^U8*<;sqrxxa^Ck*AKaO>%e&%*gA>r;Qxb0e zUvEW#$*H95suzD5s%AEs8a-*l=#x^6-m-X0DQzBjCF*>m#|kT{2B-|kC7Y{9R;lg- zX&A!wMu$g{ukf?&j}avj_O}!)u5H821a?2lHD&(Lw*qH#&NKrml1%`}U&HYkHPGi} z@@fk#gdx)7mGY`>yv%VWa39^>RSPzmn2h$l=ZN^nkY!Eo^c~#I=*!&SFWLqQ7NZ;l z5ZX8z37rFJM<#~mply%C_&dN0vp7gc8;t`?-s$f<%PgzRctJ{}#&9(-IG8gBeYPPq zcz}M^7xkL}TojK1x4(pd)ge~kC!{xxFrKz(9YK!-YNatgXOMer(+un5`1c z%)~R3*s#1T;+ph#k=pG9ZeJQ?7@&IqZCYiEo=G=S6>xR^NP_ zB!{|bq;jMku7)qjuoC8v`{ZC3l(aGcyr3LiBA73?42oA6U_t>Rpv8c%~ESH9WMYReevC?HF(h~<2 zokh09v9AfL|J=qW@v7*L{ABtQ9{J_RU#1KfGl~~6A$O9pQy!0aoi0?sxjW$SAKsOq z=Nw}^%NdRBdM{dJL-jL#R~+qP--;?UH(PpNsZX90AohqW 
z%-SQJIXAB;wc(2x8{u<<0R^2l9Ujmxa`*VWXpZkCMD`KVGj3EZDCd88_tJh#UqhxG zE;%)kF-D}?4$NpNMxRvLGW9aX>V!ZZPm<&4C+s1J4U>?xNxn7PP0fFJ?s>c;MI!@< z$me6e%59{}sSC~?Nfyj)+<`_J0ql0aFfr5g0%o-kpv34%Y^)(1t|>YuS-~nH^%V$< z(t=V4XkkY`lbnFdnVrZ3ErL`4f+{=}lq>Wm&X*r9!rzjuN+-H}c9h2mSx6aG)#Er` z{rnA~Hj`6vzeU_^1@Iww;idvDx0G?dKcWIfyQi&vqdBrpW>TR5xUcIST8D$8X&}H7 z+SbTOP^40@&Zbz{Bw6?MoN?#U<`2u?h?yXEdae{N22 zBFzL0m@kfbhq&!KQW1?Zna-V_a&>?^>G?rusDviMF(!jWcb^e#(8zo{AWgEQn$Z zSN&bWYj_FPbwoWYDMR0+N_7qK_UV_piV+sBcQauU*n-Q?f#CLML$U_qVD3G2dc8Icc^*o zuD8Zt`Y?9IQjHKFSsL@oR}BhqSPgJ2r;l{Y9&X{hn+^TIiPYb`zAbEU2z=%8`ce1e z+MBeDGG|QBqZXs~Xy?Df1DtopA2_H&)c)?Adp9gC^mfP>iQNrh+g_vj{rvfcp{ zdy=Q3C2NFDItF40rCEHVZ|2d{5MZ8>k&(ga#sFhf(K~9;EU@1+ab9QYFecqI|7XL- zx!`hg&H%sjL6T5O(NJzhOAkP|9ldJ1z!Ml-j!cTjSfmZ_<~+=v8WFqnj|ZtB0{)bkl*V_2ve&%fUvI*FgOnrq7A1tr9Y-eD_WfFjVR zLI7!7tBnZA<)YED&l9lW1DyDRNBJwJw+ONEdaQfkk-G+*4|Eqh_CLpe{r7)^`+IoQ zkpFA61N}F&KiT@p@&9~wAW|A45uGju+XTX%b&Kk~BJ)&vIK18s#XSe$`W9 z;V>(i>Q|N>mnE|CJRot)D&vKxHuf$pS_QZ|CQftIU+lwvNx;t!gi{UuU0Ke6AunZO zp@zxWD!vEP_@yQ}jw6xB%yrr{O<&eeRID`6vZk9rB;W_R(NurNX`4W&sz-5VsZ99t zR9l}u&1w)>q)#ORP4 zxs!`$R~wdTbHqjf+@uAJYDDFeWmQ@*Yd(a5I(W{sT6eJ{0+JvVUF{@fQ0^0PyI6nK z%OEG>if+B=$&S+jXGX+?FwHM5H>qv%c0(y{6PC*2b^_GRFfS5Kw=DY&)m&s(2J$Z| z{zS*jxc)llz{Wlc%_!v@r&;RjFQM;+e#xA`x7{&FItYjDi7h-y(jb?HQ976>3UkMg z>cbg}j`!b1pd#`$=aBRcuf?fXAI5aJlS&v8#?}(hw4st|j(oC(wxR*jcxw!_5l;P; zclndQq>9+5zd)@hLC)bLUYtHm^K`VYO(Pc2Ey9tNK<6B4RGC~3I216Pq*DIokx!l- z=ebkH-MEa9!)n7X#;ThB4W@jVw)xWKxN>InCsiy}CT8XL>w##6g=f~}N~qciP^!P&?k%D&dFc`+pj);nnIoQ_p4c>F0_;Oa=^5g5d$s7T7#xl z4&)Bhssa<_wb`u~mG}`>Wte)w;+8xC%eWe~3oE2!iH;S-Ta%O$ zXWM)wU{IWSmAEs_E>XNRcwLFbnlWd0u+X!c4hS)%0_?9D#HQx7323=>vPn2)=+?g` zB(ch4Jd-72hI>jl52rsVPa|7aX(Sr9@K1~?MD>ku3l;DVvW~X6>&&R#%u5x|OTj*! 
z=C@ea(C|g2)`#KC6YxJC(}u_4XenIU${TfKsdutuE%n%UiP5q39Rh$kbSdqNQxng;2 z(O@v;`@@$Y^&SbBz<6?)c8ZLJQ&*Y7KcLA)vlZjME^Q%1t@1&~|IV##cwb?3&DHCe z{PC)VA@TL-V6o=vZcya>Q$aH_tUo_t9Q)-uFrd37{PTG0$;<3J{sX6Ge59Oaycb#< zNk0w^g>Rg!iNg@$Lm!k;^i?;?2JfHa07RL1TvA4X!f4}~81qW22?7D_C*YH{>0*ug zA>bSP3@=oyabmXyBlU3O?frjv$t`T*#58|Zh__NZd^%pDOmm<5Eol&clSFr5-c+X@ z-dixu+;%C}ALMz3@sh1%Qg`58?%4kQq^OKI>-C$C)qcq*?37f;W+eJg;uO}wyl&(a z`bQR3By&{1nO;`BTII95@Vv@}N8y(pL@+%k2gMh+Z5ug$vtEwJXzmWsdAfa}9+o&z zeRBoi2{e1kHqPe=S%Ut<>()8mu16o%3limnoDzXo4oh~q{-jXCy$2+0`&rV?e{=lB zI@;;Y3tY8%h2M2LmYw)L;x+*CB>M_NA%S~vETc8+@fwTD7qAY=9s8Z*8c;YbdA)7x@Qv6U zUFJ{ePU|la88+OKj78IhB27%S1sS-xoGUOUL{K@#FS3zb$h=R`=F4i>H>+>*(!#9k zveS#}%7i8l;A~6ef7EFHPNo zSIlPZ2cM;M=19hSIooWYVY*^AavTzWt-C_E#jI{CQci;hhh%c!Ui?Nh;g1@8u{ZNt z>?3&3Ta3 zpfBA6?6l7aHcgb1_RG)0(l$d(;ky1C2N4{z?g)m_=v2l2?oR}wKIQQ(*drFsao3!5 zpG~aDf}I!*&gS_h8ERbR0{<85;2;bwr_=J;ohuaB7uG1D;DlHLync~49A#{F?v|KJ29#jVY~&=f zOEgWhKoBQ4tuq;y0nf2R4C%VX=x_22@iKBo3ZnP#p!}T7@U4Y4P|2+FJCH^dxIPYwEpFhk>!0W$5R-OH_ zv#@)n$+zJ#>32lF(HC&Op5`Iuw9l#8eTVGVsTkIsqNb(u2NL5S>X!+^4H`|DTynMjdFl6@ zy=a5n`4-;WCB+Q1PI`TFGdNec8OZZ#NpAWkcl0qHW8GoKpz#k+la5I1QI4~oF7ln< zv7$&A2;kupzR0g?E8K{nAe;+yMHjdmR=glLUiJkKD5Wgw(Dv=}yC9gxZOcUukxxN_ zSc`jCkuS_L$nrNa2ix*062pVi4AeM)Cy`T7hE1bC6~M`FK3QMyKQel_(>g1>Nfy?eDc1Hx z=}TzF!-=;?{9>8hrfS-{$AWWF`M(4n{?&cJl`Y&HOOiOEo?}5|#O1@}d*DK*kQVoz zDUW0yJj9L4*GX{;?jc@TQfFoVPW^_?Ks#XbYu}zB@kd;OTganRH>x2a02|+7l<;uP znHXne_qPXNm4uM&Z&s$8Y$b5Q(=pxYQFJ+%%Mi=G7}qz&M2+9#n?{1umdqbIe^CRh zS~BU28sF%u-@HxIH`bByS+4kkp&xDhu?~{0{9%~o_nzyihh-A{C3DmRW#2BYAJi=r zR0h>n!8x)SUyei^)DE)#;T@KBaOC`rR8vI0y;7#9iK}vmG@mDnj}+gP8u}@4M9^H6 z7i(d37!lyjiyk5AR;d%pEH!I-vv5Ol#?X4hYmTu{Q5XZ52I+ix#xuQh7Ct>-z0)LT z9?H`-(|&KbsRm_A&@4WI@0w7%XF(quB}hezS#_V{HL{o87OIWQFL% z9PHhKpN3UfN*GT_X1@tD){bsA7i~m4r11M*zlcu?OJ9rUy9RF645d<UpjwIsEI95J5XH;z|)5e_V$UHt?%&W$N83 z@d}3`0lg%JKrYufOh250&iw+V=c&6+xESs@)4z6HB8|NMr{@%mi_-R;t-6%{p6Z!L- zyP0^8THF2v(b_NDKh(>i%7;|Q(7qk|_?t=YzUUpH!{l>XP+M;f-)ZtO^W53PHjPhs 
zs|hceJad4yj+ezP%&gF1Dn+z^U4p9&l#TGiXnd~Q7pRE-uP0X--)_hC2(s7lOj$>8Wk1b*XKmB=0rXfSmHS-gy8r2lkfv$kkB?7gD(9fkbE7kk6BH36Av7EBb z#)Q^^s!P~*vF8F!!zHFQwV6oCqEH#@QrNu(4on~57w;+2i{FftGZA~inQ!ipxdqI3 zz3BgVBmRE^)*&h0;Vt(N#Fwf)610%0usyL|3b_T>)VBxlkMkW#IuXuA^5yT`mKgL^ z7rH)G%-HZ*dy@OA{vqxDR=^xL%1)`E#lB)0K_FZtD(ij9ac0!qvBITcfTy@=_=q)p zeg!#!SVOVtNM8aU>C##(a z%U;uvL~e(`h1vBY{{TY8=zYpduTW`)y~iZEk_<5tQzvqzDrA^UH!4&N(fExK`b`hT z#4LP{1Ynrh!&5V~_#rYch99ZZ zDD-wpxE3QZ!XmeKAE1{lOvHgqxzrt{3dleMUBd$gIjFZvK_zI~_ZL5eltHaacQVR3 zW#mbDhrkVV}xgUwQIk+xC zy1rSq1nyvH{4>lwF?x@ZTwNZTfc5m;f8aACpGz^Pm((u8ej+o2s0ILFKH<=IaTh#r zAJ|hbmj3|gyOy?{ApZbS7i)w+*d?GH0AE)dq<&P_;FhnciRKkWU%1pJOO`abiATh! z$%=)uNNQXtc4ZgNKEK>HD19@?Ph@h#;!()drmRyHhHQljMh-bj)NwHa$Sta;)LM%N zGVP1}bWO{pKp3ZXz^pAzbsHldUSpdu1BNo?%XpRD#o=uUsDq!>J3_nt6Q(2r1FyK6(f2Tdx5wtE zfWmUvs1nBK6Y@p_3|q+<1g@F(gbJQD;{H*XAbGS@{bOjB$Z7zdApZbaO-;dLXhhuF zxP<^(UGT?i^dK6zoQ+(mYi_D)q6M2FEHL*j%S3dNokbZS=zxq|UkZq-_!-S7gm|So zflgSmLOPNyd1!M z5qKpiZ*dnemFiol-MMdUxqd$Y3O;Q@p9@~@d4@m%=~@NN8H-fT9g|@ zoe>=U#h^0x8z0#zVsPT7K+Dc$j+x|&@)>roo+SlQhusXlEs8Bz!3#&2*!YL_hL@?B zI&n0NYCd(Egc)&E^o(29TS}L!nU#Ye`zX#|!@e=D!$eVO$U3c?h|KE-=+# zs-?=BDi(#vt|SSWV>$gw4Pz3)psQi1s4Xd&Tdp)vrnQ)-K~PO#cU~d|u+ynR3WB_& z-sP3P@P8y9q#5|1q7=*`sj?1!BB=E;1-X6D@Wvr%<|Vbl{6&#yFsVbxf-Hr1%)44M zQQW`_8{DWtofu2kkCXg~6h0V-W>=iWv9RVazSx{cfGLbhUOk`%uju~(?7GIFw($gp z>xKzLcaSo-NE3DA9AgnIEZOEN8YRrj5xKC-fsug%oNSb@p(CV+Q&r@+S*XVPiqbWi zW9^{&$Kt>IQ}Qdg#Y0Q#67h^y;0Z~0DpU^>J>F_M9x7aqyx zfCM~Ny%ECDl%ZsL!5+zEE(i|Yu#oV(L;nCHiq}K?S^P_;wGU7N-3022%)>&qhNbvH zz?+XwL0h;i^3kKzyb|jbyMPf(aSzksmM8#T8k%rES1vGFM^O%PdQ3j~Mbx^g=W$2l z6qHMS5a9YAl=470v;~N4BTk?zQudlR=0B2wyUB@Cj^!Q8mo8jcXyRvVa=k?Te+x>4 z(*-UB8Pv$dF|z*vLc~~PZ`{I^s3rdZfU~S)^vd)s4>vf}0pT(IK!O^QC4pxc{-IcE z;1-w1{ZM|BBh(r=Fv<&dVphwAIEHCn6C_76fy@StZZl)9p^PKVN;*dlVjWJtB1J8bF>{wql8$4hTsWVEd|Z-FmMiLxC`CN8mv`w6m_|J%O#!iUSmXJbVv0s zK~IPmqMWgBi)FKho?_yaRc~eD0ums151e)AHtNjd8DCIY-t*WD!5-+-9P*n`giY#B47`VWfwk;C{ zr9(;l1hyT@7J5q`K*cVSC^H<&0EWsK zU@>@lng!{J%(z85pouYRxg~v8nt)&^v({jsVVwon 
zwnF*TZo$!!^%Vf1NEA(x6ap3%$go<(6d{t1+LctkCdY7`C^YkF9b>4nDmxVu8-!B@ zpj0sk(=w)G!DbqnM;`|S3#OsVkL_X1ClEyFIGMr*r?DkK6&D)Rd||6IWQSvflv+cm z=aOTXOjXNNys%)kLC!gjNqk@|RDu3F^Z=tP2R=e#9BFaMT`{Ot-}sdp66CcgGo%n=Klb4@qGBQ zW(BTA4rLI4+xXEh5~>r5#s2^h0?(7FV+SbVGZAqhmMb+-$&>?r5a2vP_d@fTY6S$H zariMQV&UV5?p0B^;1nzFzGe$l;kwWD1R_GcAj8D4KH<#VFqN$sR<3^xwlsT-k8>zp zLGBQ&;#h?0q7+_oz?_7w5`@^iW}zHJW{I?mu^2)zeM+(}hNbYteZv1tN3qHe!X!wk?l{{ZBwerBNxMdmt!F{!20Lk2_j9#{;l zok)nQ8GIz{F?O8{0h}?>9$w+uYJ15408u~!>ezP@$cWui=-2xxx}u_M6FoYLEXWV% z^(j=uAqZBhrUVJVWp{+j;TQpUVP)cx4X~924q9$9t_W~*(N_eyf}ePg*EDQlFIb9) zvK-;-h|e5&JVvdYf@a~6MPpp7J#DAde+Lq*`i87<>I%avim@1N96=%REqfqQg|&wa z0^VvGIkucN%mm)iIf^)5=+q>j@Jbq7jHScGC>ilE-Z+BEY>>aS1&pgiL58ksnwHT^ z8tKo}RSS^-)p z5}B2L2J1ol@eN&0PB4=h`=t-;oZvp;B26P0HL6&trUX&*+xd)c<3Hg6RxfB(#d5%G zZVOc6b*L(0?Hxr-14T-qHA`bsv}OV+EnD%(TrreyVR!o-*6O^h2uet_dx~1$7lYy$ zLONo?1ipw$_HbOoY$A&>Mdzr7juU-KRgR+q=A++r1iREGh||6$U$VNvORfDfp7loU zg6&mIPLx-H8iq-vsa$AcGEn(Dj z2grZ2+B;Nv-I$blVyMxlnV(!~sfFOo5vk}S&aubD%2QJkhloKSvT@`@W~s2Eb9}~D zMY~u#NUQB2}{j-#H2 zd$f5XPpcKJg)55g7i4Y>D8+7E(^NrI8?qf`2DUz}{-uGzI&82kxkdfOz6fZjZA^>L z3}R}ax8ui%;H2WAmQ2buED)s*IDo7Iv=NA5&y=PD4>3l*!~u8k!Zkf`!HWdl@gB}x z-xBJpeG?*YXg;VNo@^s45 z>FJm#P1S9j@i3c4+gWY7Ur}^n3Kb--B zpyd-4NAef_N6}0JtJ(K)I0JqH{{V??D5{QSiE(iz^qS~~d2_;9S>#zu&P}vd;mX0Y zn{_UA%SVZ04f>D&00a^T+8`{~)R$`bOP?JoU)E8`B_1n;aH`f&9?EwQ9th8^lPIRF{Yq;Y&NpOEj^yh7FgpqOQNfEJsksu;0rc zkbv<+{lyJFrwAVqgAm!YEGbQ9D`U9pW%VqBS5nhL?=Pqkm=*Y|jAYIx1mhC<=2ZL( zDnMT0++UA3DlK+9CKJ>pMl<0U+`^G9fZA~ymgh7fRNSaFYzLScv?H;KnpE9p08~7v z&MSAMsla#MEo0R{h1>2F31L^L4!DX~>V8Z^D7HHf-hl1)M+={ z$xu5rc2oPf&9s%d`<7uYID(crWd*ysgV7u}`yPSb>-&Wpg5^{bYA`jv_X?P8VITlO zx@D;DG7HtZk8D^*$J|I004o+|6UF^M$hI+>0pwyTs2gmH5`c7M%K{QhPHGa%v-z3I zy)M;05<;F>M74s`B%p4&NkWJ@^ z!^f!Kv~A$QgcwJaWa(g-CIq1r2Z*W0%#0dtIYNxQLNd&)cZqinoy;eYgywz>8-N_m z#HG2iP_b0cH&XQVGGRwk>`kGYC2nFx0vC0}QnIL0>MGSIx_S6|_c)RM{=oB};Sp<@^-l z5&ZD{TYH7Xmf|LezQ{0O=2)LfD7vgga(S1Dfr-4b+q_JWi^Lzmq1RDD%v~cw9&nN} 
z!o%4Z8BYYsmeAiZwHCe-?SSVIR>0|&Dl*iKK)z$7YS#A$DRJgF;EIZ%1zbQEn1Hxo z^(qptNDi=#Y9{&0#R@cPhZSly+Tpze~_l5UDw>g@eNG`A^9~3 zeyRIVg9yRIS%hD_97QaJ5GofiVB!*vZf%$#Toc6V?p81u^2pDlVPoyBSx&i0RO00Y`)`H zRp2pwQ(MXTB%@b(MHoIKj*1}qUiqs;%=6N zyUD~-xlb1z7jb)!WJ+4x4cxkdSl&=`E)|O0$%#H9QZ#{TKlgNnGGk}zF$ov~xpHIf zUZQ&y>h4ko65^iY%p&%<-(R^_6?*~rT)E;)Tz;ZCK~|5Lqh%7VrM*hIKH=Ozmg?Y1 zp@UesEJA}d(G#T1z!!wFB;7A`M^fo?n%F+x<#ib&(3&qL2#cXyyZ~`IBuM?lB@v$xG{SfrFU3Ea%E$FtkNHcXUg% zX@y>*jfIQ<6{E!-HH|r(TY?8VF!Y_BHs5M|V4&|7qH+Sy)fQ1wk8vB;o6ygo0 z`35DWLr!ca?K1fQStLN>5t>Jsr5(mm3)Gx(( zwLfWbvcg>i5MA-U;O>M0!Wp46WVJAa!-Ti~6WRAHc|PZ-(tBqZDk@+>N~UBbNSr^G zVKAprh-zH2F&Gr&{mc`9Kz82Gs8Sc14)Wt*|A!JvY4d+G%n0DMN_d=xa8_=tWV=kTEKY5^#5C~;^$ zCF?}^KA}~uH3Y$kSBNnM7Fa*~xOy?lTrAd#h7}Up-tGDNf(De;UD^7Sqea93Yz{YZ zGpCV3wC0%mAl74iHD9=L20|TThc=nlOsw!{+qAIwyYfH@Hdy1NRjjRWgrN6pMQx2x zEmP4>3r;VFP|KJu18s--g5;IJRJz!|QOE*$A&<~08n7R@ z0sQoTQv$&M0KO%Vu{?j`F* z`GL3yG!PePYZVh2AfW(k`k@ct!8C^>sPlRP)o3}mAbH`zryiWI=P@C* z+%dJzGceWY0DY9^9uyR?Wwwk^>uo}htgHJwSMYt#rY9rAXwlAv{$LfXL5?(7DT8_U z6tu7dvYa~JD-i*Ny;yL&aN6@+#Biz;VPbL~s}N=_X?xJs=YAveirY0fN~+6Ot~iev zr)cN~DTvb8Z92>WfEz4b;-Zv=d2N?#fww1DSHCb&HEhES!N&AG4m3x(dth2K!c5B3Td`T{U;RLO`(Wsl@7k`we z{mM1=6371l#uCp zP;&GHt0Z1UIIdaX+xnmeYooB3eFjcJdORER1p>JS`dRypmrbYfg4yFeMF1|4G9jxXKJ1;#BKNAvG- z!XRl1faQXQfm>??D<~yh5{X6cF&DN_yMCp=(3En0O*0?-0wZcS92u`n>=@YMBZ~SN zd!6x7ckVK-0pK8SA=MI~_CGPpEcQNPzQOJx!2acy(%^E+_c+@X8KKRKrFne8GU!ztjWB>fpsM}C)N{u#?pE=3Sd84y&-zN6OS=^PRCA}JEQ20_ zUhV!R38p}xSuCNNQXvUoOQBV_I+t$&MJku6=W;83o{47M`D0z%6T!NVmW{Q)xUG{F zlT>O>fsENZ-Y#YnrfTs8;@7KyteDs+CE^K5#Bt~U0LWV{fv8}dHGZRIQyaT$l5GpT z;wwfHkG>^Bfx#1z`KfzOo?_E~QCr?) 
zZZj>fjLvoC<1l8|+_f2q?hQ86_RlJmcuEG5PAX+ZzG7+o$3>;~Tjo4QJVR`_vCAz9 z8{gFQPjbDVaO^&r>uij46_nE4a*$Co?c=x?qhoR83%fQ8KJJ-LOQpkp}#}ut(^L-d_j} zFTz`ab{u+{n<-es5T?{I-HU5;7^JxV&+!1>6RV2kC3*hQWfm z#(hCX{*Hci8mRt?{-bDu@W1LPEGya)bkCr=t+Qs`nxljN*d6(@cq8?D~mx3$Y zH=_6|Q!^=-@}76p^M0q9Jn807I(bm-PaMCA;rwEe0?>Pk8lw1@ZjWqB{^`e0h5*&X zQzP*!ui`eLxDJfHRA$ZBQ09Hl{3biafzsl-0z65Vh(tGkQ_J~*=lO?ZPjHKfubD^+ z^hY3Hs3yNiSye5nx2bgCh|~Ipwy+~D^O$a*X$G|c7}spk0MkTVu}<>>h2Luyn_-g- z_;U(CYe?KHS9yU@sa#!EOO0&7nS`-6yi4k`Z!hj4I4e45iB>=ldj4h67m&&Nk2%0R zKrKvmKxwrdN|acZ)i`{@gJ!W=ZY|U}v~&}f%mA-O*_26M$hWGmY^3J5brDVKBpE*Ajq#2#Cba)z@O>a6z^x_z+((eD=)!mmW;r@(~j%k5JyRmc;c&`}$O z4O=h7OH@7+E|?9z#R!gxx$a)HZ(Q`{mP+h z(}daplTp5Ovn-+!kfz>BmW&4Yg%)0U)J=*Wnu$ZNk!1sJ+n8dHi1QzExfmnQxHi~= z1!T{-WD{M$j=fZ%(eWBA1H^E{uQL1uadjKmHN-4miZ2*Ot4zu=Rt*P4v9@Yoh;eEH zD21Phf6)V4Wjw_4(|M?ptCaKY)tG7Ob42 z;qbbH79#9sVZY91Vl)?+DPU<#vu2pBK`;+?jTWjh0Q#R~t8Ar!w>pg>Z>+=C(STux zD~N_u8r-`FpOfNM&)q<)Uz+g~XOX&_YPXgvQ13x8ZVwk5$5sF@d0>sjTDgK+6O1la z7d*LxqO;U_@f~-x_mJRu`@lm?i`DcT7g%Ih@b&naR$wdTyv68&{pO{E&W796ihw*L?cR);M{+RA_p-;RWpX0 tAaX91TtzHYL3MA2Cj=!aY`ihF%ZM(&aP&$YTQPYE>QlT*Nt2R)|JnCOgIWLp literal 0 HcmV?d00001 diff --git a/docs/swarms_cloud/swarms_api.md b/docs/swarms_cloud/swarms_api.md index 9da9ebce..f09c6eae 100644 --- a/docs/swarms_cloud/swarms_api.md +++ b/docs/swarms_cloud/swarms_api.md @@ -18,8 +18,6 @@ Key capabilities include: - **Multiple Swarm Architectures**: Choose from various swarm patterns to match your specific workflow needs -- **Scheduled Execution**: Set up automated, scheduled swarm executions - - **Comprehensive Logging**: Track and analyze all API interactions - **Cost Management**: Predictable, transparent pricing with optimized resource utilization @@ -47,9 +45,6 @@ API keys can be obtained and managed at [https://swarms.world/platform/api-keys] | `/health` | GET | Simple health check endpoint | | `/v1/swarm/completions` | POST | Run a swarm with specified configuration | | `/v1/swarm/batch/completions` | POST | Run 
multiple swarms in batch mode | -| `/v1/swarm/schedule` | POST | Schedule a swarm to run at a specific time | -| `/v1/swarm/schedule` | GET | Get all scheduled swarm jobs | -| `/v1/swarm/schedule/{job_id}` | DELETE | Cancel a scheduled swarm job | | `/v1/swarm/logs` | GET | Retrieve API request logs | | `/v1/swarms/available` | GET | Get all available swarms as a list of strings | | `/v1/models/available` | GET | Get all available models as a list of strings | @@ -96,7 +91,6 @@ The `SwarmSpec` model defines the configuration of a swarm. | img | string | Optional image URL for the swarm | No | | return_history | boolean | Whether to return execution history | No | | rules | string | Guidelines for swarm behavior | No | -| schedule | ScheduleSpec | Scheduling information | No | | service_tier | string | Service tier for processing ("standard" or "flex") | No | ### AgentSpec @@ -117,16 +111,6 @@ The `AgentSpec` model defines the configuration of an individual agent. *Required if agents are manually specified; not required if using auto-generated agents -### ScheduleSpec - -The `ScheduleSpec` model defines when a swarm should be executed. - -| Field | Type | Description | Required | -|-------|------|-------------|----------| -| scheduled_time | datetime | Time when the swarm should run | Yes | -| timezone | string | Timezone for the scheduled time | No (defaults to "UTC") | - - ### Endpoint Details @@ -138,11 +122,58 @@ Check if the API service is available and functioning correctly. 
**Method**: GET **Rate Limit**: 100 requests per 60 seconds -**Example Request**: -```bash -curl -X GET "https://api.swarms.world/health" \ - -H "x-api-key: your_api_key_here" -``` +=== "Shell (curl)" + ```bash + curl -X GET "https://api.swarms.world/health" \ + -H "x-api-key: your_api_key_here" + ``` + +=== "Python (requests)" + ```python + import requests + + API_BASE_URL = "https://api.swarms.world" + API_KEY = "your_api_key_here" + + headers = { + "x-api-key": API_KEY + } + + response = requests.get(f"{API_BASE_URL}/health", headers=headers) + + if response.status_code == 200: + print("API is healthy:", response.json()) + else: + print(f"Error: {response.status_code}") + ``` + +=== "TypeScript (fetch)" + ```typescript + const API_BASE_URL = "https://api.swarms.world"; + const API_KEY = "your_api_key_here"; + + async function checkHealth(): Promise { + try { + const response = await fetch(`${API_BASE_URL}/health`, { + method: 'GET', + headers: { + 'x-api-key': API_KEY + } + }); + + if (response.ok) { + const data = await response.json(); + console.log("API is healthy:", data); + } else { + console.error(`Error: ${response.status}`); + } + } catch (error) { + console.error("Request failed:", error); + } + } + + checkHealth(); + ``` **Example Response**: ```json @@ -173,49 +204,193 @@ Run a swarm with the specified configuration to complete a task. 
| img | string | Optional image URL for the swarm | No | | return_history | boolean | Whether to return execution history | No | | rules | string | Guidelines for swarm behavior | No | -| schedule | ScheduleSpec | Scheduling information | No | - -**Example Request**: -```bash - -# Run single swarm -curl -X POST "https://api.swarms.world/v1/swarm/completions" \ - -H "x-api-key: $SWARMS_API_KEY" \ - -H "Content-Type: application/json" \ - -d '{ - "name": "Financial Analysis Swarm", - "description": "Market analysis swarm", - "agents": [ - { - "agent_name": "Market Analyst", - "description": "Analyzes market trends", - "system_prompt": "You are a financial analyst expert.", - "model_name": "openai/gpt-4o", - "role": "worker", + +=== "Shell (curl)" + ```bash + curl -X POST "https://api.swarms.world/v1/swarm/completions" \ + -H "x-api-key: $SWARMS_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Financial Analysis Swarm", + "description": "Market analysis swarm", + "agents": [ + { + "agent_name": "Market Analyst", + "description": "Analyzes market trends", + "system_prompt": "You are a financial analyst expert.", + "model_name": "openai/gpt-4o", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.5, + "auto_generate_prompt": false + }, + { + "agent_name": "Economic Forecaster", + "description": "Predicts economic trends", + "system_prompt": "You are an expert in economic forecasting.", + "model_name": "gpt-4o", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.5, + "auto_generate_prompt": false + } + ], "max_loops": 1, - "max_tokens": 8192, - "temperature": 0.5, - "auto_generate_prompt": false - }, - { - "agent_name": "Economic Forecaster", - "description": "Predicts economic trends", - "system_prompt": "You are an expert in economic forecasting.", - "model_name": "gpt-4o", - "role": "worker", + "swarm_type": "ConcurrentWorkflow", + "task": "What are the best etfs and index funds for ai and 
tech?", + "output_type": "dict" + }' + ``` + +=== "Python (requests)" + ```python + import requests + import json + + API_BASE_URL = "https://api.swarms.world" + API_KEY = "your_api_key_here" + + headers = { + "x-api-key": API_KEY, + "Content-Type": "application/json" + } + + swarm_config = { + "name": "Financial Analysis Swarm", + "description": "Market analysis swarm", + "agents": [ + { + "agent_name": "Market Analyst", + "description": "Analyzes market trends", + "system_prompt": "You are a financial analyst expert.", + "model_name": "openai/gpt-4o", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.5, + "auto_generate_prompt": False + }, + { + "agent_name": "Economic Forecaster", + "description": "Predicts economic trends", + "system_prompt": "You are an expert in economic forecasting.", + "model_name": "gpt-4o", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.5, + "auto_generate_prompt": False + } + ], "max_loops": 1, - "max_tokens": 8192, - "temperature": 0.5, - "auto_generate_prompt": false - } - ], - "max_loops": 1, - "swarm_type": "ConcurrentWorkflow", - "task": "What are the best etfs and index funds for ai and tech?", - "output_type": "dict" - }' + "swarm_type": "ConcurrentWorkflow", + "task": "What are the best etfs and index funds for ai and tech?", + "output_type": "dict" + } + + response = requests.post( + f"{API_BASE_URL}/v1/swarm/completions", + headers=headers, + json=swarm_config + ) + + if response.status_code == 200: + result = response.json() + print("Swarm completed successfully!") + print(f"Cost: ${result['metadata']['billing_info']['total_cost']}") + print(f"Execution time: {result['metadata']['execution_time_seconds']} seconds") + else: + print(f"Error: {response.status_code} - {response.text}") + ``` + +=== "TypeScript (fetch)" + ```typescript + interface AgentSpec { + agent_name: string; + description: string; + system_prompt: string; + model_name: string; + role: string; + 
max_loops: number; + max_tokens: number; + temperature: number; + auto_generate_prompt: boolean; + } -``` + interface SwarmConfig { + name: string; + description: string; + agents: AgentSpec[]; + max_loops: number; + swarm_type: string; + task: string; + output_type: string; + } + + const API_BASE_URL = "https://api.swarms.world"; + const API_KEY = "your_api_key_here"; + + async function runSwarm(): Promise { + const swarmConfig: SwarmConfig = { + name: "Financial Analysis Swarm", + description: "Market analysis swarm", + agents: [ + { + agent_name: "Market Analyst", + description: "Analyzes market trends", + system_prompt: "You are a financial analyst expert.", + model_name: "openai/gpt-4o", + role: "worker", + max_loops: 1, + max_tokens: 8192, + temperature: 0.5, + auto_generate_prompt: false + }, + { + agent_name: "Economic Forecaster", + description: "Predicts economic trends", + system_prompt: "You are an expert in economic forecasting.", + model_name: "gpt-4o", + role: "worker", + max_loops: 1, + max_tokens: 8192, + temperature: 0.5, + auto_generate_prompt: false + } + ], + max_loops: 1, + swarm_type: "ConcurrentWorkflow", + task: "What are the best etfs and index funds for ai and tech?", + output_type: "dict" + }; + + try { + const response = await fetch(`${API_BASE_URL}/v1/swarm/completions`, { + method: 'POST', + headers: { + 'x-api-key': API_KEY, + 'Content-Type': 'application/json' + }, + body: JSON.stringify(swarmConfig) + }); + + if (response.ok) { + const result = await response.json(); + console.log("Swarm completed successfully!"); + console.log(`Cost: $${result.metadata.billing_info.total_cost}`); + console.log(`Execution time: ${result.metadata.execution_time_seconds} seconds`); + } else { + console.error(`Error: ${response.status} - ${await response.text()}`); + } + } catch (error) { + console.error("Request failed:", error); + } + } + + runSwarm(); + ``` **Example Response**: ```json @@ -271,65 +446,249 @@ Run multiple swarms as a batch 
operation. |-------|------|-------------|----------| | swarms | Array | List of swarm specifications | Yes | -**Example Request**: -```bash -# Batch swarm completions -curl -X POST "https://api.swarms.world/v1/swarm/batch/completions" \ - -H "x-api-key: $SWARMS_API_KEY" \ - -H "Content-Type: application/json" \ - -d '[ - { - "name": "Batch Swarm 1", - "description": "First swarm in the batch", - "agents": [ +=== "Shell (curl)" + ```bash + curl -X POST "https://api.swarms.world/v1/swarm/batch/completions" \ + -H "x-api-key: $SWARMS_API_KEY" \ + -H "Content-Type: application/json" \ + -d '[ { - "agent_name": "Research Agent", - "description": "Conducts research", - "system_prompt": "You are a research assistant.", - "model_name": "gpt-4o", - "role": "worker", - "max_loops": 1 + "name": "Batch Swarm 1", + "description": "First swarm in the batch", + "agents": [ + { + "agent_name": "Research Agent", + "description": "Conducts research", + "system_prompt": "You are a research assistant.", + "model_name": "gpt-4o", + "role": "worker", + "max_loops": 1 + }, + { + "agent_name": "Analysis Agent", + "description": "Analyzes data", + "system_prompt": "You are a data analyst.", + "model_name": "gpt-4o", + "role": "worker", + "max_loops": 1 + } + ], + "max_loops": 1, + "swarm_type": "SequentialWorkflow", + "task": "Research AI advancements." 
}, { - "agent_name": "Analysis Agent", - "description": "Analyzes data", - "system_prompt": "You are a data analyst.", - "model_name": "gpt-4o", - "role": "worker", - "max_loops": 1 + "name": "Batch Swarm 2", + "description": "Second swarm in the batch", + "agents": [ + { + "agent_name": "Writing Agent", + "description": "Writes content", + "system_prompt": "You are a content writer.", + "model_name": "gpt-4o", + "role": "worker", + "max_loops": 1 + }, + { + "agent_name": "Editing Agent", + "description": "Edits content", + "system_prompt": "You are an editor.", + "model_name": "gpt-4o", + "role": "worker", + "max_loops": 1 + } + ], + "max_loops": 1, + "swarm_type": "SequentialWorkflow", + "task": "Write a summary of AI research." } - ], - "max_loops": 1, - "swarm_type": "SequentialWorkflow", - "task": "Research AI advancements." - }, - { - "name": "Batch Swarm 2", - "description": "Second swarm in the batch", - "agents": [ + ]' + ``` + +=== "Python (requests)" + ```python + import requests + import json + + API_BASE_URL = "https://api.swarms.world" + API_KEY = "your_api_key_here" + + headers = { + "x-api-key": API_KEY, + "Content-Type": "application/json" + } + + batch_swarms = [ { - "agent_name": "Writing Agent", - "description": "Writes content", - "system_prompt": "You are a content writer.", - "model_name": "gpt-4o", - "role": "worker", - "max_loops": 1 + "name": "Batch Swarm 1", + "description": "First swarm in the batch", + "agents": [ + { + "agent_name": "Research Agent", + "description": "Conducts research", + "system_prompt": "You are a research assistant.", + "model_name": "gpt-4o", + "role": "worker", + "max_loops": 1 + }, + { + "agent_name": "Analysis Agent", + "description": "Analyzes data", + "system_prompt": "You are a data analyst.", + "model_name": "gpt-4o", + "role": "worker", + "max_loops": 1 + } + ], + "max_loops": 1, + "swarm_type": "SequentialWorkflow", + "task": "Research AI advancements." 
}, { - "agent_name": "Editing Agent", - "description": "Edits content", - "system_prompt": "You are an editor.", - "model_name": "gpt-4o", - "role": "worker", - "max_loops": 1 + "name": "Batch Swarm 2", + "description": "Second swarm in the batch", + "agents": [ + { + "agent_name": "Writing Agent", + "description": "Writes content", + "system_prompt": "You are a content writer.", + "model_name": "gpt-4o", + "role": "worker", + "max_loops": 1 + }, + { + "agent_name": "Editing Agent", + "description": "Edits content", + "system_prompt": "You are an editor.", + "model_name": "gpt-4o", + "role": "worker", + "max_loops": 1 + } + ], + "max_loops": 1, + "swarm_type": "SequentialWorkflow", + "task": "Write a summary of AI research." + } + ] + + response = requests.post( + f"{API_BASE_URL}/v1/swarm/batch/completions", + headers=headers, + json=batch_swarms + ) + + if response.status_code == 200: + results = response.json() + print(f"Batch completed with {len(results)} swarms") + for i, result in enumerate(results): + print(f"Swarm {i+1}: {result['swarm_name']} - {result['status']}") + else: + print(f"Error: {response.status_code} - {response.text}") + ``` + +=== "TypeScript (fetch)" + ```typescript + interface AgentSpec { + agent_name: string; + description: string; + system_prompt: string; + model_name: string; + role: string; + max_loops: number; + } + + interface SwarmSpec { + name: string; + description: string; + agents: AgentSpec[]; + max_loops: number; + swarm_type: string; + task: string; + } + + const API_BASE_URL = "https://api.swarms.world"; + const API_KEY = "your_api_key_here"; + + async function runBatchSwarms(): Promise { + const batchSwarms: SwarmSpec[] = [ + { + name: "Batch Swarm 1", + description: "First swarm in the batch", + agents: [ + { + agent_name: "Research Agent", + description: "Conducts research", + system_prompt: "You are a research assistant.", + model_name: "gpt-4o", + role: "worker", + max_loops: 1 + }, + { + agent_name: "Analysis Agent", + 
description: "Analyzes data", + system_prompt: "You are a data analyst.", + model_name: "gpt-4o", + role: "worker", + max_loops: 1 + } + ], + max_loops: 1, + swarm_type: "SequentialWorkflow", + task: "Research AI advancements." + }, + { + name: "Batch Swarm 2", + description: "Second swarm in the batch", + agents: [ + { + agent_name: "Writing Agent", + description: "Writes content", + system_prompt: "You are a content writer.", + model_name: "gpt-4o", + role: "worker", + max_loops: 1 + }, + { + agent_name: "Editing Agent", + description: "Edits content", + system_prompt: "You are an editor.", + model_name: "gpt-4o", + role: "worker", + max_loops: 1 + } + ], + max_loops: 1, + swarm_type: "SequentialWorkflow", + task: "Write a summary of AI research." + } + ]; + + try { + const response = await fetch(`${API_BASE_URL}/v1/swarm/batch/completions`, { + method: 'POST', + headers: { + 'x-api-key': API_KEY, + 'Content-Type': 'application/json' + }, + body: JSON.stringify(batchSwarms) + }); + + if (response.ok) { + const results = await response.json(); + console.log(`Batch completed with ${results.length} swarms`); + results.forEach((result: any, index: number) => { + console.log(`Swarm ${index + 1}: ${result.swarm_name} - ${result.status}`); + }); + } else { + console.error(`Error: ${response.status} - ${await response.text()}`); + } + } catch (error) { + console.error("Request failed:", error); } - ], - "max_loops": 1, - "swarm_type": "SequentialWorkflow", - "task": "Write a summary of AI research." } - ]' -``` + + runBatchSwarms(); + ``` **Example Response**: ```json @@ -351,10 +710,7 @@ curl -X POST "https://api.swarms.world/v1/swarm/batch/completions" \ ] ``` -------- - - - +## Individual Agent Endpoints ### Run Single Agent @@ -371,24 +727,125 @@ Run a single agent with the specified configuration. 
| agent_config | AgentSpec | Configuration for the agent | Yes | | task | string | The task to be completed by the agent | Yes | -**Example Request**: -```bash -curl -X POST "https://api.swarms.world/v1/agent/completions" \ - -H "x-api-key: your_api_key_here" \ - -H "Content-Type: application/json" \ - -d '{ - "agent_config": { - "agent_name": "Research Assistant", - "description": "Helps with research tasks", - "system_prompt": "You are a research assistant expert.", - "model_name": "gpt-4o", - "max_loops": 1, - "max_tokens": 8192, - "temperature": 0.5 - }, - "task": "Research the latest developments in quantum computing." - }' -``` +=== "Shell (curl)" + ```bash + curl -X POST "https://api.swarms.world/v1/agent/completions" \ + -H "x-api-key: your_api_key_here" \ + -H "Content-Type: application/json" \ + -d '{ + "agent_config": { + "agent_name": "Research Assistant", + "description": "Helps with research tasks", + "system_prompt": "You are a research assistant expert.", + "model_name": "gpt-4o", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.5 + }, + "task": "Research the latest developments in quantum computing." + }' + ``` + +=== "Python (requests)" + ```python + import requests + import json + + API_BASE_URL = "https://api.swarms.world" + API_KEY = "your_api_key_here" + + headers = { + "x-api-key": API_KEY, + "Content-Type": "application/json" + } + + agent_request = { + "agent_config": { + "agent_name": "Research Assistant", + "description": "Helps with research tasks", + "system_prompt": "You are a research assistant expert.", + "model_name": "gpt-4o", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.5 + }, + "task": "Research the latest developments in quantum computing." 
+ } + + response = requests.post( + f"{API_BASE_URL}/v1/agent/completions", + headers=headers, + json=agent_request + ) + + if response.status_code == 200: + result = response.json() + print(f"Agent {result['name']} completed successfully!") + print(f"Usage: {result['usage']['total_tokens']} tokens") + print(f"Output: {result['outputs']}") + else: + print(f"Error: {response.status_code} - {response.text}") + ``` + +=== "TypeScript (fetch)" + ```typescript + interface AgentConfig { + agent_name: string; + description: string; + system_prompt: string; + model_name: string; + max_loops: number; + max_tokens: number; + temperature: number; + } + + interface AgentRequest { + agent_config: AgentConfig; + task: string; + } + + const API_BASE_URL = "https://api.swarms.world"; + const API_KEY = "your_api_key_here"; + + async function runSingleAgent(): Promise { + const agentRequest: AgentRequest = { + agent_config: { + agent_name: "Research Assistant", + description: "Helps with research tasks", + system_prompt: "You are a research assistant expert.", + model_name: "gpt-4o", + max_loops: 1, + max_tokens: 8192, + temperature: 0.5 + }, + task: "Research the latest developments in quantum computing." 
+ }; + + try { + const response = await fetch(`${API_BASE_URL}/v1/agent/completions`, { + method: 'POST', + headers: { + 'x-api-key': API_KEY, + 'Content-Type': 'application/json' + }, + body: JSON.stringify(agentRequest) + }); + + if (response.ok) { + const result = await response.json(); + console.log(`Agent ${result.name} completed successfully!`); + console.log(`Usage: ${result.usage.total_tokens} tokens`); + console.log(`Output:`, result.outputs); + } else { + console.error(`Error: ${response.status} - ${await response.text()}`); + } + } catch (error) { + console.error("Request failed:", error); + } + } + + runSingleAgent(); + ``` **Example Response**: ```json @@ -408,92 +865,6 @@ curl -X POST "https://api.swarms.world/v1/agent/completions" \ } ``` - - -### Get Models - -#### Get Available Models - -Get all available models as a list of strings. - -**Endpoint**: `/v1/models/available` -**Method**: GET - -**Example Request**: -```bash -curl -X GET "https://api.swarms.world/v1/models/available" \ - -H "x-api-key: your_api_key_here" -``` - - ------- - - -### Get Swarms Available - -Get all available swarms as a list of strings. - -**Endpoint**: `/v1/swarms/available` -**Method**: GET - -**Example Request**: -```bash -curl -X GET "https://api.swarms.world/v1/swarms/available" \ - -H "x-api-key: your_api_key_here" -``` - -**Example Response**: -```json -{ - "status": "success", - "swarms": ["financial-analysis-swarm", "market-sentiment-swarm"] -} -``` - -------- - - -#### Get API Logs - -Retrieve logs of API requests made with your API key. 
- -**Endpoint**: `/v1/swarm/logs` -**Method**: GET -**Rate Limit**: 100 requests per 60 seconds - -**Example Request**: -```bash -curl -X GET "https://api.swarms.world/v1/swarm/logs" \ - -H "x-api-key: your_api_key_here" -``` - -**Example Response**: -```json -{ - "status": "success", - "count": 25, - "logs": [ - { - "id": "log_id_12345", - "api_key": "api_key_redacted", - "data": { - "action": "run_swarm", - "swarm_name": "financial-analysis-swarm", - "task": "Analyze quarterly financials...", - "timestamp": "2025-03-04T14:22:45Z" - } - }, - ... - ] -} -``` - - - -## Individual Agent Endpoints - -### Run Single Agent - ### AgentCompletion Model The `AgentCompletion` model defines the configuration for running a single agent task. @@ -596,31 +967,158 @@ Execute multiple agent tasks in parallel. **Maximum Batch Size**: 10 requests **Input** A list of `AgentCompeletion` inputs -**Request Body**: -```json -[ - { - "agent_config": { - "agent_name": "Market Analyst", - "description": "Expert in market analysis", - "system_prompt": "You are a financial market analyst.", - "model_name": "gpt-4o", - "temperature": 0.3 - }, - "task": "Analyze the current market trends in AI technology sector" - }, - { - "agent_config": { - "agent_name": "Technical Writer", - "description": "Specialized in technical documentation", - "system_prompt": "You are a technical documentation expert.", - "model_name": "gpt-4o", - "temperature": 0.7 - }, - "task": "Create a technical guide for implementing OAuth2 authentication" - } -] -``` +=== "Shell (curl)" + ```bash + curl -X POST "https://api.swarms.world/v1/agent/batch/completions" \ + -H "x-api-key: your_api_key_here" \ + -H "Content-Type: application/json" \ + -d '[ + { + "agent_config": { + "agent_name": "Market Analyst", + "description": "Expert in market analysis", + "system_prompt": "You are a financial market analyst.", + "model_name": "gpt-4o", + "temperature": 0.3 + }, + "task": "Analyze the current market trends in AI technology 
sector" + }, + { + "agent_config": { + "agent_name": "Technical Writer", + "description": "Specialized in technical documentation", + "system_prompt": "You are a technical documentation expert.", + "model_name": "gpt-4o", + "temperature": 0.7 + }, + "task": "Create a technical guide for implementing OAuth2 authentication" + } + ]' + ``` + +=== "Python (requests)" + ```python + import requests + import json + + API_BASE_URL = "https://api.swarms.world" + API_KEY = "your_api_key_here" + + headers = { + "x-api-key": API_KEY, + "Content-Type": "application/json" + } + + batch_agents = [ + { + "agent_config": { + "agent_name": "Market Analyst", + "description": "Expert in market analysis", + "system_prompt": "You are a financial market analyst.", + "model_name": "gpt-4o", + "temperature": 0.3 + }, + "task": "Analyze the current market trends in AI technology sector" + }, + { + "agent_config": { + "agent_name": "Technical Writer", + "description": "Specialized in technical documentation", + "system_prompt": "You are a technical documentation expert.", + "model_name": "gpt-4o", + "temperature": 0.7 + }, + "task": "Create a technical guide for implementing OAuth2 authentication" + } + ] + + response = requests.post( + f"{API_BASE_URL}/v1/agent/batch/completions", + headers=headers, + json=batch_agents + ) + + if response.status_code == 200: + result = response.json() + print(f"Batch completed with {result['total_requests']} agents") + print(f"Execution time: {result['execution_time']} seconds") + print("\nResults:") + for i, agent_result in enumerate(result['results']): + print(f" Agent {i+1}: {agent_result['name']} - {agent_result['success']}") + else: + print(f"Error: {response.status_code} - {response.text}") + ``` + +=== "TypeScript (fetch)" + ```typescript + interface AgentConfig { + agent_name: string; + description: string; + system_prompt: string; + model_name: string; + temperature: number; + } + + interface AgentCompletion { + agent_config: AgentConfig; + task: 
string; + } + + const API_BASE_URL = "https://api.swarms.world"; + const API_KEY = "your_api_key_here"; + + async function runBatchAgents(): Promise { + const batchAgents: AgentCompletion[] = [ + { + agent_config: { + agent_name: "Market Analyst", + description: "Expert in market analysis", + system_prompt: "You are a financial market analyst.", + model_name: "gpt-4o", + temperature: 0.3 + }, + task: "Analyze the current market trends in AI technology sector" + }, + { + agent_config: { + agent_name: "Technical Writer", + description: "Specialized in technical documentation", + system_prompt: "You are a technical documentation expert.", + model_name: "gpt-4o", + temperature: 0.7 + }, + task: "Create a technical guide for implementing OAuth2 authentication" + } + ]; + + try { + const response = await fetch(`${API_BASE_URL}/v1/agent/batch/completions`, { + method: 'POST', + headers: { + 'x-api-key': API_KEY, + 'Content-Type': 'application/json' + }, + body: JSON.stringify(batchAgents) + }); + + if (response.ok) { + const result = await response.json(); + console.log(`Batch completed with ${result.total_requests} agents`); + console.log(`Execution time: ${result.execution_time} seconds`); + console.log("\nResults:"); + result.results.forEach((agentResult: any, index: number) => { + console.log(` Agent ${index + 1}: ${agentResult.name} - ${agentResult.success}`); + }); + } else { + console.error(`Error: ${response.status} - ${await response.text()}`); + } + } catch (error) { + console.error("Request failed:", error); + } + } + + runBatchAgents(); + ``` **Response**: ```json @@ -660,394 +1158,10 @@ Execute multiple agent tasks in parallel. } ``` - ----- ## Production Examples -### Python Examples - -#### Financial Risk Assessment (Python) - -This example demonstrates creating a swarm for comprehensive financial risk assessment. 
- -```python -import requests -import json -from datetime import datetime, timedelta - -# API Configuration -API_BASE_URL = "https://api.swarms.world" -API_KEY = "your_api_key_here" -HEADERS = { - "x-api-key": API_KEY, - "Content-Type": "application/json" -} - -def financial_risk_assessment(company_data, market_conditions, risk_tolerance): - """ - Creates and runs a swarm to perform comprehensive financial risk assessment. - - Args: - company_data (str): Description or data about the company - market_conditions (str): Current market conditions - risk_tolerance (str): Risk tolerance level (e.g., "conservative", "moderate", "aggressive") - - Returns: - dict: Risk assessment results - """ - # Prepare the task description with all relevant information - task = f""" - Perform a comprehensive financial risk assessment with the following data: - - COMPANY DATA: - {company_data} - - MARKET CONDITIONS: - {market_conditions} - - RISK TOLERANCE: - {risk_tolerance} - - Analyze all potential risk factors including market risks, credit risks, - operational risks, and regulatory compliance risks. Quantify each risk factor - on a scale of 1-10 and provide specific mitigation strategies. - - Return a detailed report with executive summary, risk scores, detailed analysis, - and actionable recommendations. - """ - - # Define specialized financial agents - financial_analysts = [ - { - "agent_name": "MarketAnalyst", - "description": "Specialist in market risk assessment and forecasting", - "system_prompt": "You are an expert market analyst with deep expertise in financial markets. Analyze market conditions, trends, and external factors that could impact financial performance. 
Provide quantitative and qualitative analysis of market-related risks.", - "model_name": "gpt-4o", - "temperature": 0.3, - "role": "analyst", - "max_loops": 1 - }, - { - "agent_name": "CreditRiskAnalyst", - "description": "Expert in assessing credit and counterparty risks", - "system_prompt": "You are a specialist in credit risk analysis with experience in banking and financial institutions. Evaluate creditworthiness, default probabilities, and counterparty exposures. Provide detailed analysis of credit-related risks and recommended safeguards.", - "model_name": "gpt-4o", - "temperature": 0.2, - "role": "analyst", - "max_loops": 1 - }, - { - "agent_name": "RegulatoryExpert", - "description": "Expert in financial regulations and compliance", - "system_prompt": "You are a regulatory compliance expert with deep knowledge of financial regulations. Identify potential regulatory risks, compliance issues, and governance concerns. Recommend compliance measures and risk mitigation strategies.", - "model_name": "gpt-4o", - "temperature": 0.2, - "role": "analyst", - "max_loops": 1 - }, - { - "agent_name": "RiskSynthesizer", - "description": "Integrates all risk factors into comprehensive assessment", - "system_prompt": "You are a senior risk management professional responsible for synthesizing multiple risk analyses into a coherent, comprehensive risk assessment. 
Integrate analyses from various domains, resolve conflicting assessments, and provide a holistic view of risk exposure with prioritized recommendations.", - "model_name": "gpt-4o", - "temperature": 0.4, - "role": "manager", - "max_loops": 1 - } - ] - - # Create the swarm specification - swarm_spec = { - "name": "financial-risk-assessment", - "description": "Comprehensive financial risk assessment swarm", - "agents": financial_analysts, - "max_loops": 2, - "swarm_type": "HiearchicalSwarm", - "task": task, - "return_history": True - } - - # Execute the swarm - response = requests.post( - f"{API_BASE_URL}/v1/swarm/completions", - headers=HEADERS, - json=swarm_spec - ) - - if response.status_code == 200: - result = response.json() - print(f"Risk assessment completed. Cost: ${result['metadata']['billing_info']['total_cost']}") - return result["output"] - else: - print(f"Error: {response.status_code} - {response.text}") - return None - -# Usage example -if __name__ == "__main__": - company_data = """ - XYZ Financial Services - Annual Revenue: $125M - Current Debt: $45M - Credit Rating: BBB+ - Primary Markets: North America, Europe - Key Products: Asset management, retirement planning, commercial lending - Recent Events: Expanding into Asian markets, New CEO appointed 6 months ago - """ - - market_conditions = """ - Current interest rates rising (Federal Reserve increased rates by 0.25% last month) - Inflation at 3.2% (12-month outlook projects 3.5-4.0%) - Market volatility index (VIX) at 22.4 (elevated) - Regulatory environment: New financial reporting requirements taking effect next quarter - Sector performance: Financial services sector underperforming broader market by 2.7% - """ - - risk_tolerance = "moderate" - - result = financial_risk_assessment(company_data, market_conditions, risk_tolerance) - - if result: - # Process and use the risk assessment - print(json.dumps(result, indent=2)) - - # Optionally, schedule a follow-up assessment - tomorrow = datetime.utcnow() 
+ timedelta(days=30) - schedule_spec = { - "name": "monthly-risk-update", - "description": "Monthly update to risk assessment", - "task": f"Update the risk assessment for XYZ Financial Services based on current market conditions. Previous assessment: {json.dumps(result)}", - "schedule": { - "scheduled_time": tomorrow.isoformat() + "Z", - "timezone": "UTC" - } - } - - schedule_response = requests.post( - f"{API_BASE_URL}/v1/swarm/schedule", - headers=HEADERS, - json=schedule_spec - ) - - if schedule_response.status_code == 200: - print("Follow-up assessment scheduled successfully") - print(schedule_response.json()) -``` - -#### Healthcare Patient Data Analysis (Python) - -This example demonstrates creating a swarm for analyzing patient health data and generating insights. - -```python -import requests -import json -import os -from datetime import datetime - -# API Configuration -API_BASE_URL = "https://api.swarms.world" -API_KEY = os.environ.get("SWARMS_API_KEY") -HEADERS = { - "x-api-key": API_KEY, - "Content-Type": "application/json" -} - -def analyze_patient_health_data(patient_data, medical_history, lab_results, treatment_goals): - """ - Creates and runs a swarm to analyze patient health data and generate insights. - - Args: - patient_data (str): Basic patient information - medical_history (str): Patient's medical history - lab_results (str): Recent laboratory results - treatment_goals (str): Treatment objectives - - Returns: - dict: Comprehensive health analysis and recommendations - """ - # Prepare the detailed task description - task = f""" - Perform a comprehensive analysis of the following patient health data: - - PATIENT INFORMATION: - {patient_data} - - MEDICAL HISTORY: - {medical_history} - - LABORATORY RESULTS: - {lab_results} - - TREATMENT GOALS: - {treatment_goals} - - Analyze all aspects of the patient's health status, identify potential concerns, - evaluate treatment effectiveness, and provide evidence-based recommendations for - optimizing care. 
Consider medication interactions, lifestyle factors, and preventive measures. - - Return a detailed clinical report with key findings, risk stratification, - prioritized recommendations, and suggested follow-up timeline. - """ - - # Create the swarm specification with auto-generated agents - # (letting the system create specialized medical experts) - swarm_spec = { - "name": "patient-health-analysis", - "description": "Comprehensive patient health data analysis", - "swarm_type": "AutoSwarmBuilder", - "task": task, - "max_loops": 3, - "return_history": True - } - - # Execute the swarm - try: - response = requests.post( - f"{API_BASE_URL}/v1/swarm/completions", - headers=HEADERS, - json=swarm_spec - ) - - response.raise_for_status() - result = response.json() - - # Log the execution metadata - execution_time = result["metadata"]["execution_time_seconds"] - cost = result["metadata"]["billing_info"]["total_cost"] - num_agents = result["metadata"]["num_agents"] - - print(f"Analysis completed in {execution_time:.2f} seconds") - print(f"Used {num_agents} specialized medical agents") - print(f"Total cost: ${cost:.4f}") - - # Return just the analysis results - return result["output"] - - except requests.exceptions.RequestException as e: - print(f"API request failed: {str(e)}") - if hasattr(e, 'response') and e.response: - print(f"Response: {e.response.text}") - return None - except Exception as e: - print(f"Error: {str(e)}") - return None - -# Usage example -if __name__ == "__main__": - # Sample patient data (would typically come from EHR system) - patient_data = """ - ID: PT-28456 - Age: 67 - Gender: Female - Height: 162 cm - Weight: 78 kg - Vitals: - - Blood Pressure: 142/88 mmHg - - Heart Rate: 76 bpm - - Respiratory Rate: 16/min - - Temperature: 37.1°C - - Oxygen Saturation: 97% - """ - - medical_history = """ - Diagnoses: - - Type 2 Diabetes Mellitus (diagnosed 12 years ago) - - Hypertension (diagnosed 8 years ago) - - Osteoarthritis (knees, diagnosed 5 years ago) - - 
Hyperlipidemia - - Surgical History: - - Cholecystectomy (15 years ago) - - Right knee arthroscopy (3 years ago) - - Medications: - - Metformin 1000mg BID - - Lisinopril 20mg daily - - Atorvastatin 40mg daily - - Aspirin 81mg daily - - Acetaminophen 500mg PRN for joint pain - - Allergies: - - Penicillin (rash) - - Sulfa drugs (hives) - - Family History: - - Father: MI at age 70, died at 76 - - Mother: Breast cancer at 68, Type 2 Diabetes, died at 82 - - Sister: Type 2 Diabetes, Hypertension - """ - - lab_results = """ - CBC (2 days ago): - - WBC: 7.2 x10^9/L (normal) - - RBC: 4.1 x10^12/L (low-normal) - - Hemoglobin: 12.8 g/dL (low-normal) - - Hematocrit: 38% (low-normal) - - Platelets: 245 x10^9/L (normal) - - Comprehensive Metabolic Panel: - - Glucose (fasting): 142 mg/dL (elevated) - - HbA1c: 7.8% (elevated) - - BUN: 22 mg/dL (normal) - - Creatinine: 1.1 mg/dL (normal) - - eGFR: 62 mL/min/1.73m² (mildly reduced) - - Sodium: 138 mEq/L (normal) - - Potassium: 4.2 mEq/L (normal) - - Chloride: 101 mEq/L (normal) - - Calcium: 9.4 mg/dL (normal) - - ALT: 32 U/L (normal) - - AST: 28 U/L (normal) - - Lipid Panel: - - Total Cholesterol: 198 mg/dL - - Triglycerides: 172 mg/dL (elevated) - - HDL: 42 mg/dL (low) - - LDL: 122 mg/dL (borderline elevated) - - Urinalysis: - - Microalbumin/Creatinine ratio: 45 mg/g (elevated) - """ - - treatment_goals = """ - Primary Goals: - - Improve glycemic control (target HbA1c < 7.0%) - - Blood pressure control (target < 130/80 mmHg) - - Lipid management (target LDL < 100 mg/dL) - - Renal protection (reduce microalbuminuria) - - Weight management (target BMI < 27) - - Pain management for osteoarthritis - - Maintain functional independence - - Patient Preferences: - - Prefers to minimize medication changes if possible - - Interested in dietary approaches - - Concerned about memory changes - - Limited exercise tolerance due to knee pain - """ - - result = analyze_patient_health_data(patient_data, medical_history, lab_results, 
treatment_goals) - - if result: - # Write the analysis to a report file - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") - with open(f"patient_analysis_{timestamp}.json", "w") as f: - json.dump(result, f, indent=2) - - print(f"Analysis saved to patient_analysis_{timestamp}.json") - - # Display key findings - if "key_findings" in result: - print("\nKEY FINDINGS:") - for i, finding in enumerate(result["key_findings"]): - print(f" {i+1}. {finding}") - - # Display recommendations - if "recommendations" in result: - print("\nRECOMMENDATIONS:") - for i, rec in enumerate(result["recommendations"]): - print(f" {i+1}. {rec}") -``` - ## Error Handling The Swarms API follows standard HTTP status codes for error responses: @@ -1112,7 +1226,6 @@ Error responses include a detailed message explaining the issue: | Error Handling | Implement robust error handling and retries | | Logging | Log API responses for debugging and auditing | | Cost Monitoring | Monitor costs closely during development and testing | -| Scheduling | Use scheduled jobs for recurring tasks instead of polling | ### Cost Optimization diff --git a/multiple_image_processing.py b/multiple_image_processing.py index febb29fe..3d90f612 100644 --- a/multiple_image_processing.py +++ b/multiple_image_processing.py @@ -1,7 +1,4 @@ from swarms import Agent -from swarms.prompts.logistics import ( - Quality_Control_Agent_Prompt, -) # Image for analysis @@ -12,8 +9,8 @@ quality_control_agent = Agent( agent_name="Quality Control Agent", agent_description="A quality control agent that analyzes images and provides a detailed report on the quality of the product in the image.", model_name="claude-3-5-sonnet-20240620", - system_prompt=Quality_Control_Agent_Prompt, - multi_modal=True, + # system_prompt=Quality_Control_Agent_Prompt, + # multi_modal=True, max_loops=1, output_type="str-all-except-first", summarize_multiple_images=True, @@ -22,7 +19,7 @@ quality_control_agent = Agent( response = quality_control_agent.run( 
task="what is in the image?", - imgs=[factory_image, factory_image], + imgs=[factory_image, "burning_image.jpg"], ) print(response) diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index b138cef2..12d2306a 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -985,7 +985,7 @@ class Agent: self.short_memory.add(role=self.user_name, content=task) - if self.plan_enabled or self.planning_prompt is not None: + if self.plan_enabled is True: self.plan(task) # Set the loop count @@ -1360,10 +1360,15 @@ class Agent: # Get the current conversation history history = self.short_memory.get_str() + plan_prompt = f"Create a comprehensive step-by-step plan to complete the following task: \n\n {task}" + # Construct the planning prompt by combining history, planning prompt, and task - planning_prompt = ( - f"{history}\n\n{self.planning_prompt}\n\nTask: {task}" - ) + if exists(self.planning_prompt): + planning_prompt = f"{history}\n\n{self.planning_prompt}\n\nTask: {task}" + else: + planning_prompt = ( + f"{history}\n\n{plan_prompt}\n\nTask: {task}" + ) # Generate the plan using the LLM plan = self.llm.run(task=planning_prompt, *args, **kwargs) @@ -1371,9 +1376,6 @@ class Agent: # Store the generated plan in short-term memory self.short_memory.add(role=self.agent_name, content=plan) - logger.info( - f"Successfully created plan for task: {task[:50]}..." 
- ) return None except Exception as error: @@ -2501,6 +2503,7 @@ class Agent: task: Optional[Union[str, Any]] = None, img: Optional[str] = None, imgs: Optional[List[str]] = None, + correct_answer: Optional[str] = None, *args, **kwargs, ) -> Any: @@ -2534,6 +2537,14 @@ class Agent: output = self.run_multiple_images( task=task, imgs=imgs, *args, **kwargs ) + elif exists(correct_answer): + output = self.continuous_run_with_answer( + task=task, + img=img, + correct_answer=correct_answer, + *args, + **kwargs, + ) else: output = self._run( task=task, @@ -2909,7 +2920,7 @@ class Agent: self, task: str, imgs: List[str], *args, **kwargs ): """ - Run the agent with multiple images. + Run the agent with multiple images using concurrent processing. Args: task (str): The task to be performed on each image. @@ -2932,12 +2943,33 @@ class Agent: Raises: Exception: If an error occurs while processing any of the images. """ - outputs = [] - for img in imgs: - output = self.run(task=task, img=img, *args, **kwargs) - outputs.append(output) + # Calculate number of workers as 95% of available CPU cores + cpu_count = os.cpu_count() + max_workers = max(1, int(cpu_count * 0.95)) + + # Use ThreadPoolExecutor for concurrent processing + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Submit all image processing tasks + future_to_img = { + executor.submit( + self.run, task=task, img=img, *args, **kwargs + ): img + for img in imgs + } + + # Collect results in order + outputs = [] + for future in future_to_img: + try: + output = future.result() + outputs.append(output) + except Exception as e: + logger.error(f"Error processing image: {e}") + outputs.append( + None + ) # or raise the exception based on your preference - # Combine the outputs into a single string + # Combine the outputs into a single string if summarization is enabled if self.summarize_multiple_images is True: output = "\n".join(outputs) @@ -2959,3 +2991,58 @@ class Agent: outputs = self.run(task=prompt, *args, 
**kwargs) return outputs + + def continuous_run_with_answer( + self, + task: str, + img: Optional[str] = None, + correct_answer: str = None, + max_attempts: int = 10, + ): + """ + Run the agent with the task until the correct answer is provided. + + Args: + task (str): The task to be performed + correct_answer (str): The correct answer that must be found in the response + max_attempts (int): Maximum number of attempts before giving up (default: 10) + + Returns: + str: The response containing the correct answer + + Raises: + Exception: If max_attempts is reached without finding the correct answer + """ + attempts = 0 + + while attempts < max_attempts: + attempts += 1 + + if self.verbose: + logger.info( + f"Attempt {attempts}/{max_attempts} to find correct answer" + ) + + response = self._run(task=task, img=img) + + # Check if the correct answer is in the response (case-insensitive) + if correct_answer.lower() in response.lower(): + if self.verbose: + logger.info( + f"Correct answer found on attempt {attempts}" + ) + return response + else: + # Add feedback to help guide the agent + feedback = "Your previous response was incorrect. Think carefully about the question and ensure your response directly addresses what was asked." + self.short_memory.add(role="User", content=feedback) + + if self.verbose: + logger.info( + f"Correct answer not found. 
Expected: '{correct_answer}'" + ) + + # If we reach here, we've exceeded max_attempts + raise Exception( + f"Failed to find correct answer '{correct_answer}' after {max_attempts} attempts" + ) diff --git a/vision_tools.py b/vision_tools.py new file mode 100644 index 00000000..f0ec102e --- /dev/null +++ b/vision_tools.py @@ -0,0 +1,68 @@ +from swarms.structs import Agent +from swarms.prompts.logistics import ( + Quality_Control_Agent_Prompt, +) + + +# Image for analysis +factory_image = "image.jpg" + + +def security_analysis(danger_level: str) -> str: + """ + Analyzes the security danger level and returns an appropriate response. + + Args: + danger_level (str, optional): The level of danger to analyze. + Can be "low", "medium", "high", or None. Defaults to None. + + Returns: + str: A string describing the danger level assessment. + - "No danger level provided" if danger_level is None + - "No danger" if danger_level is "low" + - "Medium danger" if danger_level is "medium" + - "High danger" if danger_level is "high" + - "Unknown danger level" for any other value + """ + if danger_level is None: + return "No danger level provided" + + if danger_level == "low": + return "No danger" + + if danger_level == "medium": + return "Medium danger" + + if danger_level == "high": + return "High danger" + + return "Unknown danger level" + + +custom_system_prompt = f""" +{Quality_Control_Agent_Prompt} + +You have access to tools that can help you with your analysis. When you need to perform a security analysis, you MUST use the security_analysis function with an appropriate danger level (low, medium, or high) based on your observations. + +Always use the available tools when they are relevant to the task. If you determine there is any level of danger or security concern, call the security_analysis function with the appropriate danger level. 
+""" + +# Quality control agent +quality_control_agent = Agent( + agent_name="Quality Control Agent", + agent_description="A quality control agent that analyzes images and provides a detailed report on the quality of the product in the image.", + # model_name="anthropic/claude-3-opus-20240229", + model_name="gpt-4o-mini", + system_prompt=custom_system_prompt, + multi_modal=True, + max_loops=1, + output_type="str-all-except-first", + # tools_list_dictionary=[schema], + tools=[security_analysis], +) + + +response = quality_control_agent.run( + task="Analyze the image and then perform a security analysis. Based on what you see in the image, determine if there is a low, medium, or high danger level and call the security_analysis function with that danger level", + img=factory_image, +) From 57eff390a509adc19608a6504f7d47e787405d18 Mon Sep 17 00:00:00 2001 From: harshalmore31 Date: Wed, 25 Jun 2025 23:09:23 +0530 Subject: [PATCH 09/86] Refactor tool execution logic to improve handling of callable tools and None responses --- swarms/structs/agent.py | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index e2bde34b..ab7ef93e 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -1080,25 +1080,24 @@ class Agent: # Print self.pretty_print(response, loop_count) - # Handle tools - if ( - hasattr(self, "tool_struct") - and self.tool_struct is not None - and self.output_raw_json_from_tool_call - is True - ): - response = response - else: - # Only execute tools if response is not None - if response is not None: - self.execute_tools( - response=response, - loop_count=loop_count, - ) + # Check and execute callable tools + if exists(self.tools): + if ( + self.tool_struct is not None + and self.output_raw_json_from_tool_call is True + ): + response = response else: - logger.warning( - f"LLM returned None response in loop {loop_count}, skipping tool execution" - ) + 
# Only execute tools if response is not None + if response is not None: + self.execute_tools( + response=response, + loop_count=loop_count, + ) + else: + logger.warning( + f"LLM returned None response in loop {loop_count}, skipping tool execution" + ) # Handle MCP tools if ( From d9cab648bc609b5596ec408aa33abbef49650d0d Mon Sep 17 00:00:00 2001 From: harshalmore31 Date: Wed, 25 Jun 2025 23:12:10 +0530 Subject: [PATCH 10/86] Refactor condition for tool execution to enhance readability --- swarms/structs/agent.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index ab7ef93e..eb56ce68 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -1083,8 +1083,8 @@ class Agent: # Check and execute callable tools if exists(self.tools): if ( - self.tool_struct is not None - and self.output_raw_json_from_tool_call is True + self.output_raw_json_from_tool_call + is True ): response = response else: From 7277a3ffbb7ba4601625ecb58a06d6fe602e7f74 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Wed, 25 Jun 2025 12:20:44 -0700 Subject: [PATCH 11/86] mixture of agents improvement and fix and example in docs with image --- docs/mkdocs.yml | 29 ++-- docs/swarms/examples/moa_example.md | 132 ++++++++++++++++++ mixture_of_agents_example.py | 80 +++++++++++ swarms/structs/ma_utils.py | 19 ++- swarms/structs/mixture_of_agents.py | 201 ++++++++++++---------------- 5 files changed, 329 insertions(+), 132 deletions(-) create mode 100644 docs/swarms/examples/moa_example.md create mode 100644 mixture_of_agents_example.py diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index a63a2968..99ea5a2a 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -61,19 +61,19 @@ extra: provider: google property: G-MPE9C65596 - alternate: - - name: English - link: / - lang: en - - name: 简体中文 - link: /zh/ - lang: zh - - name: ę—„ęœ¬čŖž - link: /ja/ - lang: ja - - name: ķ•œźµ­ģ–“ - link: /ko/ - lang: ko + # alternate: + # - name: 
English + # link: / + # lang: en + # - name: 简体中文 + # link: /zh/ + # lang: zh + # - name: ę—„ęœ¬čŖž + # link: /ja/ + # lang: ja + # - name: ķ•œźµ­ģ–“ + # link: /ko/ + # lang: ko theme: name: material @@ -348,7 +348,8 @@ nav: - SwarmRouter Example: "swarms/examples/swarm_router.md" - MultiAgentRouter Minimal Example: "swarms/examples/multi_agent_router_minimal.md" - ConcurrentWorkflow Example: "swarms/examples/concurrent_workflow.md" - - MixtureOfAgents Example: "swarms/examples/mixture_of_agents.md" + # - MixtureOfAgents Example: "swarms/examples/mixture_of_agents.md" + - Mixture of Agents Example: "swarms/examples/moa_example.md" - Unique Swarms: "swarms/examples/unique_swarms.md" - Agents as Tools: "swarms/examples/agents_as_tools.md" - Aggregate Multi-Agent Responses: "swarms/examples/aggregate.md" diff --git a/docs/swarms/examples/moa_example.md b/docs/swarms/examples/moa_example.md new file mode 100644 index 00000000..3ce7d24c --- /dev/null +++ b/docs/swarms/examples/moa_example.md @@ -0,0 +1,132 @@ +# Mixture of Agents Example + +The Mixture of Agents (MoA) is a sophisticated multi-agent architecture that implements parallel processing with iterative refinement. This approach processes multiple specialized agents simultaneously, concatenates their outputs, and then performs multiple parallel runs to achieve consensus or enhanced results. + +## How It Works + +1. **Parallel Processing**: Multiple agents work simultaneously on the same input +2. **Output Concatenation**: Results from all agents are combined into a unified response +3. **Iterative Refinement**: The process repeats for `n` layers/iterations to improve quality +4. **Consensus Building**: Multiple runs help achieve more reliable and comprehensive outputs + +This architecture is particularly effective for complex tasks that benefit from diverse perspectives and iterative improvement, such as financial analysis, risk assessment, and multi-faceted problem solving. 
+ +![Mixture of Agents](https://files.readme.io/ddb138e-moa-3layer.png) + + +## Installation + +Install the swarms package using pip: + +```bash +pip install -U swarms +``` + +## Basic Setup + +1. First, set up your environment variables: + +```python +WORKSPACE_DIR="agent_workspace" +ANTHROPIC_API_KEY="" +``` + +## Code + +```python +from swarms import Agent, MixtureOfAgents + +# Agent 1: Risk Metrics Calculator +risk_metrics_agent = Agent( + agent_name="Risk-Metrics-Calculator", + agent_description="Calculates key risk metrics like VaR, Sharpe ratio, and volatility", + system_prompt="""You are a risk metrics specialist. Calculate and explain: + - Value at Risk (VaR) + - Sharpe ratio + - Volatility + - Maximum drawdown + - Beta coefficient + + Provide clear, numerical results with brief explanations.""", + max_loops=1, + # model_name="gpt-4o-mini", + random_model_enabled=True, + dynamic_temperature_enabled=True, + output_type="str-all-except-first", + max_tokens=4096, +) + +# Agent 2: Portfolio Risk Analyzer +portfolio_risk_agent = Agent( + agent_name="Portfolio-Risk-Analyzer", + agent_description="Analyzes portfolio diversification and concentration risk", + system_prompt="""You are a portfolio risk analyst. Focus on: + - Portfolio diversification analysis + - Concentration risk assessment + - Correlation analysis + - Sector/asset allocation risk + - Liquidity risk evaluation + + Provide actionable insights for risk reduction.""", + max_loops=1, + # model_name="gpt-4o-mini", + random_model_enabled=True, + dynamic_temperature_enabled=True, + output_type="str-all-except-first", + max_tokens=4096, +) + +# Agent 3: Market Risk Monitor +market_risk_agent = Agent( + agent_name="Market-Risk-Monitor", + agent_description="Monitors market conditions and identifies risk factors", + system_prompt="""You are a market risk monitor. 
Identify and assess: + - Market volatility trends + - Economic risk factors + - Geopolitical risks + - Interest rate risks + - Currency risks + + Provide current risk alerts and trends.""", + max_loops=1, + # model_name="gpt-4o-mini", + random_model_enabled=True, + dynamic_temperature_enabled=True, + output_type="str-all-except-first", + max_tokens=4096, +) + + +swarm = MixtureOfAgents( + agents=[ + risk_metrics_agent, + portfolio_risk_agent, + market_risk_agent, + ], + layers=1, + max_loops=1, + output_type="final", +) + + +out = swarm.run( + "Calculate VaR and Sharpe ratio for a portfolio with 15% annual return and 20% volatility" +) + +print(out) +``` + +## Support and Community + +If you're facing issues or want to learn more, check out the following resources to join our Discord, stay updated on Twitter, and watch tutorials on YouTube! + +| Platform | Link | Description | +|----------|------|-------------| +| šŸ“š Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides | +| šŸ“ Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles | +| šŸ’¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support | +| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements | +| šŸ‘„ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates | +| šŸ“ŗ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos | +| šŸŽ« Events | [Sign up here](https://lu.ma/5p2jnc2v) | Join our community events | + diff --git a/mixture_of_agents_example.py b/mixture_of_agents_example.py new file mode 100644 index 00000000..12bbf837 --- /dev/null +++ b/mixture_of_agents_example.py @@ -0,0 +1,80 @@ +from swarms import Agent, MixtureOfAgents + +# Agent 1: Risk Metrics Calculator +risk_metrics_agent = Agent( + agent_name="Risk-Metrics-Calculator", + 
agent_description="Calculates key risk metrics like VaR, Sharpe ratio, and volatility", + system_prompt="""You are a risk metrics specialist. Calculate and explain: + - Value at Risk (VaR) + - Sharpe ratio + - Volatility + - Maximum drawdown + - Beta coefficient + + Provide clear, numerical results with brief explanations.""", + max_loops=1, + # model_name="gpt-4o-mini", + random_model_enabled=True, + dynamic_temperature_enabled=True, + output_type="str-all-except-first", + max_tokens=4096, +) + +# Agent 2: Portfolio Risk Analyzer +portfolio_risk_agent = Agent( + agent_name="Portfolio-Risk-Analyzer", + agent_description="Analyzes portfolio diversification and concentration risk", + system_prompt="""You are a portfolio risk analyst. Focus on: + - Portfolio diversification analysis + - Concentration risk assessment + - Correlation analysis + - Sector/asset allocation risk + - Liquidity risk evaluation + + Provide actionable insights for risk reduction.""", + max_loops=1, + # model_name="gpt-4o-mini", + random_model_enabled=True, + dynamic_temperature_enabled=True, + output_type="str-all-except-first", + max_tokens=4096, +) + +# Agent 3: Market Risk Monitor +market_risk_agent = Agent( + agent_name="Market-Risk-Monitor", + agent_description="Monitors market conditions and identifies risk factors", + system_prompt="""You are a market risk monitor. 
Identify and assess: + - Market volatility trends + - Economic risk factors + - Geopolitical risks + - Interest rate risks + - Currency risks + + Provide current risk alerts and trends.""", + max_loops=1, + # model_name="gpt-4o-mini", + random_model_enabled=True, + dynamic_temperature_enabled=True, + output_type="str-all-except-first", + max_tokens=4096, +) + + +swarm = MixtureOfAgents( + agents=[ + risk_metrics_agent, + portfolio_risk_agent, + market_risk_agent, + ], + layers=1, + max_loops=1, + output_type="final", +) + + +out = swarm.run( + "Calculate VaR and Sharpe ratio for a portfolio with 15% annual return and 20% volatility" +) + +print(out) diff --git a/swarms/structs/ma_utils.py b/swarms/structs/ma_utils.py index 8d28b76e..a3a9eeb8 100644 --- a/swarms/structs/ma_utils.py +++ b/swarms/structs/ma_utils.py @@ -5,8 +5,9 @@ import random def list_all_agents( agents: List[Union[Callable, Any]], conversation: Optional[Any] = None, - name: str = "", - add_to_conversation: bool = False, + name: Optional[str] = None, + description: Optional[str] = None, + add_to_conversation: Optional[bool] = False, ) -> str: """Lists all agents in a swarm and optionally adds them to a conversation. @@ -27,6 +28,7 @@ def list_all_agents( >>> conversation = Conversation() >>> agent_info = list_all_agents(agents, conversation, "MySwarm") >>> print(agent_info) + Swarm: MySwarm Total Agents: 2 Agent: Agent1 @@ -39,8 +41,15 @@ def list_all_agents( # Compile information about all agents total_agents = len(agents) - all_agents = f"Total Agents: {total_agents}\n\n" + "\n\n".join( - f"Agent: {agent.agent_name} \n\n Description: {agent.description or (agent.system_prompt[:50] + '...' 
if len(agent.system_prompt) > 50 else agent.system_prompt)}" + all_agents = f"Team Name: {name}\n" if name else "" + all_agents += ( + f"Team Description: {description}\n" if description else "" + ) + all_agents += f"Total Agents: {total_agents}\n\n" + all_agents += "| Agent | Description |\n" + all_agents += "|-------|-------------|\n" + all_agents += "\n".join( + f"| {agent.agent_name} | {agent.description or (agent.system_prompt[:50] + '...' if len(agent.system_prompt) > 50 else agent.system_prompt)} |" for agent in agents ) @@ -48,7 +57,7 @@ def list_all_agents( # Add the agent information to the conversation conversation.add( role="System", - content=f"All Agents Available in the Swarm {name}:\n\n{all_agents}", + content=all_agents, ) return all_agents diff --git a/swarms/structs/mixture_of_agents.py b/swarms/structs/mixture_of_agents.py index 9c6b8756..3bab8211 100644 --- a/swarms/structs/mixture_of_agents.py +++ b/swarms/structs/mixture_of_agents.py @@ -1,17 +1,18 @@ -import asyncio import os from typing import List, Optional from swarms.structs.agent import Agent from swarms.prompts.ag_prompt import aggregator_system_prompt_main +from swarms.structs.ma_utils import list_all_agents +from swarms.utils.history_output_formatter import ( + history_output_formatter, +) from swarms.utils.loguru_logger import initialize_logger import concurrent.futures from swarms.utils.output_types import OutputType from swarms.structs.conversation import Conversation -from swarms.utils.history_output_formatter import ( - history_output_formatter, -) + logger = initialize_logger(log_folder="mixture_of_agents") @@ -25,13 +26,13 @@ class MixtureOfAgents: self, name: str = "MixtureOfAgents", description: str = "A class to run a mixture of agents and aggregate their responses.", - agents: List[Agent] = [], + agents: List[Agent] = None, aggregator_agent: Agent = None, aggregator_system_prompt: str = aggregator_system_prompt_main, layers: int = 3, max_loops: int = 1, - return_str_on: 
bool = False, - output_type: OutputType = "dict", + output_type: OutputType = "final", + aggregator_model_name: str = "claude-3-5-sonnet-20240620", ) -> None: """ Initialize the Mixture of Agents class with agents and configuration. @@ -48,16 +49,36 @@ class MixtureOfAgents: self.description = description self.agents = agents self.aggregator_agent = aggregator_agent - self.aggregator_system_prompt = aggregator_system_prompt_main + self.aggregator_system_prompt = aggregator_system_prompt self.layers = layers self.max_loops = max_loops - self.return_str_on = return_str_on self.output_type = output_type + self.aggregator_model_name = aggregator_model_name + self.aggregator_agent = self.aggregator_agent_setup() self.reliability_check() self.conversation = Conversation() + list_all_agents( + agents=self.agents, + conversation=self.conversation, + description=self.description, + name=self.name, + add_to_conversation=True, + ) + + def aggregator_agent_setup(self): + return Agent( + agent_name="Aggregator Agent", + description="An agent that aggregates the responses of the other agents.", + system_prompt=aggregator_system_prompt_main, + model_name=self.aggregator_model_name, + temperature=0.5, + max_loops=1, + output_type="str-all-except-first", + ) + def reliability_check(self) -> None: """ Performs a reliability check on the Mixture of Agents class. @@ -66,8 +87,8 @@ class MixtureOfAgents: "Checking the reliability of the Mixture of Agents class." 
) - if not self.agents: - raise ValueError("No reference agents provided.") + if len(self.agents) == 0: + raise ValueError("No agents provided.") if not self.aggregator_agent: raise ValueError("No aggregator agent provided.") @@ -78,129 +99,83 @@ class MixtureOfAgents: if not self.layers: raise ValueError("No layers provided.") - if self.layers < 1: - raise ValueError("Layers must be greater than 0.") - logger.info("Reliability check passed.") logger.info("Mixture of Agents class is ready for use.") - def _get_final_system_prompt( - self, system_prompt: str, results: List[str] - ) -> str: - """ - Constructs a system prompt for subsequent layers that includes previous responses. + def save_to_markdown_file(self, file_path: str = "moa.md"): + with open(file_path, "w") as f: + f.write(self.conversation.get_str()) - Args: - system_prompt (str): The initial system prompt. - results (List[str]): A list of previous responses. - - Returns: - str: The final system prompt including previous responses. - """ - return ( - system_prompt - + "\n" - + "\n".join( - [ - f"{i+1}. {str(element)}" - for i, element in enumerate(results) - ] - ) - ) - - async def _run_agent_async( + def step( self, - agent: Agent, task: str, - prev_responses: Optional[List[str]] = None, - ) -> str: - """ - Asynchronous method to run a single agent. + img: Optional[str] = None, + imgs: Optional[List[str]] = None, + ): + # self.conversation.add(role="User", content=task) - Args: - agent (Agent): The agent to be run. - task (str): The task for the agent. - prev_responses (Optional[List[str]], optional): A list of previous responses. Defaults to None. + # Run agents concurrently + with concurrent.futures.ThreadPoolExecutor( + max_workers=os.cpu_count() + ) as executor: + # Submit all agent tasks and store with their index + future_to_agent = { + executor.submit( + agent.run, task=task, img=img, imgs=imgs + ): agent + for agent in self.agents + } - Returns: - str: The response from the agent. 
- """ - # If there are previous responses, update the agent's system prompt - if prev_responses: - system_prompt_with_responses = ( - self._get_final_system_prompt( - self.aggregator_system_prompt, prev_responses - ) - ) - agent.system_prompt = system_prompt_with_responses + # Collect results and add to conversation in completion order + for future in concurrent.futures.as_completed( + future_to_agent + ): + agent = future_to_agent[future] + output = future.result() + self.conversation.add(role=agent.name, content=output) - # Run the agent asynchronously - response = await asyncio.to_thread(agent.run, task) + return self.conversation.get_str() - self.conversation.add(agent.agent_name, response) + def _run( + self, + task: str, + img: Optional[str] = None, + imgs: Optional[List[str]] = None, + ): - # Log the agent's response - print(f"Agent {agent.agent_name} response: {response}") - return response + self.conversation.add(role="User", content=task) - async def _run_async(self, task: str) -> None: - """ - Asynchronous method to run the Mixture of Agents process. + for i in range(self.layers): + out = self.step( + task=self.conversation.get_str(), img=img, imgs=imgs + ) + task = out - Args: - task (str): The task for the mixture of agents. 
- """ - # Gather initial responses from reference agents - results: List[str] = await asyncio.gather( - *[ - self._run_agent_async(agent, task) - for agent in self.agents - ] + out = self.aggregator_agent.run( + task=self.conversation.get_str() ) - # Process additional layers, if applicable - for _ in range(1, self.layers - 1): - results = await asyncio.gather( - *[ - self._run_agent_async( - agent, task, prev_responses=results - ) - for agent in self.agents - ] - ) - - # Perform final aggregation using the aggregator agent - final_result = await self._run_agent_async( - self.aggregator_agent, task, prev_responses=results + self.conversation.add( + role=self.aggregator_agent.agent_name, content=out ) - print(f"Final Aggregated Response: {final_result}") + out = history_output_formatter( + conversation=self.conversation, type=self.output_type + ) - def run(self, task: str) -> None: - """ - Synchronous wrapper to run the async process. + return out - Args: - task (str): The task for the mixture of agents. 
- """ + def run( + self, + task: str, + img: Optional[str] = None, + imgs: Optional[List[str]] = None, + ): try: - self.conversation.add("user", task) - - for _ in range(self.max_loops): - # Add previous context to task if available - prompt = f"History: {self.conversation.get_str()}\n\nTask: {task}" - - # Run async process - asyncio.run(self._run_async(prompt)) - - return history_output_formatter( - conversation=self.conversation, - type=self.output_type, - ) - + return self._run(task=task, img=img, imgs=imgs) except Exception as e: - logger.error(f"Error running mixture of agents: {str(e)}") - raise e + logger.error(f"Error running Mixture of Agents: {e}") + return f"Error: {e}" def run_batched(self, tasks: List[str]) -> List[str]: """ From de2382dafedcbc7001c1203e9496fb4be078a39c Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Wed, 25 Jun 2025 15:49:32 -0700 Subject: [PATCH 12/86] vision and tools --- docs/mkdocs.yml | 1 + docs/swarms/examples/vision_tools.md | 140 +++++++++++++++++++++++++++ 2 files changed, 141 insertions(+) create mode 100644 docs/swarms/examples/vision_tools.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 99ea5a2a..dd3ee0fb 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -317,6 +317,7 @@ nav: - Agent with Structured Outputs: "swarms/examples/agent_structured_outputs.md" - Agents with Vision: "swarms/examples/vision_processing.md" - Agent with Multiple Images: "swarms/examples/multiple_images.md" + - Agents with Vision and Tool Usage: "swarms/examples/vision_tools.md" - Gradio Chat Interface: "swarms/ui/main.md" - Various Model Providers: - OpenAI: "swarms/examples/openai_example.md" diff --git a/docs/swarms/examples/vision_tools.md b/docs/swarms/examples/vision_tools.md new file mode 100644 index 00000000..e9a47c0d --- /dev/null +++ b/docs/swarms/examples/vision_tools.md @@ -0,0 +1,140 @@ +# Agents with Vision and Tool Usage + +This tutorial demonstrates how to create intelligent agents that can analyze images and use custom 
tools to perform specific actions based on their visual observations. You'll learn to build a quality control agent that can process images, identify potential security concerns, and automatically trigger appropriate responses using function calling capabilities. + +## What You'll Learn + +- How to configure an agent with multi-modal capabilities for image analysis +- How to integrate custom tools and functions with vision-enabled agents +- How to implement automated security analysis based on visual observations +- How to use function calling to trigger specific actions from image analysis results +- Best practices for building production-ready vision agents with tool integration + +## Use Cases + +This approach is perfect for: + +- **Quality Control Systems**: Automated inspection of manufacturing processes + +- **Security Monitoring**: Real-time threat detection and response + +- **Object Detection**: Identifying and categorizing items in images + +- **Compliance Checking**: Ensuring standards are met in various environments + +- **Automated Reporting**: Generating detailed analysis reports from visual data + +## Installation + +Install the swarms package using pip: + +```bash +pip install -U swarms +``` + +## Basic Setup + +1. First, set up your environment variables: + +```python +WORKSPACE_DIR="agent_workspace" +OPENAI_API_KEY="" +``` + + +## Code + +- Create tools for your agent as a function with types and documentation + +- Pass tools to your agent `Agent(tools=[list_of_callables])` + +- Add your image path to the run method like: `Agent().run(task=task, img=img)` + +- + +```python +from swarms.structs import Agent +from swarms.prompts.logistics import ( + Quality_Control_Agent_Prompt, +) + + +# Image for analysis +factory_image = "image.jpg" + + +def security_analysis(danger_level: str) -> str: + """ + Analyzes the security danger level and returns an appropriate response. + + Args: + danger_level (str, optional): The level of danger to analyze. 
+ Can be "low", "medium", "high", or None. Defaults to None. + + Returns: + str: A string describing the danger level assessment. + - "No danger level provided" if danger_level is None + - "No danger" if danger_level is "low" + - "Medium danger" if danger_level is "medium" + - "High danger" if danger_level is "high" + - "Unknown danger level" for any other value + """ + if danger_level is None: + return "No danger level provided" + + if danger_level == "low": + return "No danger" + + if danger_level == "medium": + return "Medium danger" + + if danger_level == "high": + return "High danger" + + return "Unknown danger level" + + +custom_system_prompt = f""" +{Quality_Control_Agent_Prompt} + +You have access to tools that can help you with your analysis. When you need to perform a security analysis, you MUST use the security_analysis function with an appropriate danger level (low, medium, or high) based on your observations. + +Always use the available tools when they are relevant to the task. If you determine there is any level of danger or security concern, call the security_analysis function with the appropriate danger level. +""" + +# Quality control agent +quality_control_agent = Agent( + agent_name="Quality Control Agent", + agent_description="A quality control agent that analyzes images and provides a detailed report on the quality of the product in the image.", + # model_name="anthropic/claude-3-opus-20240229", + model_name="gpt-4o-mini", + system_prompt=custom_system_prompt, + multi_modal=True, + max_loops=1, + output_type="str-all-except-first", + # tools_list_dictionary=[schema], + tools=[security_analysis], +) + + +response = quality_control_agent.run( + task="Analyze the image and then perform a security analysis. 
Based on what you see in the image, determine if there is a low, medium, or high danger level and call the security_analysis function with that danger level", + img=factory_image, +) +``` + + +## Support and Community + +If you're facing issues or want to learn more, check out the following resources to join our Discord, stay updated on Twitter, and watch tutorials on YouTube! + +| Platform | Link | Description | +|----------|------|-------------| +| šŸ“š Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides | +| šŸ“ Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles | +| šŸ’¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support | +| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements | +| šŸ‘„ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates | +| šŸ“ŗ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos | +| šŸŽ« Events | [Sign up here](https://lu.ma/5p2jnc2v) | Join our community events | + From b5694e26ae2929cc47feee2626fc4941e45b82fd Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Wed, 25 Jun 2025 15:55:16 -0700 Subject: [PATCH 13/86] clean up vision and tools --- docs/swarms/examples/vision_tools.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/swarms/examples/vision_tools.md b/docs/swarms/examples/vision_tools.md index e9a47c0d..92b487c7 100644 --- a/docs/swarms/examples/vision_tools.md +++ b/docs/swarms/examples/vision_tools.md @@ -50,8 +50,6 @@ OPENAI_API_KEY="" - Add your image path to the run method like: `Agent().run(task=task, img=img)` -- - ```python from swarms.structs import Agent from swarms.prompts.logistics import ( From adfdabba20cb580493a7f1017fb6f4eb533816aa Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Thu, 26 Jun 2025 14:21:05 -0700 Subject: [PATCH 14/86] 
[FIX][agent.py] -- self.no_print -> self.print_on] [ENHC][Improve the concurrent workflow] [collaborative prompt] [fix][improve list_all_agents prompt] --- .../single_agent/vision/burning_image.jpg | Bin .../single_agent/vision/image.jpg | Bin .../vision/multiple_image_processing.py | 2 +- .../single_agent/vision/vision_tools.py | 0 swarm_router_test.py | 81 +++++ swarms/cli/onboarding_process.py | 4 +- swarms/prompts/collaborative_prompts.py | 177 +++++++++ swarms/structs/agent.py | 12 +- swarms/structs/concurrent_workflow.py | 266 ++++---------- swarms/structs/ma_utils.py | 11 +- swarms/structs/swarm_router.py | 335 ++++++++---------- swarms/telemetry/__init__.py | 22 +- swarms/telemetry/log_executions.py | 43 +++ swarms/telemetry/main.py | 278 +++------------ 14 files changed, 598 insertions(+), 633 deletions(-) rename burning_image.jpg => examples/single_agent/vision/burning_image.jpg (100%) rename image.jpg => examples/single_agent/vision/image.jpg (100%) rename multiple_image_processing.py => examples/single_agent/vision/multiple_image_processing.py (86%) rename vision_tools.py => examples/single_agent/vision/vision_tools.py (100%) create mode 100644 swarm_router_test.py create mode 100644 swarms/prompts/collaborative_prompts.py create mode 100644 swarms/telemetry/log_executions.py diff --git a/burning_image.jpg b/examples/single_agent/vision/burning_image.jpg similarity index 100% rename from burning_image.jpg rename to examples/single_agent/vision/burning_image.jpg diff --git a/image.jpg b/examples/single_agent/vision/image.jpg similarity index 100% rename from image.jpg rename to examples/single_agent/vision/image.jpg diff --git a/multiple_image_processing.py b/examples/single_agent/vision/multiple_image_processing.py similarity index 86% rename from multiple_image_processing.py rename to examples/single_agent/vision/multiple_image_processing.py index 3d90f612..da67bb94 100644 --- a/multiple_image_processing.py +++ 
b/examples/single_agent/vision/multiple_image_processing.py @@ -18,7 +18,7 @@ quality_control_agent = Agent( response = quality_control_agent.run( - task="what is in the image?", + task="Analyze our factories images and provide a detailed health report for each factory.", imgs=[factory_image, "burning_image.jpg"], ) diff --git a/vision_tools.py b/examples/single_agent/vision/vision_tools.py similarity index 100% rename from vision_tools.py rename to examples/single_agent/vision/vision_tools.py diff --git a/swarm_router_test.py b/swarm_router_test.py new file mode 100644 index 00000000..016953ff --- /dev/null +++ b/swarm_router_test.py @@ -0,0 +1,81 @@ +import json +from swarms import Agent, SwarmRouter + +# Agent 1: Risk Metrics Calculator +risk_metrics_agent = Agent( + agent_name="Risk-Metrics-Calculator", + agent_description="Calculates key risk metrics like VaR, Sharpe ratio, and volatility", + system_prompt="""You are a risk metrics specialist. Calculate and explain: + - Value at Risk (VaR) + - Sharpe ratio + - Volatility + - Maximum drawdown + - Beta coefficient + + Provide clear, numerical results with brief explanations.""", + max_loops=1, + # model_name="gpt-4o-mini", + random_model_enabled=True, + dynamic_temperature_enabled=True, + output_type="str-all-except-first", + max_tokens=4096, +) + +# Agent 2: Portfolio Risk Analyzer +portfolio_risk_agent = Agent( + agent_name="Portfolio-Risk-Analyzer", + agent_description="Analyzes portfolio diversification and concentration risk", + system_prompt="""You are a portfolio risk analyst. 
Focus on: + - Portfolio diversification analysis + - Concentration risk assessment + - Correlation analysis + - Sector/asset allocation risk + - Liquidity risk evaluation + + Provide actionable insights for risk reduction.""", + max_loops=1, + # model_name="gpt-4o-mini", + random_model_enabled=True, + dynamic_temperature_enabled=True, + output_type="str-all-except-first", + max_tokens=4096, +) + +# Agent 3: Market Risk Monitor +market_risk_agent = Agent( + agent_name="Market-Risk-Monitor", + agent_description="Monitors market conditions and identifies risk factors", + system_prompt="""You are a market risk monitor. Identify and assess: + - Market volatility trends + - Economic risk factors + - Geopolitical risks + - Interest rate risks + - Currency risks + + Provide current risk alerts and trends.""", + max_loops=1, + # model_name="gpt-4o-mini", + random_model_enabled=True, + dynamic_temperature_enabled=True, + output_type="str-all-except-first", + max_tokens=4096, +) + + +swarm = SwarmRouter( + agents=[ + risk_metrics_agent, + portfolio_risk_agent, + ], + max_loops=1, + swarm_type="MixtureOfAgents", + output_type="final", +) + + +# swarm.run( +# "Calculate VaR and Sharpe ratio for a portfolio with 15% annual return and 20% volatility" +# ) + + +print(f"Swarm config: {json.dumps(swarm.to_dict(), indent=4)}") diff --git a/swarms/cli/onboarding_process.py b/swarms/cli/onboarding_process.py index e279d9e3..8085c688 100644 --- a/swarms/cli/onboarding_process.py +++ b/swarms/cli/onboarding_process.py @@ -7,7 +7,6 @@ from swarms.utils.loguru_logger import initialize_logger from swarms.telemetry.main import ( - capture_system_data, log_agent_data, ) @@ -34,7 +33,6 @@ class OnboardingProcess: cache_save_path (str): The path where user data is cached for reliability. 
""" self.user_data: Dict[str, str] = {} - self.system_data: Dict[str, str] = capture_system_data() self.auto_save_path = auto_save_path self.cache_save_path = cache_save_path self.load_existing_data() @@ -85,7 +83,7 @@ class OnboardingProcess: while attempt < retry_attempts: try: - combined_data = {**self.user_data, **self.system_data} + combined_data = {**self.user_data} log_agent_data(combined_data) return # Exit the function if saving was successful except Exception as e: diff --git a/swarms/prompts/collaborative_prompts.py b/swarms/prompts/collaborative_prompts.py new file mode 100644 index 00000000..4a04245b --- /dev/null +++ b/swarms/prompts/collaborative_prompts.py @@ -0,0 +1,177 @@ +def get_multi_agent_collaboration_prompt_one(agents_in_swarm: str): + MULTI_AGENT_COLLABORATION_PROMPT_ONE = f""" + You are all operating within a multi-agent collaborative system. Your primary objectives are to work effectively with other agents to achieve shared goals while maintaining high reliability and avoiding common failure modes that plague multi-agent systems. + + {agents_in_swarm} + + ## Fundamental Collaboration Principles + + ### 1. Role Adherence & Boundaries + - **STRICTLY adhere to your designated role and responsibilities** - never assume another agent's role or make decisions outside your scope + - If you encounter tasks outside your role, explicitly redirect to the appropriate agent + - Maintain clear hierarchical differentiation - respect the authority structure and escalation paths + - When uncertain about role boundaries, ask for clarification rather than assuming + + ### 2. 
Communication Excellence + - **Always ask for clarification** when instructions, data, or context are unclear, incomplete, or ambiguous + - Share ALL relevant information that could impact other agents' decision-making - never withhold critical details + - Use structured, explicit communication rather than assuming others understand implicit meanings + - Acknowledge and explicitly reference other agents' inputs before proceeding + - Use consistent terminology and avoid jargon that may cause misunderstanding + + ### 3. Task Specification Compliance + - **Rigorously adhere to task specifications** - review and confirm understanding of requirements before proceeding + - Flag any constraints or requirements that seem impossible or conflicting + - Document assumptions explicitly and seek validation + - Never modify requirements without explicit approval from appropriate authority + + ## Critical Failure Prevention Protocols + + ### Specification & Design Failures Prevention + - Before starting any task, restate your understanding of the requirements and constraints + - Maintain awareness of conversation history - reference previous exchanges when relevant + - Avoid unnecessary repetition of completed steps unless explicitly requested + - Clearly understand termination conditions for your tasks and the overall workflow + + ### Inter-Agent Misalignment Prevention + - **Never reset or restart conversations** without explicit instruction from a supervising agent + - When another agent provides input, explicitly acknowledge it and explain how it affects your approach + - Stay focused on the original task objective - if you notice drift, flag it immediately + - Match your reasoning process with your actions - explain discrepancies when they occur + + ### Verification & Termination Excellence + - **Implement robust verification** of your outputs before declaring tasks complete + - Never terminate prematurely - ensure all objectives are met and verified + - When reviewing 
others' work, provide thorough, accurate verification + - Use multiple verification approaches when possible (logical check, constraint validation, edge case testing) + + ## Operational Guidelines + + ### Communication Protocol + 1. **State Check**: Begin interactions by confirming your understanding of the current state and context + 2. **Role Confirmation**: Clearly identify your role and the roles of agents you're interacting with + 3. **Objective Alignment**: Confirm shared understanding of immediate objectives + 4. **Information Exchange**: Share relevant information completely and request missing information explicitly + 5. **Action Coordination**: Coordinate actions to avoid conflicts and ensure complementary efforts + 6. **Verification**: Verify outcomes and seek validation when appropriate + 7. **Status Update**: Clearly communicate task status and next steps + + ### When Interacting with Other Agents + - **Listen actively**: Process and acknowledge their inputs completely + - **Seek clarification**: Ask specific questions when anything is unclear + - **Share context**: Provide relevant background information that informs your perspective + - **Coordinate actions**: Ensure your actions complement rather than conflict with others + - **Respect expertise**: Defer to agents with specialized knowledge in their domains + + ### Quality Assurance + - Before finalizing any output, perform self-verification using these checks: + - Does this meet all specified requirements? + - Are there any edge cases or constraints I haven't considered? + - Is this consistent with information provided by other agents? + - Have I clearly communicated my reasoning and any assumptions? 
+ + ### Error Recovery + - If you detect an error or inconsistency, immediately flag it and propose correction + - When receiving feedback about errors, acknowledge the feedback and explain your correction approach + - Learn from failures by explicitly identifying what went wrong and how to prevent recurrence + + ## Interaction Patterns + + ### When Starting a New Task + ``` + 1. Acknowledge the task assignment + 2. Confirm role boundaries and responsibilities + 3. Identify required inputs and information sources + 4. State assumptions and seek validation + 5. Outline approach and request feedback + 6. Proceed with execution while maintaining communication + ``` + + ### When Collaborating with Peers + ``` + 1. Establish communication channel and protocols + 2. Share relevant context and constraints + 3. Coordinate approaches to avoid duplication or conflicts + 4. Maintain regular status updates + 5. Verify integrated outputs collectively + ``` + + ### When Escalating Issues + ``` + 1. Clearly describe the issue and its implications + 2. Provide relevant context and attempted solutions + 3. Specify what type of resolution or guidance is needed + 4. 
Suggest next steps if appropriate + ``` + + ## Termination Criteria + Only consider a task complete when: + - All specified requirements have been met and verified + - Other agents have confirmed their portions are complete (if applicable) + - Quality checks have been performed and passed + - Appropriate verification has been conducted + - Clear communication of completion has been provided + + ## Meta-Awareness + Continuously monitor for these common failure patterns and actively work to prevent them: + - Role boundary violations + - Information withholding + - Premature termination + - Inadequate verification + - Communication breakdowns + - Task derailment + + Remember: The goal is not just individual success, but collective success through reliable, high-quality collaboration that builds trust and produces superior outcomes. + """ + + return MULTI_AGENT_COLLABORATION_PROMPT_ONE + + +MULTI_AGENT_COLLABORATION_PROMPT_TWO = """ +# Compact Multi-Agent Collaboration Prompt + +## Core Directives + +You are an AI agent in a multi-agent system. Follow these essential collaboration protocols: + +### Role & Boundaries +- **Stay in your designated role** - never assume another agent's responsibilities +- When tasks fall outside your scope, redirect to the appropriate agent +- Respect hierarchy and authority structures + +### Communication Requirements +- **Always ask for clarification** when anything is unclear or incomplete +- **Share all relevant information** - never withhold details that could impact others +- **Acknowledge other agents' inputs** explicitly before proceeding +- Use clear, structured communication + +### Task Execution +- **Confirm task requirements** before starting - restate your understanding +- **Adhere strictly to specifications** - flag conflicts or impossibilities +- **Maintain conversation context** - reference previous exchanges when relevant +- **Verify your work thoroughly** before declaring completion + +### Collaboration Protocol +1. 
**State Check**: Confirm current context and your role +2. **Clarify**: Ask specific questions about unclear elements +3. **Coordinate**: Align actions with other agents to avoid conflicts +4. **Verify**: Check outputs meet requirements and constraints +5. **Communicate**: Clearly report status and next steps + +### Termination Criteria +Only mark tasks complete when: +- All requirements verified as met +- Quality checks passed +- Other agents confirm their portions (if applicable) +- Clear completion communication provided + +### Failure Prevention +Actively watch for and prevent: +- Role boundary violations +- Information withholding +- Premature task termination +- Inadequate verification +- Task objective drift + +**Remember**: Success requires reliable collaboration, not just individual performance. +""" diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 12d2306a..bf8d1ab7 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -403,7 +403,7 @@ class Agent: llm_args: dict = None, load_state_path: str = None, role: agent_roles = "worker", - no_print: bool = False, + print_on: bool = False, tools_list_dictionary: Optional[List[Dict[str, Any]]] = None, mcp_url: Optional[Union[str, MCPConnection]] = None, mcp_urls: List[str] = None, @@ -540,7 +540,7 @@ class Agent: self.llm_args = llm_args self.load_state_path = load_state_path self.role = role - self.no_print = no_print + self.print_on = print_on self.tools_list_dictionary = tools_list_dictionary self.mcp_url = mcp_url self.mcp_urls = mcp_urls @@ -631,7 +631,7 @@ class Agent: ) self.short_memory.add( - role=f"{self.agent_name}", + role=self.agent_name, content=self.tools_list_dictionary, ) @@ -2691,14 +2691,14 @@ class Agent: return self.role def pretty_print(self, response: str, loop_count: int): - if self.no_print is False: + if self.print_on is False: if self.streaming_on is True: # self.stream_response(response) formatter.print_panel_token_by_token( f"{self.agent_name}: 
{response}", title=f"Agent Name: {self.agent_name} [Max Loops: {loop_count}]", ) - elif self.no_print is True: + elif self.print_on is True: pass else: # logger.info(f"Response: {response}") @@ -2818,7 +2818,7 @@ class Agent: # execute_tool_call_simple returns a string directly, not an object with content attribute text_content = f"MCP Tool Response: \n\n {json.dumps(tool_response, indent=2)}" - if self.no_print is False: + if self.print_on is False: formatter.print_panel( text_content, "MCP Tool Response: šŸ› ļø", diff --git a/swarms/structs/concurrent_workflow.py b/swarms/structs/concurrent_workflow.py index c6a653ae..86951e95 100644 --- a/swarms/structs/concurrent_workflow.py +++ b/swarms/structs/concurrent_workflow.py @@ -1,13 +1,10 @@ +import concurrent.futures import os -import time -from concurrent.futures import ThreadPoolExecutor -from functools import lru_cache -from typing import Any, Callable, Dict, List, Optional, Union +from typing import Callable, List, Optional, Union from swarms.structs.agent import Agent from swarms.structs.base_swarm import BaseSwarm from swarms.structs.conversation import Conversation -from swarms.utils.formatter import formatter from swarms.utils.history_output_formatter import ( history_output_formatter, ) @@ -35,9 +32,7 @@ class ConcurrentWorkflow(BaseSwarm): return_str_on (bool): Flag indicating whether to return the output as a string. Defaults to False. auto_generate_prompts (bool): Flag indicating whether to auto-generate prompts for agents. Defaults to False. return_entire_history (bool): Flag indicating whether to return the entire conversation history. Defaults to False. - cache_size (int): The size of the cache. Defaults to 100. - max_retries (int): The maximum number of retry attempts. Defaults to 3. - retry_delay (float): The delay between retry attempts in seconds. Defaults to 1.0. + Raises: ValueError: If the list of agents is empty or if the description is empty. 
@@ -50,13 +45,7 @@ class ConcurrentWorkflow(BaseSwarm): auto_save (bool): Flag indicating whether to automatically save the metadata. output_type (str): The type of output format. max_loops (int): The maximum number of loops for each agent. - return_str_on (bool): Flag indicating whether to return the output as a string. auto_generate_prompts (bool): Flag indicating whether to auto-generate prompts for agents. - return_entire_history (bool): Flag indicating whether to return the entire conversation history. - cache_size (int): The size of the cache. - max_retries (int): The maximum number of retry attempts. - retry_delay (float): The delay between retry attempts in seconds. - _cache (dict): The cache for storing agent outputs. """ def __init__( @@ -68,12 +57,7 @@ class ConcurrentWorkflow(BaseSwarm): auto_save: bool = True, output_type: str = "dict-all-except-first", max_loops: int = 1, - return_str_on: bool = False, auto_generate_prompts: bool = False, - return_entire_history: bool = False, - cache_size: int = 100, - max_retries: int = 3, - retry_delay: float = 1.0, *args, **kwargs, ): @@ -90,63 +74,31 @@ class ConcurrentWorkflow(BaseSwarm): self.metadata_output_path = metadata_output_path self.auto_save = auto_save self.max_loops = max_loops - self.return_str_on = return_str_on self.auto_generate_prompts = auto_generate_prompts - self.max_workers = os.cpu_count() self.output_type = output_type - self.return_entire_history = return_entire_history - self.tasks = [] # Initialize tasks list - self.cache_size = cache_size - self.max_retries = max_retries - self.retry_delay = retry_delay - self._cache = {} self.reliability_check() self.conversation = Conversation() def reliability_check(self): try: - formatter.print_panel( - content=f"\n šŸ·ļø Name: {self.name}\n šŸ“ Description: {self.description}\n šŸ¤– Agents: {len(self.agents)}\n šŸ”„ Max Loops: {self.max_loops}\n ", - title="āš™ļø Concurrent Workflow Settings", - style="bold blue", - ) - formatter.print_panel( 
- content="šŸ” Starting reliability checks", - title="šŸ”’ Reliability Checks", - style="bold blue", - ) - - if self.name is None: - logger.error("āŒ A name is required for the swarm") + if self.agents is None: raise ValueError( - "āŒ A name is required for the swarm" + "ConcurrentWorkflow: No agents provided" ) - if not self.agents or len(self.agents) <= 1: - logger.error( - "āŒ The list of agents must not be empty." - ) + if len(self.agents) == 0: raise ValueError( - "āŒ The list of agents must not be empty." + "ConcurrentWorkflow: No agents provided" ) - if not self.description: - logger.error("āŒ A description is required.") - raise ValueError("āŒ A description is required.") - - formatter.print_panel( - content="āœ… Reliability checks completed successfully", - title="šŸŽ‰ Reliability Checks", - style="bold green", - ) - - except ValueError as e: - logger.error(f"āŒ Reliability check failed: {e}") - raise + if len(self.agents) == 1: + logger.warning( + "ConcurrentWorkflow: Only one agent provided. With ConcurrentWorkflow, you should use at least 2+ agents." 
+ ) except Exception as e: logger.error( - f"šŸ’„ An unexpected error occurred during reliability checks: {e}" + f"ConcurrentWorkflow: Reliability check failed: {e}" ) raise @@ -163,162 +115,84 @@ class ConcurrentWorkflow(BaseSwarm): for agent in self.agents: agent.auto_generate_prompt = True - @lru_cache(maxsize=100) - def _cached_run(self, task: str, agent_id: int) -> Any: - """Cached version of agent execution to avoid redundant computations""" - return self.agents[agent_id].run(task=task) - - def _validate_input(self, task: str) -> bool: - """Validate input task""" - if not isinstance(task, str): - raise ValueError("Task must be a string") - if not task.strip(): - raise ValueError("Task cannot be empty") - return True - - def _run_with_retry( - self, agent: Agent, task: str, img: str = None - ) -> Any: - """Run agent with retry mechanism""" - for attempt in range(self.max_retries): - try: - output = agent.run(task=task, img=img) - self.conversation.add(agent.agent_name, output) - return output - except Exception as e: - if attempt == self.max_retries - 1: - logger.error( - f"Error running agent {agent.agent_name} after {self.max_retries} attempts: {e}" - ) - raise - logger.warning( - f"Attempt {attempt + 1} failed for agent {agent.agent_name}: {e}" - ) - time.sleep( - self.retry_delay * (attempt + 1) - ) # Exponential backoff - - def _process_agent( - self, agent: Agent, task: str, img: str = None - ) -> Any: + def run( + self, + task: str, + img: Optional[str] = None, + imgs: Optional[List[str]] = None, + ): """ - Process a single agent with caching and error handling. + Executes all agents in the workflow concurrently on the given task. Args: - agent: The agent to process - task: Task to execute - img: Optional image input + task (str): The task to be executed by all agents. + img (Optional[str]): Optional image path for agents that support image input. + imgs (Optional[List[str]]): Optional list of image paths for agents that support multiple image inputs. 
Returns: - The agent's output - """ - try: - # Fast path - check cache first - cache_key = f"{task}_{agent.agent_name}" - if cache_key in self._cache: - output = self._cache[cache_key] - else: - # Slow path - run agent and update cache - output = self._run_with_retry(agent, task, img) - - if len(self._cache) >= self.cache_size: - self._cache.pop(next(iter(self._cache))) + The formatted output based on the configured output_type. - self._cache[cache_key] = output - - return output - except Exception as e: - logger.error( - f"Error running agent {agent.agent_name}: {e}" - ) - raise - - def _run( - self, task: str, img: str = None, *args, **kwargs - ) -> Union[Dict[str, Any], str]: - """ - Enhanced run method with parallel execution. + Example: + >>> workflow = ConcurrentWorkflow(agents=[agent1, agent2]) + >>> result = workflow.run("Analyze this financial data") + >>> print(result) """ - # Fast validation - self._validate_input(task) - self.conversation.add("User", task) - - try: - # Parallel execution with optimized thread pool - with ThreadPoolExecutor( - max_workers=self.max_workers - ) as executor: - futures = [ - executor.submit( - self._process_agent, agent, task, img - ) - for agent in self.agents - ] - # Wait for all futures to complete - for future in futures: - future.result() - - except Exception as e: - logger.error(f"An error occurred during execution: {e}") - raise e + self.conversation.add(role="User", content=task) + + # Use 95% of available CPU cores for optimal performance + max_workers = int(os.cpu_count() * 0.95) + + # Run agents concurrently using ThreadPoolExecutor + with concurrent.futures.ThreadPoolExecutor( + max_workers=max_workers + ) as executor: + # Submit all agent tasks and store with their index + future_to_agent = { + executor.submit( + agent.run, task=task, img=img, imgs=imgs + ): agent + for agent in self.agents + } + + # Collect results and add to conversation in completion order + for future in concurrent.futures.as_completed( + 
future_to_agent + ): + agent = future_to_agent[future] + output = future.result() + self.conversation.add(role=agent.name, content=output) return history_output_formatter( - self.conversation, - type=self.output_type, + conversation=self.conversation, + output_type=self.output_type, ) - def run( + def batch_run( self, - task: Optional[str] = None, + tasks: List[str], img: Optional[str] = None, - *args, - **kwargs, - ) -> Any: + imgs: Optional[List[str]] = None, + ): """ - Executes the agent's run method with parallel execution. + Executes the workflow on multiple tasks sequentially. Args: - task (Optional[str], optional): The task to be executed. Defaults to None. - img (Optional[str], optional): The image to be processed. Defaults to None. - *args: Additional positional arguments to be passed to the execution method. - **kwargs: Additional keyword arguments to be passed to the execution method. + tasks (List[str]): List of tasks to be executed by all agents. + img (Optional[str]): Optional image path for agents that support image input. + imgs (Optional[List[str]]): Optional list of image paths for agents that support multiple image inputs. Returns: - Any: The result of the execution. - - Raises: - ValueError: If task validation fails. - Exception: If any other error occurs during execution. - """ - if task is not None: - self.tasks.append(task) - - try: - outputs = self._run(task, img, *args, **kwargs) - return outputs - except Exception as e: - logger.error(f"An error occurred during execution: {e}") - raise e + List of results, one for each task. 
- def run_batched(self, tasks: List[str]) -> Any: - """ - Enhanced batched execution + Example: + >>> workflow = ConcurrentWorkflow(agents=[agent1, agent2]) + >>> tasks = ["Task 1", "Task 2", "Task 3"] + >>> results = workflow.batch_run(tasks) + >>> print(len(results)) # 3 """ - if not tasks: - raise ValueError("Tasks list cannot be empty") - - return [self.run(task) for task in tasks] - - def clear_cache(self): - """Clear the task cache""" - self._cache.clear() - - def get_cache_stats(self) -> Dict[str, int]: - """Get cache statistics""" - return { - "cache_size": len(self._cache), - "max_cache_size": self.cache_size, - } + return [ + self.run(task=task, img=img, imgs=imgs) for task in tasks + ] # if __name__ == "__main__": diff --git a/swarms/structs/ma_utils.py b/swarms/structs/ma_utils.py index a3a9eeb8..b47080b8 100644 --- a/swarms/structs/ma_utils.py +++ b/swarms/structs/ma_utils.py @@ -1,5 +1,8 @@ from typing import List, Any, Optional, Union, Callable import random +from swarms.prompts.collaborative_prompts import ( + get_multi_agent_collaboration_prompt_one, +) def list_all_agents( @@ -8,6 +11,7 @@ def list_all_agents( name: Optional[str] = None, description: Optional[str] = None, add_to_conversation: Optional[bool] = False, + add_collaboration_prompt: Optional[bool] = True, ) -> str: """Lists all agents in a swarm and optionally adds them to a conversation. 
@@ -60,7 +64,12 @@ def list_all_agents( content=all_agents, ) - return all_agents + if add_collaboration_prompt: + return get_multi_agent_collaboration_prompt_one( + agents_in_swarm=all_agents + ) + else: + return all_agents models = [ diff --git a/swarms/structs/swarm_router.py b/swarms/structs/swarm_router.py index 023db9d0..4cfe119e 100644 --- a/swarms/structs/swarm_router.py +++ b/swarms/structs/swarm_router.py @@ -1,6 +1,7 @@ +import concurrent.futures +import json import os -import uuid -from datetime import datetime +import traceback from typing import Any, Callable, Dict, List, Literal, Optional, Union from pydantic import BaseModel, Field @@ -20,13 +21,15 @@ from swarms.structs.rearrange import AgentRearrange from swarms.structs.sequential_workflow import SequentialWorkflow from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm from swarms.structs.swarm_matcher import swarm_matcher +from swarms.telemetry.log_executions import log_execution from swarms.utils.output_types import OutputType from swarms.utils.loguru_logger import initialize_logger from swarms.structs.malt import MALT from swarms.structs.deep_research_swarm import DeepResearchSwarm from swarms.structs.council_judge import CouncilAsAJudge from swarms.structs.interactive_groupchat import InteractiveGroupChat - +from swarms.structs.ma_utils import list_all_agents +from swarms.utils.generate_keys import generate_api_key logger = initialize_logger(log_folder="swarm_router") @@ -54,25 +57,6 @@ class Document(BaseModel): data: str -class SwarmLog(BaseModel): - """ - A Pydantic model to capture log entries. 
- """ - - id: Optional[str] = Field( - default_factory=lambda: str(uuid.uuid4()) - ) - timestamp: Optional[datetime] = Field( - default_factory=datetime.utcnow - ) - level: Optional[str] = None - message: Optional[str] = None - swarm_type: Optional[SwarmType] = None - task: Optional[str] = "" - metadata: Optional[Dict[str, Any]] = Field(default_factory=dict) - documents: List[Document] = [] - - class SwarmRouterConfig(BaseModel): """Configuration model for SwarmRouter.""" @@ -172,12 +156,11 @@ class SwarmRouter: concurrent_batch_run(tasks: List[str], *args, **kwargs) -> List[Any]: Executes multiple tasks concurrently - get_logs() -> List[SwarmLog]: - Retrieves execution logs """ def __init__( self, + id: str = generate_api_key(prefix="swarm-router"), name: str = "swarm-router", description: str = "Routes your task to the desired swarm", max_loops: int = 1, @@ -191,15 +174,18 @@ class SwarmRouter: rules: str = None, documents: List[str] = [], # A list of docs file paths output_type: OutputType = "dict-all-except-first", - no_cluster_ops: bool = False, speaker_fn: callable = None, load_agents_from_csv: bool = False, csv_file_path: str = None, return_entire_history: bool = True, multi_agent_collab_prompt: bool = True, + list_all_agents: bool = False, + conversation: Any = None, + agents_config: Optional[Dict[Any, Any]] = None, *args, **kwargs, ): + self.id = id self.name = name self.description = description self.max_loops = max_loops @@ -213,13 +199,15 @@ class SwarmRouter: self.rules = rules self.documents = documents self.output_type = output_type - self.no_cluster_ops = no_cluster_ops self.speaker_fn = speaker_fn self.logs = [] self.load_agents_from_csv = load_agents_from_csv self.csv_file_path = csv_file_path self.return_entire_history = return_entire_history self.multi_agent_collab_prompt = multi_agent_collab_prompt + self.list_all_agents = list_all_agents + self.conversation = conversation + self.agents_config = agents_config # Reliability check 
self.reliability_check() @@ -230,6 +218,8 @@ class SwarmRouter: csv_path=self.csv_file_path ).load_agents() + self.agent_config = self.agent_config() + def setup(self): if self.auto_generate_prompts is True: self.activate_ape() @@ -276,15 +266,12 @@ class SwarmRouter: logger.info( f"Successfully activated APE for {activated_count} agents" ) - self._log( - "info", - f"Activated automatic prompt engineering for {activated_count} agents", - ) except Exception as e: error_msg = f"Error activating automatic prompt engineering: {str(e)}" - logger.error(error_msg) - self._log("error", error_msg) + logger.error( + f"Error activating automatic prompt engineering in SwarmRouter: {str(e)}" + ) raise RuntimeError(error_msg) from e def reliability_check(self): @@ -293,48 +280,24 @@ class SwarmRouter: Validates essential swarm parameters and configuration before execution. Handles special case for CouncilAsAJudge which may not require agents. """ - logger.info( - "šŸ” [SYSTEM] Initializing advanced swarm reliability diagnostics..." - ) - logger.info( - "⚔ [SYSTEM] Running pre-flight checks and system validation..." - ) # Check swarm type first since it affects other validations if self.swarm_type is None: - logger.error( - "āŒ [CRITICAL] Swarm type validation failed - type cannot be 'none'" + raise ValueError( + "SwarmRouter: Swarm type cannot be 'none'." ) - raise ValueError("Swarm type cannot be 'none'.") - # Special handling for CouncilAsAJudge - if self.swarm_type == "CouncilAsAJudge": - if self.agents is not None: - logger.warning( - "āš ļø [ADVISORY] CouncilAsAJudge detected with agents - this is atypical" - ) - elif not self.agents: - logger.error( - "āŒ [CRITICAL] Agent validation failed - no agents detected in swarm" + if self.agents is None: + raise ValueError( + "SwarmRouter: No agents provided for the swarm." 
) - raise ValueError("No agents provided for the swarm.") # Validate max_loops if self.max_loops == 0: - logger.error( - "āŒ [CRITICAL] Loop validation failed - max_loops cannot be 0" - ) - raise ValueError("max_loops cannot be 0.") + raise ValueError("SwarmRouter: max_loops cannot be 0.") - # Setup other functionality - logger.info("šŸ”„ [SYSTEM] Initializing swarm subsystems...") self.setup() - logger.info( - "āœ… [SYSTEM] All reliability checks passed successfully" - ) - logger.info("šŸš€ [SYSTEM] Swarm is ready for deployment") - def _create_swarm(self, task: str = None, *args, **kwargs): """ Dynamically create and return the specified swarm type or automatically match the best swarm type for a given task. @@ -509,37 +472,19 @@ class SwarmRouter: for agent in self.agents ] - def _log( - self, - level: str, - message: str, - task: str = "", - metadata: Dict[str, Any] = None, - ): - """ - Create a log entry and add it to the logs list. + def agent_config(self): + agent_config = {} + for agent in self.agents: + agent_config[agent.agent_name] = agent.to_dict() - Args: - level (str): The log level (e.g., "info", "error"). - message (str): The log message. - task (str, optional): The task being performed. Defaults to "". - metadata (Dict[str, Any], optional): Additional metadata. Defaults to None. 
- """ - log_entry = SwarmLog( - level=level, - message=message, - swarm_type=self.swarm_type, - task=task, - metadata=metadata or {}, - ) - self.logs.append(log_entry) - logger.log(level.upper(), message) + return agent_config def _run( self, task: str, img: Optional[str] = None, model_response: Optional[str] = None, + imgs: Optional[List[str]] = None, *args, **kwargs, ) -> Any: @@ -559,17 +504,34 @@ class SwarmRouter: """ self.swarm = self._create_swarm(task, *args, **kwargs) + self.conversation = self.swarm.conversation + + if self.list_all_agents is True: + list_all_agents( + agents=self.agents, + conversation=self.swarm.conversation, + name=self.name, + description=self.description, + add_collaboration_prompt=True, + add_to_conversation=True, + ) + if self.multi_agent_collab_prompt is True: self.update_system_prompt_for_agent_in_swarm() - try: - logger.info( - f"Running task on {self.swarm_type} swarm with task: {task}" - ) + log_execution( + swarm_id=self.id, + status="start", + swarm_config=self.to_dict(), + swarm_architecture="swarm_router", + ) + try: if self.swarm_type == "CouncilAsAJudge": result = self.swarm.run( task=task, + img=img, + imgs=imgs, model_response=model_response, *args, **kwargs, @@ -577,21 +539,24 @@ class SwarmRouter: else: result = self.swarm.run(task=task, *args, **kwargs) - logger.info("Swarm completed successfully") + log_execution( + swarm_id=self.id, + status="completion", + swarm_config=self.to_dict(), + swarm_architecture="swarm_router", + ) + return result except Exception as e: - self._log( - "error", - f"Error occurred while running task on {self.swarm_type} swarm: {str(e)}", - task=task, - metadata={"error": str(e)}, + raise RuntimeError( + f"SwarmRouter: Error executing task on swarm: {str(e)} Traceback: {traceback.format_exc()}" ) - raise def run( self, task: str, img: Optional[str] = None, + imgs: Optional[List[str]] = None, model_response: Optional[str] = None, *args, **kwargs, @@ -617,15 +582,24 @@ class SwarmRouter: 
return self._run( task=task, img=img, + imgs=imgs, model_response=model_response, *args, **kwargs, ) except Exception as e: - logger.error(f"Error executing task on swarm: {str(e)}") - raise + raise RuntimeError( + f"SwarmRouter: Error executing task on swarm: {str(e)} Traceback: {traceback.format_exc()}" + ) - def __call__(self, task: str, *args, **kwargs) -> Any: + def __call__( + self, + task: str, + img: Optional[str] = None, + imgs: Optional[List[str]] = None, + *args, + **kwargs, + ) -> Any: """ Make the SwarmRouter instance callable. @@ -637,10 +611,17 @@ class SwarmRouter: Returns: Any: The result of the swarm's execution. """ - return self.run(task=task, *args, **kwargs) + return self.run( + task=task, img=img, imgs=imgs, *args, **kwargs + ) def batch_run( - self, tasks: List[str], *args, **kwargs + self, + tasks: List[str], + img: Optional[str] = None, + imgs: Optional[List[str]] = None, + *args, + **kwargs, ) -> List[Any]: """ Execute a batch of tasks on the selected or matched swarm type. @@ -659,21 +640,26 @@ class SwarmRouter: results = [] for task in tasks: try: - result = self.run(task, *args, **kwargs) + result = self.run( + task, img=img, imgs=imgs, *args, **kwargs + ) results.append(result) except Exception as e: - self._log( - "error", - f"Error occurred while running batch task on {self.swarm_type} swarm: {str(e)}", - task=task, - metadata={"error": str(e)}, + raise RuntimeError( + f"SwarmRouter: Error executing batch task on swarm: {str(e)} Traceback: {traceback.format_exc()}" ) - raise return results - def async_run(self, task: str, *args, **kwargs) -> Any: + def concurrent_run( + self, + task: str, + img: Optional[str] = None, + imgs: Optional[List[str]] = None, + *args, + **kwargs, + ) -> Any: """ - Execute a task on the selected or matched swarm type asynchronously. + Execute a task on the selected or matched swarm type concurrently. Args: task (str): The task to be executed by the swarm. 
@@ -686,95 +672,70 @@ class SwarmRouter: Raises: Exception: If an error occurs during task execution. """ - import asyncio - - async def run_async(): - try: - result = await asyncio.to_thread( - self.run, task, *args, **kwargs - ) - return result - except Exception as e: - self._log( - "error", - f"Error occurred while running task asynchronously on {self.swarm_type} swarm: {str(e)}", - task=task, - metadata={"error": str(e)}, - ) - raise - return asyncio.run(run_async()) + with concurrent.futures.ThreadPoolExecutor( + max_workers=os.cpu_count() + ) as executor: + future = executor.submit( + self.run, task, img=img, imgs=imgs, *args, **kwargs + ) + result = future.result() + return result - def get_logs(self) -> List[SwarmLog]: + def _serialize_callable( + self, attr_value: Callable + ) -> Dict[str, Any]: """ - Retrieve all logged entries. + Serializes callable attributes by extracting their name and docstring. + + Args: + attr_value (Callable): The callable to serialize. Returns: - List[SwarmLog]: A list of all log entries. + Dict[str, Any]: Dictionary with name and docstring of the callable. """ - return self.logs - - def concurrent_run(self, task: str, *args, **kwargs) -> Any: + return { + "name": getattr( + attr_value, "__name__", type(attr_value).__name__ + ), + "doc": getattr(attr_value, "__doc__", None), + } + + def _serialize_attr(self, attr_name: str, attr_value: Any) -> Any: """ - Execute a task on the selected or matched swarm type concurrently. + Serializes an individual attribute, handling non-serializable objects. Args: - task (str): The task to be executed by the swarm. - *args: Variable length argument list. - **kwargs: Arbitrary keyword arguments. + attr_name (str): The name of the attribute. + attr_value (Any): The value of the attribute. Returns: - Any: The result of the swarm's execution. - - Raises: - Exception: If an error occurs during task execution. + Any: The serialized value of the attribute. 
""" - from concurrent.futures import ThreadPoolExecutor - - with ThreadPoolExecutor( - max_workers=os.cpu_count() - ) as executor: - future = executor.submit(self.run, task, *args, **kwargs) - result = future.result() - return result - - def concurrent_batch_run( - self, tasks: List[str], *args, **kwargs - ) -> List[Any]: + try: + if callable(attr_value): + return self._serialize_callable(attr_value) + elif hasattr(attr_value, "to_dict"): + return ( + attr_value.to_dict() + ) # Recursive serialization for nested objects + else: + json.dumps( + attr_value + ) # Attempt to serialize to catch non-serializable objects + return attr_value + except (TypeError, ValueError): + return f"" + + def to_dict(self) -> Dict[str, Any]: """ - Execute a batch of tasks on the selected or matched swarm type concurrently. - - Args: - tasks (List[str]): A list of tasks to be executed by the swarm. - *args: Variable length argument list. - **kwargs: Arbitrary keyword arguments. + Converts all attributes of the class, including callables, into a dictionary. + Handles non-serializable attributes by converting them or skipping them. Returns: - List[Any]: A list of results from the swarm's execution. - - Raises: - Exception: If an error occurs during task execution. + Dict[str, Any]: A dictionary representation of the class attributes. 
""" - from concurrent.futures import ( - ThreadPoolExecutor, - as_completed, - ) - - results = [] - with ThreadPoolExecutor() as executor: - # Submit all tasks to executor - futures = [ - executor.submit(self.run, task, *args, **kwargs) - for task in tasks - ] - - # Process results as they complete rather than waiting for all - for future in as_completed(futures): - try: - result = future.result() - results.append(result) - except Exception as e: - logger.error(f"Task execution failed: {str(e)}") - results.append(None) - - return results + return { + attr_name: self._serialize_attr(attr_name, attr_value) + for attr_name, attr_value in self.__dict__.items() + } diff --git a/swarms/telemetry/__init__.py b/swarms/telemetry/__init__.py index 4322217a..a7f92a78 100644 --- a/swarms/telemetry/__init__.py +++ b/swarms/telemetry/__init__.py @@ -1,27 +1,13 @@ from swarms.telemetry.main import ( - generate_unique_identifier, generate_user_id, - get_cpu_info, get_machine_id, - get_os_version, - get_pip_version, - get_python_version, - get_ram_info, - get_system_info, - get_user_device_data, - system_info, + get_comprehensive_system_info, + log_agent_data, ) __all__ = [ "generate_user_id", "get_machine_id", - "get_system_info", - "generate_unique_identifier", - "get_python_version", - "get_pip_version", - "get_os_version", - "get_cpu_info", - "get_ram_info", - "system_info", - "get_user_device_data", + "get_comprehensive_system_info", + "log_agent_data", ] diff --git a/swarms/telemetry/log_executions.py b/swarms/telemetry/log_executions.py new file mode 100644 index 00000000..8fd13837 --- /dev/null +++ b/swarms/telemetry/log_executions.py @@ -0,0 +1,43 @@ +from typing import Optional +from swarms.telemetry.main import log_agent_data + + +def log_execution( + swarm_id: Optional[str] = None, + status: Optional[str] = None, + swarm_config: Optional[dict] = None, + swarm_architecture: Optional[str] = None, +): + """ + Log execution data for a swarm router instance. 
+ + This function logs telemetry data about swarm router executions, including + the swarm ID, execution status, and configuration details. It silently + handles any logging errors to prevent execution interruption. + + Args: + swarm_id (str): Unique identifier for the swarm router instance + status (str): Current status of the execution (e.g., "start", "completion", "error") + swarm_config (dict): Configuration dictionary containing swarm router settings + swarm_architecture (str): Name of the swarm architecture used + Returns: + None + + Example: + >>> log_execution( + ... swarm_id="swarm-router-abc123", + ... status="start", + ... swarm_config={"name": "my-swarm", "swarm_type": "SequentialWorkflow"} + ... ) + """ + try: + log_agent_data( + data_dict={ + "swarm_router_id": swarm_id, + "status": status, + "swarm_router_config": swarm_config, + "swarm_architecture": swarm_architecture, + } + ) + except Exception: + pass diff --git a/swarms/telemetry/main.py b/swarms/telemetry/main.py index 9e64a1d9..5502a51a 100644 --- a/swarms/telemetry/main.py +++ b/swarms/telemetry/main.py @@ -1,16 +1,13 @@ -import os import datetime import hashlib import platform import socket -import subprocess import uuid -from typing import Dict +from typing import Any, Dict -import pkg_resources import psutil import requests -import toml +from functools import lru_cache # Helper functions @@ -34,265 +31,104 @@ def get_machine_id(): return hashed_id -def get_system_info(): - """ - Gathers basic system information. - - Returns: - dict: A dictionary containing system-related information. 
- """ - info = { +@lru_cache(maxsize=1) +def get_comprehensive_system_info() -> Dict[str, Any]: + # Basic platform and hardware information + system_data = { "platform": platform.system(), "platform_release": platform.release(), "platform_version": platform.version(), + "platform_full": platform.platform(), "architecture": platform.machine(), + "architecture_details": platform.architecture()[0], + "processor": platform.processor(), "hostname": socket.gethostname(), - "ip_address": socket.gethostbyname(socket.gethostname()), - "mac_address": ":".join( + } + + # MAC address + try: + system_data["mac_address"] = ":".join( [ f"{(uuid.getnode() >> elements) & 0xFF:02x}" for elements in range(0, 2 * 6, 8) ][::-1] - ), - "processor": platform.processor(), - "python_version": platform.python_version(), - "Misc": system_info(), - } - return info - - -def generate_unique_identifier(): - """Generate unique identifier - - Returns: - str: unique id - - """ - system_info = get_system_info() - unique_id = uuid.uuid5(uuid.NAMESPACE_DNS, str(system_info)) - return str(unique_id) - - -def get_local_ip(): - """Get local ip - - Returns: - str: local ip - - """ - return socket.gethostbyname(socket.gethostname()) - - -def get_user_device_data(): - data = { - "ID": generate_user_id(), - "Machine ID": get_machine_id(), - "System Info": get_system_info(), - "UniqueID": generate_unique_identifier(), - } - return data - - -def get_python_version(): - return platform.python_version() - - -def get_pip_version() -> str: - """Get pip version - - Returns: - str: The version of pip installed - """ - try: - pip_version = ( - subprocess.check_output(["pip", "--version"]) - .decode() - .split()[1] ) except Exception as e: - pip_version = str(e) - return pip_version - - -def get_swarms_verison() -> tuple[str, str]: - """Get swarms version from both command line and package - - Returns: - tuple[str, str]: A tuple containing (command line version, package version) - """ - try: - swarms_verison_cmd = ( - 
subprocess.check_output(["swarms", "--version"]) - .decode() - .split()[1] - ) - except Exception as e: - swarms_verison_cmd = str(e) - swarms_verison_pkg = pkg_resources.get_distribution( - "swarms" - ).version - swarms_verison = swarms_verison_cmd, swarms_verison_pkg - return swarms_verison - - -def get_os_version() -> str: - """Get operating system version - - Returns: - str: The operating system version and platform details - """ - return platform.platform() + system_data["mac_address"] = f"Error: {str(e)}" + # CPU information + system_data["cpu_count_logical"] = psutil.cpu_count(logical=True) + system_data["cpu_count_physical"] = psutil.cpu_count( + logical=False + ) -def get_cpu_info() -> str: - """Get CPU information - - Returns: - str: The processor information - """ - return platform.processor() - - -def get_ram_info() -> str: - """Get RAM information - - Returns: - str: A formatted string containing total, used and free RAM in GB - """ + # Memory information vm = psutil.virtual_memory() + total_ram_gb = vm.total / (1024**3) used_ram_gb = vm.used / (1024**3) free_ram_gb = vm.free / (1024**3) - total_ram_gb = vm.total / (1024**3) - return ( - f"{total_ram_gb:.2f} GB, used: {used_ram_gb:.2f}, free:" - f" {free_ram_gb:.2f}" + available_ram_gb = vm.available / (1024**3) + + system_data.update( + { + "memory_total_gb": f"{total_ram_gb:.2f}", + "memory_used_gb": f"{used_ram_gb:.2f}", + "memory_free_gb": f"{free_ram_gb:.2f}", + "memory_available_gb": f"{available_ram_gb:.2f}", + "memory_summary": f"Total: {total_ram_gb:.2f} GB, Used: {used_ram_gb:.2f} GB, Free: {free_ram_gb:.2f} GB, Available: {available_ram_gb:.2f} GB", + } ) + # Python version + system_data["python_version"] = platform.python_version() -def get_package_mismatches(file_path: str = "pyproject.toml") -> str: - """Get package version mismatches between pyproject.toml and installed packages - - Args: - file_path (str, optional): Path to pyproject.toml file. Defaults to "pyproject.toml". 
- - Returns: - str: A formatted string containing package version mismatches - """ - with open(file_path) as file: - pyproject = toml.load(file) - dependencies = pyproject["tool"]["poetry"]["dependencies"] - dev_dependencies = pyproject["tool"]["poetry"]["group"]["dev"][ - "dependencies" - ] - dependencies.update(dev_dependencies) - - installed_packages = { - pkg.key: pkg.version for pkg in pkg_resources.working_set - } - - mismatches = [] - for package, version_info in dependencies.items(): - if isinstance(version_info, dict): - version_info = version_info["version"] - installed_version = installed_packages.get(package) - if installed_version and version_info.startswith("^"): - expected_version = version_info[1:] - if not installed_version.startswith(expected_version): - mismatches.append( - f"\t {package}: Mismatch," - f" pyproject.toml={expected_version}," - f" pip={installed_version}" - ) - else: - mismatches.append(f"\t {package}: Not found in pip list") - - return "\n" + "\n".join(mismatches) - - -def system_info() -> dict[str, str]: - """Get system information including Python, pip, OS, CPU and RAM details - - Returns: - dict[str, str]: A dictionary containing system information - """ - return { - "Python Version": get_python_version(), - "Pip Version": get_pip_version(), - # "Swarms Version": swarms_verison, - "OS Version and Architecture": get_os_version(), - "CPU Info": get_cpu_info(), - "RAM Info": get_ram_info(), - } - - -def capture_system_data() -> Dict[str, str]: - """ - Captures extensive system data including platform information, user ID, IP address, CPU count, - memory information, and other system details. - - Returns: - Dict[str, str]: A dictionary containing system data. 
- """ + # Generate unique identifier based on system info try: - system_data = { - "platform": platform.system(), - "platform_version": platform.version(), - "platform_release": platform.release(), - "hostname": socket.gethostname(), - "ip_address": socket.gethostbyname(socket.gethostname()), - "cpu_count": psutil.cpu_count(logical=True), - "memory_total": f"{psutil.virtual_memory().total / (1024 ** 3):.2f} GB", - "memory_available": f"{psutil.virtual_memory().available / (1024 ** 3):.2f} GB", - "user_id": str(uuid.uuid4()), # Unique user identifier - "machine_type": platform.machine(), - "processor": platform.processor(), - "architecture": platform.architecture()[0], - } - - return system_data + unique_id = uuid.uuid5(uuid.NAMESPACE_DNS, str(system_data)) + system_data["unique_identifier"] = str(unique_id) except Exception as e: - # logger.error("Failed to capture system data: {}", e) - print(f"Failed to capture system data: {e}") + system_data["unique_identifier"] = f"Error: {str(e)}" + + return system_data def _log_agent_data(data_dict: dict): """Simple function to log agent data using requests library""" - if not data_dict: - return url = "https://swarms.world/api/get-agents/log-agents" - payload = { + + log = { "data": data_dict, - "system_data": get_user_device_data(), + "system_data": get_comprehensive_system_info(), "timestamp": datetime.datetime.now( datetime.timezone.utc ).isoformat(), } - key = ( - os.getenv("SWARMS_API_KEY") - or "Bearer sk-33979fd9a4e8e6b670090e4900a33dbe7452a15ccc705745f4eca2a70c88ea24" - ) + payload = { + "data": log, + } + + key = "Bearer sk-33979fd9a4e8e6b670090e4900a33dbe7452a15ccc705745f4eca2a70c88ea24" headers = { "Content-Type": "application/json", "Authorization": key, } - try: - response = requests.post( - url, json=payload, headers=headers, timeout=10 - ) - if response.status_code == 200: - return - except Exception: - return + response = requests.post( + url, json=payload, headers=headers, timeout=10 + ) + 
print(response.json()) + if response.status_code == 200: + return response.json() - return + print(response.json()) + return response.json() def log_agent_data(data_dict: dict): try: - _log_agent_data(data_dict) + return _log_agent_data(data_dict) except Exception: pass From d8958481cfe1d62e14089d885014e6a2f8fb31de Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Thu, 26 Jun 2025 14:23:39 -0700 Subject: [PATCH 15/86] cleanup --- swarms/telemetry/main.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/swarms/telemetry/main.py b/swarms/telemetry/main.py index 5502a51a..0ad0c5b3 100644 --- a/swarms/telemetry/main.py +++ b/swarms/telemetry/main.py @@ -119,16 +119,16 @@ def _log_agent_data(data_dict: dict): response = requests.post( url, json=payload, headers=headers, timeout=10 ) - print(response.json()) - if response.status_code == 200: - return response.json() - print(response.json()) - return response.json() + try: + if response.status_code == 200: + return + except Exception as e: + pass def log_agent_data(data_dict: dict): try: - return _log_agent_data(data_dict) + _log_agent_data(data_dict) except Exception: pass From 2f5e7bdca7ae9e2efbed0231163c3831722c619a Mon Sep 17 00:00:00 2001 From: harshalmore31 Date: Thu, 26 Jun 2025 19:46:46 +0530 Subject: [PATCH 16/86] Add real-time streaming support and example usage for Agent class --- stream_example.py | 12 ++++++ swarms/structs/agent.py | 50 +++++++++++++++++++---- swarms/utils/formatter.py | 70 +++++++++++++++++++++++++++++++++ swarms/utils/litellm_wrapper.py | 6 ++- 4 files changed, 130 insertions(+), 8 deletions(-) create mode 100644 stream_example.py diff --git a/stream_example.py b/stream_example.py new file mode 100644 index 00000000..0c081700 --- /dev/null +++ b/stream_example.py @@ -0,0 +1,12 @@ +from swarms import Agent + +# Enable real-time streaming +agent = Agent( + agent_name="StoryAgent", + model_name="gpt-4o-mini", + streaming_on=True, # šŸ”„ This enables real streaming! 
+ max_loops=1, +) + +# This will now stream in real-time with beautiful UI! +response = agent.run("Tell me a detailed story...") \ No newline at end of file diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index bf8d1ab7..a294dd50 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -286,6 +286,11 @@ class Agent: >>> response = agent.run("Generate a report on the financials.") >>> print(response) >>> # Generate a report on the financials. + + >>> # Real-time streaming example + >>> agent = Agent(llm=llm, max_loops=1, streaming_on=True) + >>> response = agent.run("Tell me a long story.") # Will stream in real-time + >>> print(response) # Final complete response """ @@ -2469,14 +2474,45 @@ class Agent: """ try: - if img is not None: - out = self.llm.run( - task=task, img=img, *args, **kwargs - ) + # Set streaming parameter in LLM if streaming is enabled + if self.streaming_on and hasattr(self.llm, 'stream'): + original_stream = self.llm.stream + self.llm.stream = True + + if img is not None: + streaming_response = self.llm.run( + task=task, img=img, *args, **kwargs + ) + else: + streaming_response = self.llm.run(task=task, *args, **kwargs) + + # If we get a streaming response, handle it with the new streaming panel + if hasattr(streaming_response, '__iter__') and not isinstance(streaming_response, str): + # Use the new streaming panel to display and collect the response + complete_response = formatter.print_streaming_panel( + streaming_response, + title=f"šŸ¤– {self.agent_name} Streaming Response", + style="bold cyan" + ) + + # Restore original stream setting + self.llm.stream = original_stream + return complete_response + else: + # Restore original stream setting + self.llm.stream = original_stream + return streaming_response else: - out = self.llm.run(task=task, *args, **kwargs) + # Non-streaming call + if img is not None: + out = self.llm.run( + task=task, img=img, *args, **kwargs + ) + else: + out = self.llm.run(task=task, *args, 
**kwargs) + + return out - return out except AgentLLMError as e: logger.error( f"Error calling LLM: {e}. Task: {task}, Args: {args}, Kwargs: {kwargs}" @@ -2861,7 +2897,7 @@ class Agent: temperature=self.temperature, max_tokens=self.max_tokens, system_prompt=self.system_prompt, - stream=self.streaming_on, + stream=False, # Always disable streaming for tool summaries tools_list_dictionary=None, parallel_tool_calls=False, base_url=self.llm_base_url, diff --git a/swarms/utils/formatter.py b/swarms/utils/formatter.py index 3f418647..7d89f6ba 100644 --- a/swarms/utils/formatter.py +++ b/swarms/utils/formatter.py @@ -145,5 +145,75 @@ class Formatter: ) time.sleep(delay) + def print_streaming_panel( + self, + streaming_response, + title: str = "šŸ¤– Agent Streaming Response", + style: str = "bold cyan", + ) -> str: + """ + Display real-time streaming response using Rich Live and Panel. + Similar to the approach used in litellm_stream.py. + + Args: + streaming_response: The streaming response generator from LiteLLM. + title (str): Title of the panel. + style (str): Style for the panel border. + + Returns: + str: The complete accumulated response text. 
+ """ + def create_streaming_panel(text_obj, is_complete=False): + """Create panel with proper text wrapping using Rich's built-in capabilities""" + panel_title = f"[bold cyan]{title}[/bold cyan]" + if is_complete: + panel_title += " [bold green]āœ…[/bold green]" + + # Add blinking cursor if still streaming + display_text = Text.from_markup("") + display_text.append_text(text_obj) + if not is_complete: + display_text.append("ā–Š", style="bold green blink") + + panel = Panel( + display_text, + title=panel_title, + border_style=style, + padding=(1, 2), + width=self.console.size.width, # Rich handles wrapping automatically + ) + return panel + + # Create a Text object for streaming content + streaming_text = Text() + complete_response = "" + + # TRUE streaming with Rich's automatic text wrapping + with Live( + create_streaming_panel(streaming_text), + console=self.console, + refresh_per_second=20 + ) as live: + try: + for part in streaming_response: + if hasattr(part, 'choices') and part.choices and part.choices[0].delta.content: + # Add ONLY the new chunk to the Text object + chunk = part.choices[0].delta.content + streaming_text.append(chunk, style="white") + complete_response += chunk + + # Update display with new text - Rich handles all wrapping automatically + live.update(create_streaming_panel(streaming_text, is_complete=False)) + + # Final update to show completion + live.update(create_streaming_panel(streaming_text, is_complete=True)) + + except Exception as e: + # Handle any streaming errors gracefully + streaming_text.append(f"\n[Error: {str(e)}]", style="bold red") + live.update(create_streaming_panel(streaming_text, is_complete=True)) + + return complete_response + formatter = Formatter() diff --git a/swarms/utils/litellm_wrapper.py b/swarms/utils/litellm_wrapper.py index 6aa5c7d3..840ec073 100644 --- a/swarms/utils/litellm_wrapper.py +++ b/swarms/utils/litellm_wrapper.py @@ -449,8 +449,12 @@ class LiteLLM: # Make the completion call response = 
completion(**completion_params) + # Handle streaming response + if self.stream: + return response # Return the streaming generator directly + # Handle tool-based response - if self.tools_list_dictionary is not None: + elif self.tools_list_dictionary is not None: return self.output_for_tools(response) elif self.return_all is True: return response.model_dump() From 5e5819fc48244ddd4e04cc55e1d16c4d073e438a Mon Sep 17 00:00:00 2001 From: harshalmore31 Date: Thu, 26 Jun 2025 23:06:34 +0530 Subject: [PATCH 17/86] Enhance streaming response handling by collecting chunks and adding callback support --- swarms/structs/agent.py | 19 +++++++++++++++++-- swarms/utils/formatter.py | 15 ++++++++++++++- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index a294dd50..9503770d 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -2488,15 +2488,30 @@ class Agent: # If we get a streaming response, handle it with the new streaming panel if hasattr(streaming_response, '__iter__') and not isinstance(streaming_response, str): - # Use the new streaming panel to display and collect the response + # Collect chunks for conversation saving + collected_chunks = [] + + def on_chunk_received(chunk: str): + """Callback to collect chunks as they arrive""" + collected_chunks.append(chunk) + # Optional: Save each chunk to conversation in real-time + # This creates a more detailed conversation history + if self.verbose: + logger.debug(f"Streaming chunk received: {chunk[:50]}...") + + # Use the streaming panel to display and collect the response complete_response = formatter.print_streaming_panel( streaming_response, title=f"šŸ¤– {self.agent_name} Streaming Response", - style="bold cyan" + style="bold cyan", + collect_chunks=True, + on_chunk_callback=on_chunk_received ) # Restore original stream setting self.llm.stream = original_stream + + # Return the complete response for further processing return complete_response 
else: # Restore original stream setting diff --git a/swarms/utils/formatter.py b/swarms/utils/formatter.py index 7d89f6ba..0d608f6f 100644 --- a/swarms/utils/formatter.py +++ b/swarms/utils/formatter.py @@ -1,6 +1,6 @@ import threading import time -from typing import Any, Callable, Dict, List +from typing import Any, Callable, Dict, List, Optional from rich.console import Console from rich.live import Live @@ -150,6 +150,8 @@ class Formatter: streaming_response, title: str = "šŸ¤– Agent Streaming Response", style: str = "bold cyan", + collect_chunks: bool = False, + on_chunk_callback: Optional[Callable] = None, ) -> str: """ Display real-time streaming response using Rich Live and Panel. @@ -159,6 +161,8 @@ class Formatter: streaming_response: The streaming response generator from LiteLLM. title (str): Title of the panel. style (str): Style for the panel border. + collect_chunks (bool): Whether to collect individual chunks for conversation saving. + on_chunk_callback (Optional[Callable]): Callback function to call for each chunk. Returns: str: The complete accumulated response text. 
@@ -187,6 +191,7 @@ class Formatter: # Create a Text object for streaming content streaming_text = Text() complete_response = "" + chunks_collected = [] # TRUE streaming with Rich's automatic text wrapping with Live( @@ -202,6 +207,14 @@ class Formatter: streaming_text.append(chunk, style="white") complete_response += chunk + # Collect chunks if requested + if collect_chunks: + chunks_collected.append(chunk) + + # Call chunk callback if provided + if on_chunk_callback: + on_chunk_callback(chunk) + # Update display with new text - Rich handles all wrapping automatically live.update(create_streaming_panel(streaming_text, is_complete=False)) From 92a9eac8d834b8d4b74d360792aa7ca774cc6a50 Mon Sep 17 00:00:00 2001 From: harshalmore31 Date: Fri, 27 Jun 2025 03:43:33 +0530 Subject: [PATCH 18/86] Refactor streaming response handling to support print_on parameter for flexible output --- stream_example.py | 1 + swarms/structs/agent.py | 61 ++++++++++++++++++++++++----------------- 2 files changed, 37 insertions(+), 25 deletions(-) diff --git a/stream_example.py b/stream_example.py index 0c081700..a09a4260 100644 --- a/stream_example.py +++ b/stream_example.py @@ -6,6 +6,7 @@ agent = Agent( model_name="gpt-4o-mini", streaming_on=True, # šŸ”„ This enables real streaming! max_loops=1, + print_on=True, # By Default its False, raw streaming !! ) # This will now stream in real-time with beautiful UI! 
diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 9503770d..9c33ceea 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -2488,25 +2488,38 @@ class Agent: # If we get a streaming response, handle it with the new streaming panel if hasattr(streaming_response, '__iter__') and not isinstance(streaming_response, str): - # Collect chunks for conversation saving - collected_chunks = [] - - def on_chunk_received(chunk: str): - """Callback to collect chunks as they arrive""" - collected_chunks.append(chunk) - # Optional: Save each chunk to conversation in real-time - # This creates a more detailed conversation history - if self.verbose: - logger.debug(f"Streaming chunk received: {chunk[:50]}...") - - # Use the streaming panel to display and collect the response - complete_response = formatter.print_streaming_panel( - streaming_response, - title=f"šŸ¤– {self.agent_name} Streaming Response", - style="bold cyan", - collect_chunks=True, - on_chunk_callback=on_chunk_received - ) + # Check print_on parameter for different streaming behaviors + if self.print_on is False: + # Show raw streaming text without formatting panels + chunks = [] + print(f"\n{self.agent_name}: ", end="", flush=True) + for chunk in streaming_response: + if hasattr(chunk, 'choices') and chunk.choices[0].delta.content: + content = chunk.choices[0].delta.content + print(content, end="", flush=True) # Print raw streaming text + chunks.append(content) + print() # New line after streaming completes + complete_response = ''.join(chunks) + else: + # Collect chunks for conversation saving + collected_chunks = [] + + def on_chunk_received(chunk: str): + """Callback to collect chunks as they arrive""" + collected_chunks.append(chunk) + # Optional: Save each chunk to conversation in real-time + # This creates a more detailed conversation history + if self.verbose: + logger.debug(f"Streaming chunk received: {chunk[:50]}...") + + # Use the streaming panel to display and collect the 
response + complete_response = formatter.print_streaming_panel( + streaming_response, + title=f"šŸ¤– {self.agent_name} Streaming Response", + style="bold cyan", + collect_chunks=True, + on_chunk_callback=on_chunk_received + ) # Restore original stream setting self.llm.stream = original_stream @@ -2744,12 +2757,10 @@ class Agent: def pretty_print(self, response: str, loop_count: int): if self.print_on is False: if self.streaming_on is True: - # self.stream_response(response) - formatter.print_panel_token_by_token( - f"{self.agent_name}: {response}", - title=f"Agent Name: {self.agent_name} [Max Loops: {loop_count}]", - ) - elif self.print_on is True: + # Skip printing here since real streaming is handled in call_llm + # This avoids double printing when streaming_on=True + pass + elif self.no_print is True: pass else: # logger.info(f"Response: {response}") From b258f3dfce9bd03f1fccd3adf867eedc52c9160c Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Thu, 26 Jun 2025 15:29:52 -0700 Subject: [PATCH 19/86] [docs][swarms marketplace documentation --- docs/mkdocs.yml | 13 ++-- docs/swarms_platform/monetize.md | 126 +++++++++++++++++++++++++++++++ swarms/telemetry/main.py | 2 +- 3 files changed, 135 insertions(+), 6 deletions(-) create mode 100644 docs/swarms_platform/monetize.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index dd3ee0fb..53f3a36a 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -410,11 +410,14 @@ nav: - Swarms Marketplace: - Overview: "swarms_platform/index.md" - - Share and Discover Agents, Prompts, and Tools: "swarms_platform/share_and_discover.md" - - Customize Your Sidebar: "swarms_platform/apps_page.md" - - Playground: "swarms_platform/playground_page.md" - - API Key Management: "swarms_platform/apikeys.md" - - Account Management: "swarms_platform/account_management.md" + - Marketplace: + - Share and Discover Agents, Prompts, and Tools: "swarms_platform/share_and_discover.md" + - Monetize Your Prompts, Agents, and Tools: 
"swarms_platform/monetize.md" + - Platform: + - Customize Your Sidebar: "swarms_platform/apps_page.md" + - Playground: "swarms_platform/playground_page.md" + - API Key Management: "swarms_platform/apikeys.md" + - Account Management: "swarms_platform/account_management.md" - Swarms Rust: - Overview: "swarms_rs/overview.md" diff --git a/docs/swarms_platform/monetize.md b/docs/swarms_platform/monetize.md new file mode 100644 index 00000000..4df61cee --- /dev/null +++ b/docs/swarms_platform/monetize.md @@ -0,0 +1,126 @@ +# Swarms.World Monetization Guide + +## Quick Overview + +Swarms Marketplace has activated its payment infrastructure, enabling creators to monetize AI agents, prompts, and tools directly through the platform. Sellers receive payments minus a 5-15% platform fee, scaled based on subscription tiers. Revenue accrues in real-time to integrated crypto wallets, with optional fiat conversions. + +--- + +## Eligibility Requirements + +### Current Requirements for Paid Content + +- **2+ published items** (Prompts, Agents, and Tools) + +- **2 Items with 4+ star ratings** (you need community ratings) + +- **Marketplace Agent Rating** An agent will automatically rate your prompt, agent, or tool. + +**Bottom Line**: You must build reputation with free, high-quality content first. + +--- + +## Step-by-Step Process + +### Phase 1: Build Reputation (Required First) + +#### 1. Improve Your Existing Content + +- Add better descriptions and examples to your published items + +- Use the Rating System: Evaluate and rate prompts, agents, and tools based on their effectiveness. Commenting System: Share feedback and insights with the Swarms community + +- Ask users for honest reviews and ratings + +#### 2. 
Create More Quality Content + +Focus on these categories: + +- **Agents**: Marketing, finance, or programming automation + +- **Prompts**: Templates for specific business tasks + +- **Tools**: Utilities that solve real problems + +Target: 3-5 additional items, all aiming for 4+ star ratings + +#### 3. Get Community Ratings + +- Share your content in relevant communities + +- Engage with users who try your content + +- Respond to feedback and improve based on comments + +- Be patient - ratings take time to accumulate + +### Phase 2: Start Monetizing + +#### 4. Choose Your Pricing Model + +Three primary monetization avenues exist: AI agents (autonomous task-execution models), prompts (pre-optimized input templates), and tools (development utilities like data preprocessors) + +**Pricing Options:** + +- **One-time**: $0.01 - $999,999 USD + +- **Subscription**: Monthly/annual recurring fees (Coming Soon) + +- **Usage-based**: Pay per API call or computation (Coming Soon) + + +#### 5. Optimize & Scale + +- Monitor your revenue and user feedback + +- Developers can bundle assets—such as pairing prompt libraries with compatible agents—creating value-added +packages + +- Create bundles of related content for higher value + +- Adjust pricing based on demand + +--- + +## Revenue Models + +### What Sells Best + +1. **Business Automation Agents** - Marketing, sales, finance + +2. **Industry-Specific Prompts** - Legal, medical, technical writing + +3. **Integration Tools** - APIs, data processors, connectors + +### Pricing Examples + +- Simple prompts: $1-50 + +- Complex agents: $20-500+ + +- Enterprise tools: $100-1000+ + +--- + +## Quick Tips for Success + +1. **Quality over quantity** - Better to have 3 excellent items than 10 mediocre ones +2. **Solve real problems** - Focus on actual business needs +3. **Document everything** - Clear instructions increase ratings +4. **Engage actively** - Respond to all user feedback +5. 
**Be patient** - Building reputation takes time but pays off + +--- + +## Common Mistakes to Avoid + +- Publishing low-quality content to meet quantity requirements + +- Not responding to user feedback + +- Setting prices too high before building reputation + +- Copying existing solutions without adding value + +- Ignoring community guidelines + diff --git a/swarms/telemetry/main.py b/swarms/telemetry/main.py index 0ad0c5b3..597680c1 100644 --- a/swarms/telemetry/main.py +++ b/swarms/telemetry/main.py @@ -123,7 +123,7 @@ def _log_agent_data(data_dict: dict): try: if response.status_code == 200: return - except Exception as e: + except Exception: pass From 636b2fa102cae34c53fe623b35a470e0bfafbca3 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Fri, 27 Jun 2025 10:06:50 -0700 Subject: [PATCH 20/86] readme --- chat.py | 11 +++ docs/index.md | 40 ++++++++--- quantum_physics_swarm.py | 89 +++++++++++++++++++++++++ swarms/structs/interactive_groupchat.py | 4 +- 4 files changed, 132 insertions(+), 12 deletions(-) create mode 100644 chat.py create mode 100644 quantum_physics_swarm.py diff --git a/chat.py b/chat.py new file mode 100644 index 00000000..35e6e25b --- /dev/null +++ b/chat.py @@ -0,0 +1,11 @@ +import gradio as gr +import ai_gradio + +finance_interface = gr.load( + name="swarms:gpt-4-turbo", + src=ai_gradio.registry, + agent_name="Stock-Analysis-Agent", + title="Finance Assistant", + description="Expert financial analysis and advice tailored to your investment needs.", +) +finance_interface.launch() diff --git a/docs/index.md b/docs/index.md index 0b6d1f4e..ceb80cc1 100644 --- a/docs/index.md +++ b/docs/index.md @@ -2,6 +2,24 @@ [![Join our Discord](https://img.shields.io/badge/Discord-Join%20our%20server-5865F2?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/jM3Z6M9uMq) [![Subscribe on YouTube](https://img.shields.io/badge/YouTube-Subscribe-red?style=for-the-badge&logo=youtube&logoColor=white)](https://www.youtube.com/@kyegomez3242) 
[![Connect on LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue?style=for-the-badge&logo=linkedin&logoColor=white)](https://www.linkedin.com/in/kye-g-38759a207/) [![Follow on X.com](https://img.shields.io/badge/X.com-Follow-1DA1F2?style=for-the-badge&logo=x&logoColor=white)](https://x.com/swarms_corp) +## What is Swarms? + +**Swarms** is the **first and most reliable multi-agent production-grade framework** designed to orchestrate intelligent AI agents at scale. Built for enterprise applications, Swarms enables you to create sophisticated multi-agent systems that can handle complex tasks through collaboration, parallel processing, and intelligent task distribution. + +### Key Capabilities + +- **šŸ¢ Production-Ready**: Enterprise-grade infrastructure with high reliability, comprehensive logging, and robust error handling +- **šŸ¤– Multi-Agent Orchestration**: Support for hierarchical swarms, parallel processing, sequential workflows, and dynamic agent rearrangement +- **šŸ”„ Flexible Integration**: Multi-model support, custom agent creation, extensive tool library, and multiple memory systems +- **šŸ“ˆ Scalable Architecture**: Concurrent processing, resource management, load balancing, and horizontal scaling capabilities +- **šŸ› ļø Developer-Friendly**: Simple API, extensive documentation, active community, and CLI tools for rapid development +- **šŸ” Enterprise Security**: Built-in error handling, rate limiting, monitoring integration, and audit logging + +### Why Choose Swarms? + +Swarms stands out as the **most reliable multi-agent framework** because it was built from the ground up for production environments. Unlike other frameworks that focus on research or simple demos, Swarms provides the infrastructure, tooling, and best practices needed to deploy multi-agent systems in real-world applications. 
+ +Whether you're building financial analysis systems, healthcare diagnostics, manufacturing optimization, or any other complex multi-agent application, Swarms provides the foundation you need to succeed. ## Swarms Installation @@ -55,17 +73,19 @@ Here you'll find references about the Swarms framework, marketplace, community, | Swarms Corp Github | [Swarms Corp GitHub](https://github.com/The-Swarm-Corporation) | -## Community -| Section | Links | -|----------------------|--------------------------------------------------------------------------------------------| -| Community | [Discord](https://discord.gg/jM3Z6M9uMq) | -| Blog | [Blog](https://medium.com/@kyeg) | -| Event Calendar | [LUMA](https://lu.ma/swarms_calendar) | -| Twitter | [Twitter](https://x.com/swarms_corp) | -| Agent Marketplace | [Website](https://swarms.world) | -| Docs | [Website](https://docs.swarms.world) | -| Swarms Website | [Website](https://swarms.ai) | +## Join the Swarms Community + +| Platform | Link | Description | +|----------|------|-------------| +| šŸ“š Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides | +| šŸ“ Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles | +| šŸ’¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support | +| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements | +| šŸ‘„ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates | +| šŸ“ŗ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos | +| šŸŽ« Events | [Sign up here](https://lu.ma/5p2jnc2v) | Join our community events | ## Get Support Want to get in touch with the Swarms team? Open an issue on [GitHub](https://github.com/kyegomez/swarms/issues/new) or reach out to us via [email](mailto:kye@swarms.world). We're here to help! 
+ diff --git a/quantum_physics_swarm.py b/quantum_physics_swarm.py new file mode 100644 index 00000000..595d47c5 --- /dev/null +++ b/quantum_physics_swarm.py @@ -0,0 +1,89 @@ +from swarms import Agent +from swarms.structs.interactive_groupchat import InteractiveGroupChat + + +if __name__ == "__main__": + # Initialize agents specialized for condensed matter physics + theoretical_physicist = Agent( + agent_name="TheoreticalPhysicist", + system_prompt=""" + You are an exceptionally brilliant theoretical condensed matter physicist with deep expertise in quantum many-body theory, phase transitions, and emergent phenomena. You possess extraordinary mathematical intuition and can derive, manipulate, and analyze complex equations with remarkable precision. + + Your core competencies include: + - **Advanced Mathematical Modeling**: You excel at formulating and solving differential equations, partial differential equations, and integro-differential equations that describe quantum systems. You can derive equations from first principles using variational methods, path integrals, and functional analysis. + + - **Quantum Field Theory**: You master the mathematical framework of quantum field theory, including Feynman diagrams, renormalization group theory, and effective field theories. You can derive and analyze equations for correlation functions, Green's functions, and response functions. + + - **Statistical Mechanics**: You are expert at deriving partition functions, free energies, and thermodynamic potentials. You can formulate and solve equations for phase transitions, critical phenomena, and scaling behavior using techniques like mean-field theory, Landau-Ginzburg theory, and renormalization group methods. + + - **Many-Body Physics**: You excel at deriving equations for interacting quantum systems, including Hubbard models, Heisenberg models, and BCS theory. You can analyze equations for collective excitations, quasiparticles, and topological states. 
+ + - **Analytical Techniques**: You master perturbation theory, variational methods, exact diagonalization, and other analytical techniques. You can derive equations for energy spectra, wave functions, and observables in complex quantum systems. + + When presented with a physics problem, you immediately think in terms of mathematical equations and can derive the appropriate formalism from fundamental principles. You always show your mathematical work step-by-step and explain the physical meaning of each equation you write. + """, + model="claude-3-5-sonnet-20240620", + ) + + experimental_physicist = Agent( + agent_name="ExperimentalPhysicist", + system_prompt="""You are an exceptionally skilled experimental condensed matter physicist with profound expertise in materials synthesis, characterization techniques, and data analysis. You possess extraordinary analytical abilities and can derive, interpret, and validate equations that describe experimental observations. + +Your core competencies include: +- **Materials Synthesis & Characterization**: You excel at designing synthesis protocols and deriving equations that describe growth kinetics, phase formation, and structural evolution. You can formulate equations for crystal growth, diffusion processes, and phase equilibria. + +- **Advanced Characterization Techniques**: You master the mathematical foundations of X-ray diffraction (Bragg's law, structure factors, Rietveld refinement), electron microscopy (diffraction patterns, image formation), and spectroscopy (absorption, emission, scattering cross-sections). You can derive equations for resolution limits, signal-to-noise ratios, and detection sensitivity. + +- **Transport Properties**: You excel at deriving and analyzing equations for electrical conductivity (Drude model, Boltzmann transport), thermal conductivity (phonon and electron contributions), and magnetic properties (Curie-Weiss law, magnetic susceptibility). 
You can formulate equations for Hall effect, magnetoresistance, and thermoelectric effects. + +- **Data Analysis & Modeling**: You possess advanced skills in fitting experimental data to theoretical models, error analysis, and statistical inference. You can derive equations for uncertainty propagation, confidence intervals, and model selection criteria. + +- **Experimental Design**: You excel at deriving equations for experimental sensitivity, resolution requirements, and optimization of measurement parameters. You can formulate equations for signal processing, noise reduction, and systematic error correction. + +When analyzing experimental data, you immediately think in terms of mathematical models and can derive equations that connect observations to underlying physical mechanisms. You always show your mathematical reasoning and explain how equations relate to experimental reality.""", + model="claude-3-5-sonnet-20240620", + ) + + computational_physicist = Agent( + agent_name="ComputationalPhysicist", + system_prompt="""You are an exceptionally brilliant computational condensed matter physicist with deep expertise in numerical methods, algorithm development, and high-performance computing. You possess extraordinary mathematical skills and can formulate, implement, and analyze equations that drive computational simulations. + +Your core competencies include: +- **Density Functional Theory (DFT)**: You excel at deriving and implementing the Kohn-Sham equations, exchange-correlation functionals, and self-consistent field methods. You can formulate equations for electronic structure, total energies, forces, and response functions. You master the mathematical foundations of plane-wave methods, pseudopotentials, and k-point sampling. + +- **Quantum Monte Carlo Methods**: You are expert at deriving equations for variational Monte Carlo, diffusion Monte Carlo, and path integral Monte Carlo. 
You can formulate equations for importance sampling, correlation functions, and statistical estimators. You excel at deriving equations for finite-size effects, time-step errors, and population control. + +- **Molecular Dynamics**: You master the mathematical framework of classical and ab initio molecular dynamics, including equations of motion, thermostats, barostats, and constraint algorithms. You can derive equations for time integration schemes, energy conservation, and phase space sampling. + +- **Many-Body Methods**: You excel at implementing and analyzing equations for exact diagonalization, quantum chemistry methods (CI, CC, MP), and tensor network methods (DMRG, PEPS). You can derive equations for matrix elements, basis transformations, and optimization algorithms. + +- **High-Performance Computing**: You possess advanced skills in parallel algorithms, load balancing, and numerical optimization. You can derive equations for computational complexity, scaling behavior, and performance bottlenecks. You excel at formulating equations for parallel efficiency, communication overhead, and memory management. + +When developing computational methods, you think in terms of mathematical algorithms and can derive equations that translate physical problems into efficient numerical procedures. 
You always show your mathematical derivations and explain how equations map to computational implementations.""", + model="claude-3-5-sonnet-20240620", + ) + + # Collect the three physicist agents for the group chat + agents = [ + theoretical_physicist, + experimental_physicist, + computational_physicist, + ] + + # Initialize the group chat in interactive mode + interactive_chat = InteractiveGroupChat( + name="Interactive Condensed Matter Physics Research Team", + description="An interactive team of condensed matter physics experts providing comprehensive analysis of quantum materials, phase transitions, and emergent phenomena", + agents=agents, + max_loops=1, + output_type="all", + interactive=True, + ) + + try: + # Start the interactive session + print("\nStarting interactive session...") + # interactive_chat.run("What is the best methodology to accumulate gold and silver commodities, what is the best long term strategy to accumulate them?") + interactive_chat.start_interactive_session() + except Exception as e: + print(f"An error occurred in interactive mode: {e}") diff --git a/swarms/structs/interactive_groupchat.py b/swarms/structs/interactive_groupchat.py index 802de393..900c6abb 100644 --- a/swarms/structs/interactive_groupchat.py +++ b/swarms/structs/interactive_groupchat.py @@ -263,9 +263,9 @@ class InteractiveGroupChat: # Process the task and get responses try: - response = self.run(user_input) + self.run(user_input) print("\nChat:") - print(response) + # print(response) except NoMentionedAgentsError: print( From 0c339634d0880a95879e13aef61f830ccdb61ea9 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Fri, 27 Jun 2025 10:08:08 -0700 Subject: [PATCH 21/86] fix docs --- docs/mkdocs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 53f3a36a..b3577652 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -308,7 +308,7 @@ nav: - Examples: - Overview: "examples/index.md" - CookBook 
Index: "examples/cookbook_index.md" - - PreBuilt Templates: "examples/templates_index.md" + # - PreBuilt Templates: "examples/templates_index.md" - Customizing Agents: - Basic Agent: "swarms/examples/basic_agent.md" - Agents with Callable Tools: "swarms/examples/agent_with_tools.md" From 77bdaac350a9bc734d255fa050f2cac8ff1f1006 Mon Sep 17 00:00:00 2001 From: Kye Gomez <98760976+kyegomez@users.noreply.github.com> Date: Fri, 27 Jun 2025 11:46:25 -0700 Subject: [PATCH 22/86] Update README.md --- README.md | 72 ------------------------------------------------------- 1 file changed, 72 deletions(-) diff --git a/README.md b/README.md index 8826000e..fbcfbde2 100644 --- a/README.md +++ b/README.md @@ -714,78 +714,6 @@ Swarm architectures leverage these communication patterns to ensure that agents | Swarm Router | Routes and chooses the swarm architecture based on the task requirements and available agents. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/) | Dynamic task routing, adaptive swarm architecture selection, optimized agent allocation | - -## Swarms API - -We recently launched our enterprise-grade Swarms API. This API allows you to create, manage, and execute swarms from your own application. - -#### Steps: - -1. Create a Swarms API key [HERE](https://swarms.world) -2. Upload your key to the `.env` file like so: `SWARMS_API_KEY=` -3. Use the following code to create and execute a swarm: -4. 
Read our docs for more information for deeper customization [HERE](https://docs.swarms.world/en/latest/swarms_cloud/swarms_api/) - - -```python -import json -from swarms.structs.swarms_api import ( - SwarmsAPIClient, - SwarmRequest, - AgentInput, -) -import os - -agents = [ - AgentInput( - agent_name="Medical Researcher", - description="Conducts medical research and analysis", - system_prompt="You are a medical researcher specializing in clinical studies.", - max_loops=1, - model_name="gpt-4o", - role="worker", - ), - AgentInput( - agent_name="Medical Diagnostician", - description="Provides medical diagnoses based on symptoms and test results", - system_prompt="You are a medical diagnostician with expertise in identifying diseases.", - max_loops=1, - model_name="gpt-4o", - role="worker", - ), - AgentInput( - agent_name="Pharmaceutical Expert", - description="Advises on pharmaceutical treatments and drug interactions", - system_prompt="You are a pharmaceutical expert knowledgeable about medications and their effects.", - max_loops=1, - model_name="gpt-4o", - role="worker", - ), -] - -swarm_request = SwarmRequest( - name="Medical Swarm", - description="A swarm for medical research and diagnostics", - agents=agents, - max_loops=1, - swarm_type="ConcurrentWorkflow", - output_type="str", - return_history=True, - task="What is the cause of the common cold?", -) - -client = SwarmsAPIClient( - api_key=os.getenv("SWARMS_API_KEY"), format_type="json" -) - -response = client.run(swarm_request) - -print(json.dumps(response, indent=4)) - - -``` - - ### `SequentialWorkflow` The SequentialWorkflow in the Swarms framework enables sequential task execution across multiple Agent objects. Each agent's output serves as input for the next agent in the sequence, continuing until reaching the specified maximum number of loops (max_loops). This workflow is particularly well-suited for tasks requiring a specific order of operations, such as data processing pipelines. 
To learn more, visit: [Learn More](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/) From 325199f4c2db2e9e48a4bf556bdad808bb5fac0d Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Fri, 27 Jun 2025 17:12:49 -0700 Subject: [PATCH 23/86] docs agent streaming with formatting name fix --- docs/examples/agent_stream.md | 62 ++++++++++++++ docs/mkdocs.yml | 1 + .../groupchat/quantum_physics_swarm.py | 0 .../multi_agent/mixture_of_agents_example.py | 0 chat.py => examples/ui/chat.py | 0 stream_example.py | 7 +- swarm_router_test.py | 81 ------------------ swarms/structs/agent.py | 66 ++++++++++----- swarms/utils/formatter.py | 82 ++++++++++++------- swarms/utils/litellm_wrapper.py | 2 +- 10 files changed, 168 insertions(+), 133 deletions(-) create mode 100644 docs/examples/agent_stream.md rename quantum_physics_swarm.py => examples/multi_agent/groupchat/quantum_physics_swarm.py (100%) rename mixture_of_agents_example.py => examples/multi_agent/mixture_of_agents_example.py (100%) rename chat.py => examples/ui/chat.py (100%) delete mode 100644 swarm_router_test.py diff --git a/docs/examples/agent_stream.md b/docs/examples/agent_stream.md new file mode 100644 index 00000000..2c5bc6b9 --- /dev/null +++ b/docs/examples/agent_stream.md @@ -0,0 +1,62 @@ +# Agent with Streaming + +The Swarms framework provides powerful real-time streaming capabilities for agents, allowing you to see responses being generated token by token as they're produced by the language model. This creates a more engaging and interactive experience, especially useful for long-form content generation, debugging, or when you want to provide immediate feedback to users. + +## Installation + +Install the swarms package using pip: + +```bash +pip install -U swarms +``` + +## Basic Setup + +1. 
First, set up your environment variables: + +```python +WORKSPACE_DIR="agent_workspace" +OPENAI_API_KEY="" +``` + +## Step by Step + +- Install and put your keys in `.env` + +- Turn on streaming in `Agent()` with `streaming_on=True` + +- Optional: If you want to pretty print it, you can do `print_on=True`; if not, it will print normally + +## Code + +```python +from swarms import Agent + +# Enable real-time streaming +agent = Agent( + agent_name="StoryAgent", + model_name="gpt-4o-mini", + streaming_on=True, # šŸ”„ This enables real streaming! + max_loops=1, + print_on=True, # By default, it's False for raw streaming! +) + +# This will now stream in real-time with a beautiful UI! +response = agent.run("Tell me a detailed story about humanity colonizing the stars") +print(response) +``` + +## Connect With Us + +If you'd like technical support, join our Discord below and stay updated on our Twitter for new updates! + +| Platform | Link | Description | +|----------|------|-------------| +| šŸ“š Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides | +| šŸ“ Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles | +| šŸ’¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support | +| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements | +| šŸ‘„ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates | +| šŸ“ŗ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos | +| šŸŽ« Events | [Sign up here](https://lu.ma/5p2jnc2v) | Join our community events | + diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index b3577652..dbbb8924 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -311,6 +311,7 @@ nav: # - PreBuilt Templates: "examples/templates_index.md" - Customizing Agents: - Basic Agent: 
"swarms/examples/basic_agent.md" + - Agent with Streaming: "examples/agent_stream.md" - Agents with Callable Tools: "swarms/examples/agent_with_tools.md" # - Agent With MCP Integration: "swarms/examples/agent_with_mcp.md" - Agent Output Types: "swarms/examples/agent_output_types.md" diff --git a/quantum_physics_swarm.py b/examples/multi_agent/groupchat/quantum_physics_swarm.py similarity index 100% rename from quantum_physics_swarm.py rename to examples/multi_agent/groupchat/quantum_physics_swarm.py diff --git a/mixture_of_agents_example.py b/examples/multi_agent/mixture_of_agents_example.py similarity index 100% rename from mixture_of_agents_example.py rename to examples/multi_agent/mixture_of_agents_example.py diff --git a/chat.py b/examples/ui/chat.py similarity index 100% rename from chat.py rename to examples/ui/chat.py diff --git a/stream_example.py b/stream_example.py index a09a4260..f7e3bca1 100644 --- a/stream_example.py +++ b/stream_example.py @@ -3,11 +3,12 @@ from swarms import Agent # Enable real-time streaming agent = Agent( agent_name="StoryAgent", - model_name="gpt-4o-mini", + model_name="gpt-4o-mini", streaming_on=True, # šŸ”„ This enables real streaming! max_loops=1, - print_on=True, # By Default its False, raw streaming !! + print_on=True, # By Default its False, raw streaming !! ) # This will now stream in real-time with beautiful UI! 
-response = agent.run("Tell me a detailed story...") \ No newline at end of file +response = agent.run("Tell me a detailed story about Humanity colonizing the stars") +print(response) \ No newline at end of file diff --git a/swarm_router_test.py b/swarm_router_test.py deleted file mode 100644 index 016953ff..00000000 --- a/swarm_router_test.py +++ /dev/null @@ -1,81 +0,0 @@ -import json -from swarms import Agent, SwarmRouter - -# Agent 1: Risk Metrics Calculator -risk_metrics_agent = Agent( - agent_name="Risk-Metrics-Calculator", - agent_description="Calculates key risk metrics like VaR, Sharpe ratio, and volatility", - system_prompt="""You are a risk metrics specialist. Calculate and explain: - - Value at Risk (VaR) - - Sharpe ratio - - Volatility - - Maximum drawdown - - Beta coefficient - - Provide clear, numerical results with brief explanations.""", - max_loops=1, - # model_name="gpt-4o-mini", - random_model_enabled=True, - dynamic_temperature_enabled=True, - output_type="str-all-except-first", - max_tokens=4096, -) - -# Agent 2: Portfolio Risk Analyzer -portfolio_risk_agent = Agent( - agent_name="Portfolio-Risk-Analyzer", - agent_description="Analyzes portfolio diversification and concentration risk", - system_prompt="""You are a portfolio risk analyst. Focus on: - - Portfolio diversification analysis - - Concentration risk assessment - - Correlation analysis - - Sector/asset allocation risk - - Liquidity risk evaluation - - Provide actionable insights for risk reduction.""", - max_loops=1, - # model_name="gpt-4o-mini", - random_model_enabled=True, - dynamic_temperature_enabled=True, - output_type="str-all-except-first", - max_tokens=4096, -) - -# Agent 3: Market Risk Monitor -market_risk_agent = Agent( - agent_name="Market-Risk-Monitor", - agent_description="Monitors market conditions and identifies risk factors", - system_prompt="""You are a market risk monitor. 
Identify and assess: - - Market volatility trends - - Economic risk factors - - Geopolitical risks - - Interest rate risks - - Currency risks - - Provide current risk alerts and trends.""", - max_loops=1, - # model_name="gpt-4o-mini", - random_model_enabled=True, - dynamic_temperature_enabled=True, - output_type="str-all-except-first", - max_tokens=4096, -) - - -swarm = SwarmRouter( - agents=[ - risk_metrics_agent, - portfolio_risk_agent, - ], - max_loops=1, - swarm_type="MixtureOfAgents", - output_type="final", -) - - -# swarm.run( -# "Calculate VaR and Sharpe ratio for a portfolio with 15% annual return and 20% volatility" -# ) - - -print(f"Swarm config: {json.dumps(swarm.to_dict(), indent=4)}") diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 34b3f6e0..f8175cda 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -286,7 +286,7 @@ class Agent: >>> response = agent.run("Generate a report on the financials.") >>> print(response) >>> # Generate a report on the financials. - + >>> # Real-time streaming example >>> agent = Agent(llm=llm, max_loops=1, streaming_on=True) >>> response = agent.run("Tell me a long story.") # Will stream in real-time @@ -1061,12 +1061,16 @@ class Agent: response = self.call_llm( task=task_prompt, img=img, + current_loop=loop_count, *args, **kwargs, ) else: response = self.call_llm( - task=task_prompt, *args, **kwargs + task=task_prompt, + current_loop=loop_count, + *args, + **kwargs, ) # Parse the response from the agent with the output type @@ -2463,7 +2467,12 @@ class Agent: return None def call_llm( - self, task: str, img: Optional[str] = None, *args, **kwargs + self, + task: str, + img: Optional[str] = None, + current_loop: int = 0, + *args, + **kwargs, ) -> str: """ Calls the appropriate method on the `llm` object based on the given task. 
@@ -2486,55 +2495,72 @@ class Agent: try: # Set streaming parameter in LLM if streaming is enabled - if self.streaming_on and hasattr(self.llm, 'stream'): + if self.streaming_on and hasattr(self.llm, "stream"): original_stream = self.llm.stream self.llm.stream = True - + if img is not None: streaming_response = self.llm.run( task=task, img=img, *args, **kwargs ) else: - streaming_response = self.llm.run(task=task, *args, **kwargs) - + streaming_response = self.llm.run( + task=task, *args, **kwargs + ) + # If we get a streaming response, handle it with the new streaming panel - if hasattr(streaming_response, '__iter__') and not isinstance(streaming_response, str): + if hasattr( + streaming_response, "__iter__" + ) and not isinstance(streaming_response, str): # Check print_on parameter for different streaming behaviors if self.print_on is False: # Show raw streaming text without formatting panels chunks = [] - print(f"\n{self.agent_name}: ", end="", flush=True) + print( + f"\n{self.agent_name}: ", + end="", + flush=True, + ) for chunk in streaming_response: - if hasattr(chunk, 'choices') and chunk.choices[0].delta.content: - content = chunk.choices[0].delta.content - print(content, end="", flush=True) # Print raw streaming text + if ( + hasattr(chunk, "choices") + and chunk.choices[0].delta.content + ): + content = chunk.choices[ + 0 + ].delta.content + print( + content, end="", flush=True + ) # Print raw streaming text chunks.append(content) print() # New line after streaming completes - complete_response = ''.join(chunks) + complete_response = "".join(chunks) else: # Collect chunks for conversation saving collected_chunks = [] - + def on_chunk_received(chunk: str): """Callback to collect chunks as they arrive""" collected_chunks.append(chunk) # Optional: Save each chunk to conversation in real-time # This creates a more detailed conversation history if self.verbose: - logger.debug(f"Streaming chunk received: {chunk[:50]}...") - + logger.debug( + f"Streaming chunk 
received: {chunk[:50]}..." + ) + # Use the streaming panel to display and collect the response complete_response = formatter.print_streaming_panel( streaming_response, - title=f"šŸ¤– {self.agent_name} Streaming Response", + title=f"šŸ¤– Agent: {self.agent_name} Loops: {current_loop}", style="bold cyan", collect_chunks=True, - on_chunk_callback=on_chunk_received + on_chunk_callback=on_chunk_received, ) - + # Restore original stream setting self.llm.stream = original_stream - + # Return the complete response for further processing return complete_response else: diff --git a/swarms/utils/formatter.py b/swarms/utils/formatter.py index 0d608f6f..c224fbed 100644 --- a/swarms/utils/formatter.py +++ b/swarms/utils/formatter.py @@ -10,6 +10,23 @@ from rich.table import Table from rich.text import Text +def choose_random_color(): + import random + + colors = [ + "red", + "green", + "blue", + "yellow", + "magenta", + "cyan", + "white", + ] + random_color = random.choice(colors) + + return random_color + + class Formatter: """ A class for formatting and printing rich text to the console. @@ -32,18 +49,8 @@ class Formatter: title (str, optional): The title of the panel. Defaults to "". style (str, optional): The style of the panel. Defaults to "bold blue". """ - import random - - colors = [ - "red", - "green", - "blue", - "yellow", - "magenta", - "cyan", - "white", - ] - random_color = random.choice(colors) + random_color = choose_random_color() + panel = Panel( content, title=title, style=f"bold {random_color}" ) @@ -149,7 +156,7 @@ class Formatter: self, streaming_response, title: str = "šŸ¤– Agent Streaming Response", - style: str = "bold cyan", + style: str = choose_random_color(), collect_chunks: bool = False, on_chunk_callback: Optional[Callable] = None, ) -> str: @@ -167,18 +174,19 @@ class Formatter: Returns: str: The complete accumulated response text. 
""" + def create_streaming_panel(text_obj, is_complete=False): """Create panel with proper text wrapping using Rich's built-in capabilities""" panel_title = f"[bold cyan]{title}[/bold cyan]" if is_complete: panel_title += " [bold green]āœ…[/bold green]" - + # Add blinking cursor if still streaming display_text = Text.from_markup("") display_text.append_text(text_obj) if not is_complete: display_text.append("ā–Š", style="bold green blink") - + panel = Panel( display_text, title=panel_title, @@ -195,36 +203,54 @@ class Formatter: # TRUE streaming with Rich's automatic text wrapping with Live( - create_streaming_panel(streaming_text), - console=self.console, - refresh_per_second=20 + create_streaming_panel(streaming_text), + console=self.console, + refresh_per_second=20, ) as live: try: for part in streaming_response: - if hasattr(part, 'choices') and part.choices and part.choices[0].delta.content: + if ( + hasattr(part, "choices") + and part.choices + and part.choices[0].delta.content + ): # Add ONLY the new chunk to the Text object chunk = part.choices[0].delta.content streaming_text.append(chunk, style="white") complete_response += chunk - + # Collect chunks if requested if collect_chunks: chunks_collected.append(chunk) - + # Call chunk callback if provided if on_chunk_callback: on_chunk_callback(chunk) - + # Update display with new text - Rich handles all wrapping automatically - live.update(create_streaming_panel(streaming_text, is_complete=False)) - + live.update( + create_streaming_panel( + streaming_text, is_complete=False + ) + ) + # Final update to show completion - live.update(create_streaming_panel(streaming_text, is_complete=True)) - + live.update( + create_streaming_panel( + streaming_text, is_complete=True + ) + ) + except Exception as e: # Handle any streaming errors gracefully - streaming_text.append(f"\n[Error: {str(e)}]", style="bold red") - live.update(create_streaming_panel(streaming_text, is_complete=True)) + streaming_text.append( + f"\n[Error: 
{str(e)}]", style="bold red" + ) + live.update( + create_streaming_panel( + streaming_text, is_complete=True + ) + ) return complete_response diff --git a/swarms/utils/litellm_wrapper.py b/swarms/utils/litellm_wrapper.py index 840ec073..01392b34 100644 --- a/swarms/utils/litellm_wrapper.py +++ b/swarms/utils/litellm_wrapper.py @@ -452,7 +452,7 @@ class LiteLLM: # Handle streaming response if self.stream: return response # Return the streaming generator directly - + # Handle tool-based response elif self.tools_list_dictionary is not None: return self.output_for_tools(response) From 954c2b520ede754ce8ec432a2bf127f1f96b3eba Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Sun, 29 Jun 2025 22:54:59 -0700 Subject: [PATCH 24/86] model providers in swarms examples --- docs/mkdocs.yml | 56 +++---- docs/swarms/examples/model_providers.md | 171 +++++++++++++++++++++ examples/multi_modal/multimodal_example.py | 10 +- stream_example.py | 7 +- swarms/structs/agent.py | 14 +- swarms/tools/base_tool.py | 4 +- swarms/utils/formatter.py | 2 +- 7 files changed, 215 insertions(+), 49 deletions(-) create mode 100644 docs/swarms/examples/model_providers.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index dbbb8924..5e8b06e9 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -52,29 +52,17 @@ extra: link: https://x.com/swarms_corp - icon: fontawesome/brands/github link: https://github.com/kyegomez/swarms - - icon: fontawesome/brands/twitter - link: https://x.com/swarms_corp - icon: fontawesome/brands/discord link: https://discord.gg/jM3Z6M9uMq + - icon: fontawesome/brands/youtube + link: https://www.youtube.com/@kyegomez3242 + - icon: fontawesome/brands/linkedin + link: https://www.linkedin.com/company/swarms-corp/ analytics: provider: google property: G-MPE9C65596 - # alternate: - # - name: English - # link: / - # lang: en - # - name: 简体中文 - # link: /zh/ - # lang: zh - # - name: ę—„ęœ¬čŖž - # link: /ja/ - # lang: ja - # - name: ķ•œźµ­ģ–“ - # link: /ko/ - # lang: ko - theme: name: 
material custom_dir: overrides @@ -290,20 +278,7 @@ nav: - Swarm Ecosystem: "swarms/concept/swarm_ecosystem.md" - Swarms Products: "swarms/products.md" - - Contributors: - - Swarms Framework Architecture: "swarms/concept/framework_architecture.md" - - Bounty Program: "corporate/bounty_program.md" - - Contributing to Documentation: "contributors/docs.md" - - Contributing Tools/Custom Plugins for Swarms: "contributors/tools.md" - - Contributing: - - Contributing: "swarms/contributing.md" - - Tests: "swarms/framework/test.md" - - Code Cleanliness: "swarms/framework/code_cleanliness.md" - - Philosophy: "swarms/concept/philosophy.md" - - Changelog: - - Swarms 5.6.8: "swarms/changelog/5_6_8.md" - - Swarms 5.8.1: "swarms/changelog/5_8_1.md" - - Swarms 5.9.2: "swarms/changelog/changelog_new.md" + - Examples: - Overview: "examples/index.md" @@ -320,7 +295,8 @@ nav: - Agent with Multiple Images: "swarms/examples/multiple_images.md" - Agents with Vision and Tool Usage: "swarms/examples/vision_tools.md" - Gradio Chat Interface: "swarms/ui/main.md" - - Various Model Providers: + - Model Providers: + - Overview: "swarms/examples/model_providers.md" - OpenAI: "swarms/examples/openai_example.md" - Anthropic: "swarms/examples/claude.md" - Groq: "swarms/examples/groq.md" @@ -424,8 +400,24 @@ nav: - Overview: "swarms_rs/overview.md" - Agents: "swarms_rs/agents.md" - - Resources: + + - Contributors: - Overview: "governance/main.md" + - Swarms Framework Architecture: "swarms/concept/framework_architecture.md" + - Bounty Program: "corporate/bounty_program.md" + - Contributing to Documentation: "contributors/docs.md" + - Contributing Tools/Custom Plugins for Swarms: "contributors/tools.md" + - Contributing: + - Contributing: "swarms/contributing.md" + - Tests: "swarms/framework/test.md" + - Code Cleanliness: "swarms/framework/code_cleanliness.md" + - Philosophy: "swarms/concept/philosophy.md" + - Changelog: + - Swarms 5.6.8: "swarms/changelog/5_6_8.md" + - Swarms 5.8.1: 
"swarms/changelog/5_8_1.md" + - Swarms 5.9.2: "swarms/changelog/changelog_new.md" + + # - Tokenomics: "web3/token.md" diff --git a/docs/swarms/examples/model_providers.md b/docs/swarms/examples/model_providers.md new file mode 100644 index 00000000..9b739bab --- /dev/null +++ b/docs/swarms/examples/model_providers.md @@ -0,0 +1,171 @@ +# Model Providers Overview + +Swarms supports a vast array of model providers, giving you the flexibility to choose the best model for your specific use case. Whether you need high-performance inference, cost-effective solutions, or specialized capabilities, Swarms has you covered. + +## Supported Model Providers + +| Provider | Description | Documentation | +|----------|-------------|---------------| +| **OpenAI** | Industry-leading language models including GPT-4, GPT-4o, and GPT-4o-mini. Perfect for general-purpose tasks, creative writing, and complex reasoning. | [OpenAI Integration](openai_example.md) | +| **Anthropic/Claude** | Advanced AI models known for their safety, helpfulness, and reasoning capabilities. Claude models excel at analysis, coding, and creative tasks. | [Claude Integration](claude.md) | +| **Groq** | Ultra-fast inference platform offering real-time AI responses. Ideal for applications requiring low latency and high throughput. | [Groq Integration](groq.md) | +| **Cohere** | Enterprise-grade language models with strong performance on business applications, text generation, and semantic search. | [Cohere Integration](cohere.md) | +| **DeepSeek** | Advanced reasoning models including the DeepSeek Reasoner (R1). Excellent for complex problem-solving and analytical tasks. | [DeepSeek Integration](deepseek.md) | +| **Ollama** | Local model deployment platform allowing you to run open-source models on your own infrastructure. No API keys required. | [Ollama Integration](ollama.md) | +| **OpenRouter** | Unified API gateway providing access to hundreds of models from various providers through a single interface. 
| [OpenRouter Integration](openrouter.md) | +| **XAI** | xAI's Grok models offering unique capabilities for research, analysis, and creative tasks with advanced reasoning abilities. | [XAI Integration](xai.md) | +| **vLLM** | High-performance inference library for serving large language models with optimized memory usage and throughput. | [vLLM Integration](vllm_integration.md) | +| **Llama4** | Meta's latest open-source language models including Llama-4-Maverick and Llama-4-Scout variants with expert routing capabilities. | [Llama4 Integration](llama4.md) | + +## Quick Start + +All model providers follow a consistent pattern in Swarms. Here's the basic template: + +```python +from swarms import Agent +import os +from dotenv import load_dotenv + +load_dotenv() + +# Initialize agent with your chosen model +agent = Agent( + agent_name="Your-Agent-Name", + model_name="gpt-4o-mini", # Varies by provider + system_prompt="Your system prompt here", + agent_description="Description of what your agent does.", +) + +# Run your agent +response = agent.run("Your query here") +``` + +## Model Selection Guide + +### For High-Performance Applications + +- **OpenAI GPT-4o**: Best overall performance and reasoning + +- **Anthropic Claude**: Excellent safety and analysis capabilities + +- **DeepSeek R1**: Advanced reasoning and problem-solving + +### For Cost-Effective Solutions + +- **OpenAI GPT-4o-mini**: Great performance at lower cost + +- **Ollama**: Free local deployment + +- **OpenRouter**: Access to cost-effective models + +### For Real-Time Applications + +- **Groq**: Ultra-fast inference + +- **vLLM**: Optimized for high throughput + +### For Specialized Tasks + +- **Llama4**: Expert routing for complex workflows + +- **XAI Grok**: Advanced research capabilities + +- **Cohere**: Strong business applications + +## Environment Setup + +Most providers require API keys. 
Add them to your `.env` file: + +```bash +# OpenAI +OPENAI_API_KEY=your_openai_key + +# Anthropic +ANTHROPIC_API_KEY=your_anthropic_key + +# Groq +GROQ_API_KEY=your_groq_key + +# Cohere +COHERE_API_KEY=your_cohere_key + +# DeepSeek +DEEPSEEK_API_KEY=your_deepseek_key + +# OpenRouter +OPENROUTER_API_KEY=your_openrouter_key + +# XAI +XAI_API_KEY=your_xai_key +``` + +!!! note "No API Key Required" + Ollama and vLLM can be run locally without API keys, making them perfect for development and testing. + +## Advanced Features + +### Multi-Model Workflows + +Swarms allows you to create workflows that use different models for different tasks: + +```python +from swarms import Agent, ConcurrentWorkflow + +# Research agent using Claude for analysis +research_agent = Agent( + agent_name="Research-Agent", + model_name="claude-3-sonnet-20240229", + system_prompt="You are a research expert." +) + +# Creative agent using GPT-4o for content generation +creative_agent = Agent( + agent_name="Creative-Agent", + model_name="gpt-4o", + system_prompt="You are a creative content expert." +) + +# Workflow combining both agents +workflow = ConcurrentWorkflow( + name="Research-Creative-Workflow", + agents=[research_agent, creative_agent] +) +``` + +### Model Routing + +Automatically route tasks to the most appropriate model: + +```python +from swarms import Agent, ModelRouter + +# Define model preferences for different task types +model_router = ModelRouter( + models={ + "analysis": "claude-3-sonnet-20240229", + "creative": "gpt-4o", + "fast": "gpt-4o-mini", + "local": "ollama/llama2" + } +) + +# Agent will automatically choose the best model +agent = Agent( + agent_name="Smart-Agent", + llm=model_router, + system_prompt="You are a versatile assistant." 
+) +``` + +## Getting Help + +- **Documentation**: Each provider has detailed documentation with examples + +- **Community**: Join the Swarms community for support and best practices + +- **Issues**: Report bugs and request features on GitHub + +- **Discussions**: Share your use cases and learn from others + +!!! success "Ready to Get Started?" + Choose a model provider from the table above and follow the detailed integration guide. Each provider offers unique capabilities that can enhance your Swarms applications. diff --git a/examples/multi_modal/multimodal_example.py b/examples/multi_modal/multimodal_example.py index 29060e96..19a2c996 100644 --- a/examples/multi_modal/multimodal_example.py +++ b/examples/multi_modal/multimodal_example.py @@ -1,18 +1,16 @@ -import json import logging from swarms.structs import Agent from swarms.prompts.logistics import ( Quality_Control_Agent_Prompt, ) -from swarms import BaseTool # Set up debug logging logging.basicConfig(level=logging.DEBUG) # Image for analysis -# factory_image="image.png" # normal image of a factory +# factory_image="image.png" # normal image of a factory -factory_image = "image2.png" # image of a burning factory +factory_image = "image2.png" # image of a burning factory def security_analysis(danger_level: str) -> str: @@ -52,13 +50,11 @@ def security_analysis(danger_level: str) -> str: 🚨 Operations may need to be suspended Recommendations: Immediate intervention required, evacuate if necessary, implement emergency protocols, and conduct thorough security review.""" - + else: return f"ERROR: Invalid danger level '{danger_level}'. Must be 'low', 'medium', or 'high'." - - # Custom system prompt that includes tool usage custom_system_prompt = f""" {Quality_Control_Agent_Prompt} diff --git a/stream_example.py b/stream_example.py index f7e3bca1..bc467691 100644 --- a/stream_example.py +++ b/stream_example.py @@ -7,8 +7,11 @@ agent = Agent( streaming_on=True, # šŸ”„ This enables real streaming! 
max_loops=1, print_on=True, # By Default its False, raw streaming !! + output_type="all", ) # This will now stream in real-time with beautiful UI! -response = agent.run("Tell me a detailed story about Humanity colonizing the stars") -print(response) \ No newline at end of file +response = agent.run( + "Tell me a detailed story about Humanity colonizing the stars" +) +print(response) diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index f8175cda..87eb131d 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -641,11 +641,15 @@ class Agent: ) def short_memory_init(self): - if ( - self.agent_name is not None - or self.agent_description is not None - ): - prompt = f"\n Your Name: {self.agent_name} \n\n Your Description: {self.agent_description} \n\n {self.system_prompt}" + prompt = "" + + # Add agent name, description, and instructions to the prompt + if self.agent_name is not None: + prompt += f"\n Name: {self.agent_name}" + elif self.agent_description is not None: + prompt += f"\n Description: {self.agent_description}" + elif self.system_prompt is not None: + prompt += f"\n Instructions: {self.system_prompt}" else: prompt = self.system_prompt diff --git a/swarms/tools/base_tool.py b/swarms/tools/base_tool.py index 0aa57d44..af08f11e 100644 --- a/swarms/tools/base_tool.py +++ b/swarms/tools/base_tool.py @@ -2226,8 +2226,8 @@ class BaseTool(BaseModel): # Handle None API response gracefully by returning empty results if api_response is None: self._log_if_verbose( - "warning", - "API response is None, returning empty results. This may indicate the LLM did not return a valid response." + "warning", + "API response is None, returning empty results. 
This may indicate the LLM did not return a valid response.", ) return [] if not return_as_string else [] diff --git a/swarms/utils/formatter.py b/swarms/utils/formatter.py index c224fbed..5a4b8c2e 100644 --- a/swarms/utils/formatter.py +++ b/swarms/utils/formatter.py @@ -50,7 +50,7 @@ class Formatter: style (str, optional): The style of the panel. Defaults to "bold blue". """ random_color = choose_random_color() - + panel = Panel( content, title=title, style=f"bold {random_color}" ) From 3ef0d5f7d039d8dd51a3552d756588ed6cb4b179 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Sun, 29 Jun 2025 23:08:13 -0700 Subject: [PATCH 25/86] contributors guide --- docs/contributors/main.md | 223 ++++++++++++++++++++++++++++++++++++++ docs/mkdocs.yml | 8 +- 2 files changed, 228 insertions(+), 3 deletions(-) create mode 100644 docs/contributors/main.md diff --git a/docs/contributors/main.md b/docs/contributors/main.md new file mode 100644 index 00000000..5f0fb9c8 --- /dev/null +++ b/docs/contributors/main.md @@ -0,0 +1,223 @@ +# Contributing to Swarms + +## Building the Infrastructure for the World's Autonomous Economy + +Multi-agent collaboration is the most important technology in human history. It will reshape civilization by enabling billions of autonomous agents to coordinate and solve problems at unprecedented scale. + +!!! success "The Foundation of Tomorrow" + **Swarms** is the foundational infrastructure powering this autonomous economy. By contributing, you're building the systems that will enable the next generation of intelligent automation. 
+ +### What You're Building + +=== "Autonomous Systems" + **Autonomous Resource Allocation** + + Global supply chains and energy distribution optimized in real-time + +=== "Intelligence Networks" + **Distributed Decision Making** + + Collaborative intelligence networks across industries and governments + +=== "Smart Markets" + **Self-Organizing Markets** + + Agent-driven marketplaces that automatically balance supply and demand + +=== "Problem Solving" + **Collaborative Problem Solving** + + Massive agent swarms tackling climate change, disease, and scientific discovery + +=== "Infrastructure" + **Adaptive Infrastructure** + + Self-healing systems that evolve without human intervention + +--- + +## Why Contribute to Swarms? + +### :material-rocket-launch: Shape the Future of Civilization + +!!! abstract "Your Impact" + - Define standards for multi-agent communication protocols + - Build architectural patterns for distributed intelligence systems + - Create frameworks for deploying agent swarms in production + - Establish ethical guidelines for autonomous agent collaboration + +### :material-trophy: Recognition and Professional Development + +!!! tip "Immediate Recognition" + - **Social Media Features** - All merged PRs showcased publicly + - **Bounty Programs** - Financial rewards for high-impact contributions + - **Fast-Track Hiring** - Priority consideration for core team positions + - **Community Spotlights** - Regular recognition and acknowledgments + +!!! 
info "Career Benefits" + - Multi-agent expertise highly valued by AI industry + - Portfolio demonstrates cutting-edge technical skills + - Direct networking with leading researchers and companies + - Thought leadership opportunities in emerging field + +### :material-brain: Technical Expertise Development + +Master cutting-edge technologies: + +| Technology Area | Skills You'll Develop | +|----------------|----------------------| +| **Swarm Intelligence** | Design sophisticated agent coordination mechanisms | +| **Distributed Computing** | Build scalable architectures for thousands of agents | +| **Communication Protocols** | Create novel interaction patterns | +| **Production AI** | Deploy and orchestrate enterprise-scale systems | +| **Research Implementation** | Turn cutting-edge papers into working code | + +### :material-account-group: Research Community Access + +!!! note "Collaborative Environment" + - Work with experts from academic institutions and industry + - Regular technical seminars and research discussions + - Structured mentorship from experienced contributors + - Applied research opportunities with real-world impact + +--- + +## Contribution Opportunities + +=== "New Contributors" + ### :material-school: Perfect for Getting Started + + - **Documentation** - Improve guides, tutorials, and API references + - **Bug Reports** - Identify and document issues + - **Code Quality** - Participate in testing and review processes + - **Community Support** - Help users in forums and discussions + +=== "Experienced Developers" + ### :material-code-braces: Advanced Technical Work + + - **Core Architecture** - Design fundamental system components + - **Performance Optimization** - Enhance coordination and communication efficiency + - **Research Implementation** - Turn cutting-edge papers into working code + - **Integration Development** - Build connections with AI tools and platforms + +=== "Researchers" + ### :material-flask: Research and Innovation + + - 
**Algorithm Development** - Implement novel multi-agent algorithms + - **Experimental Frameworks** - Create evaluation and benchmarking tools + - **Theoretical Contributions** - Develop research documentation and frameworks + - **Academic Collaboration** - Partner on funded research projects + +--- + +## How to Contribute + +### Step 1: Get Started + +!!! info "Essential Resources" + [:material-book-open-page-variant: **Documentation**](https://docs.swarms.world/en/latest/){ .md-button .md-button--primary } + [:material-github: **GitHub Repository**](https://github.com/kyegomez/swarms){ .md-button } + [:material-chat: **Community Channels**](#){ .md-button } + +### Step 2: Find Your Path + +```mermaid +graph TD + A[Choose Your Path] --> B[Browse Issues] + A --> C[Review Roadmap] + A --> D[Propose Ideas] + B --> E[good first issue] + B --> F[help wanted] + C --> G[Core Features] + C --> H[Research Areas] + D --> I[Discussion Forums] +``` + +### Step 3: Make Impact + +1. **Fork & Setup** - Configure your development environment +2. **Develop** - Create your contribution +3. **Submit** - Open a pull request +4. **Collaborate** - Work with maintainers +5. **Celebrate** - See your work recognized + +--- + +## Recognition Framework + +### :material-flash: Immediate Benefits + +!!! success "Instant Recognition" + | Benefit | Description | + |---------|-------------| + | **Social Media Features** | Every merged PR showcased publicly | + | **Community Recognition** | Contributor badges and documentation credits | + | **Professional References** | Formal acknowledgment for portfolios | + | **Direct Mentorship** | Access to core team guidance | + +### :material-trending-up: Long-term Opportunities + +!!! 
tip "Career Growth" + - **Team Positions** - Fast-track consideration for core team roles + - **Conference Speaking** - Present work at AI conferences and events + - **Industry Connections** - Network with leading AI organizations + - **Research Collaboration** - Partner with academic institutions + +--- + +## Societal Impact + +!!! abstract "Building Solutions for Humanity" + Swarms enables technology that addresses critical challenges: + + === "Research" + **Scientific Research** + + Accelerate collaborative research and discovery across disciplines + + === "Healthcare" + **Healthcare Innovation** + + Support drug discovery and personalized medicine development + + === "Environment" + **Environmental Solutions** + + Monitor climate and optimize sustainability initiatives + + === "Education" + **Educational Technology** + + Create adaptive learning systems for personalized education + + === "Economy" + **Economic Innovation** + + Generate new opportunities and efficiency improvements + +--- + +## Get Involved + +### :material-link: Connect With Us + +!!! info "Join the Community" + [:material-github: **GitHub Repository**](https://github.com/kyegomez/swarms){ .md-button .md-button--primary } + [:material-book: **Documentation**](https://docs.swarms.world/en/latest/){ .md-button } + [:material-forum: **Community Forums**](#){ .md-button } + +--- + +!!! warning "The Future is Now" + Multi-agent collaboration will define the next century of human progress. The autonomous economy depends on the infrastructure we build today. + +!!! success "Your Mission" + Your contribution to Swarms helps create the foundation for billions of autonomous agents working together to solve humanity's greatest challenges. + + **Join us in building the most important technology of our time.** + +--- + +
+*Built with :material-heart: by the global Swarms community* +
\ No newline at end of file diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 5e8b06e9..2c867cb3 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -402,20 +402,22 @@ nav: - Contributors: - - Overview: "governance/main.md" + - Overview: "contributors/main.md" - Swarms Framework Architecture: "swarms/concept/framework_architecture.md" - Bounty Program: "corporate/bounty_program.md" - - Contributing to Documentation: "contributors/docs.md" - - Contributing Tools/Custom Plugins for Swarms: "contributors/tools.md" - Contributing: - Contributing: "swarms/contributing.md" - Tests: "swarms/framework/test.md" - Code Cleanliness: "swarms/framework/code_cleanliness.md" - Philosophy: "swarms/concept/philosophy.md" + - Contributing Tools & Custom Plugins: "contributors/tools.md" + - Contributing to Documentation: "contributors/docs.md" - Changelog: - Swarms 5.6.8: "swarms/changelog/5_6_8.md" - Swarms 5.8.1: "swarms/changelog/5_8_1.md" - Swarms 5.9.2: "swarms/changelog/changelog_new.md" + - Links and Resources: + - Overview: "governance/main.md" # - Tokenomics: "web3/token.md" From 47cea29b4a71cbca43f800f49d6ca99994ef2f4d Mon Sep 17 00:00:00 2001 From: Kye Gomez <98760976+kyegomez@users.noreply.github.com> Date: Sun, 29 Jun 2025 23:13:11 -0700 Subject: [PATCH 26/86] Update README.md --- README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/README.md b/README.md index fbcfbde2..7dc08e41 100644 --- a/README.md +++ b/README.md @@ -1997,6 +1997,16 @@ Swarms is an open-source project, and contributions are VERY welcome. If you wan If you use **swarms** in your research, please cite the project by referencing the metadata in [CITATION.cff](./CITATION.cff). 
+```bibtex +@software{SWARMS_2O22, + author = {Kye Gomez, Pliny, Swarms Community}, + title = {Swarms: Production-Grade Multi-Agent Infrastructure Platform}, + year = {2022}, + url = {https://github.com/kyegomez/swarms}, + note = {docs.swarms.world}, + version = {latest} +} +``` # License From 1f31c769bee0dce882c75c864695ad35863d57bf Mon Sep 17 00:00:00 2001 From: Kye Gomez <98760976+kyegomez@users.noreply.github.com> Date: Sun, 29 Jun 2025 23:13:41 -0700 Subject: [PATCH 27/86] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7dc08e41..31f9d709 100644 --- a/README.md +++ b/README.md @@ -1999,7 +1999,7 @@ If you use **swarms** in your research, please cite the project by referencing t ```bibtex @software{SWARMS_2O22, - author = {Kye Gomez, Pliny, Swarms Community}, + author = {Kye Gomez, Pliny, Harshal More, Swarms Community}, title = {Swarms: Production-Grade Multi-Agent Infrastructure Platform}, year = {2022}, url = {https://github.com/kyegomez/swarms}, From 06f8741f8fe4f87b0aeeed0547970318713cf26e Mon Sep 17 00:00:00 2001 From: Kye Gomez <98760976+kyegomez@users.noreply.github.com> Date: Sun, 29 Jun 2025 23:14:30 -0700 Subject: [PATCH 28/86] Update README.md --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 31f9d709..43b3d492 100644 --- a/README.md +++ b/README.md @@ -1998,12 +1998,12 @@ Swarms is an open-source project, and contributions are VERY welcome. If you wan If you use **swarms** in your research, please cite the project by referencing the metadata in [CITATION.cff](./CITATION.cff). 
```bibtex -@software{SWARMS_2O22, - author = {Kye Gomez, Pliny, Harshal More, Swarms Community}, - title = {Swarms: Production-Grade Multi-Agent Infrastructure Platform}, - year = {2022}, - url = {https://github.com/kyegomez/swarms}, - note = {docs.swarms.world}, +@misc{SWARMS_2022, + author = {Gomez, Kye and Pliny and More, Harshal and Swarms Community}, + title = {{Swarms: Production-Grade Multi-Agent Infrastructure Platform}}, + year = {2022}, + howpublished = {\url{https://github.com/kyegomez/swarms}}, + note = {Documentation available at \url{https://docs.swarms.world}}, version = {latest} } ``` From ec43ba46eaf4da0987341bec2a5de8957d9635ae Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Sun, 29 Jun 2025 23:23:41 -0700 Subject: [PATCH 29/86] docs contributors --- docs/contributors/main.md | 4 +--- docs/mkdocs.yml | 18 +++++++++--------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/docs/contributors/main.md b/docs/contributors/main.md index 5f0fb9c8..e69ec8a3 100644 --- a/docs/contributors/main.md +++ b/docs/contributors/main.md @@ -1,6 +1,4 @@ -# Contributing to Swarms - -## Building the Infrastructure for the World's Autonomous Economy +# Contributing to Swarms: Building the Infrastructure for The Agentic Economy Multi-agent collaboration is the most important technology in human history. It will reshape civilization by enabling billions of autonomous agents to coordinate and solve problems at unprecedented scale. 
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 2c867cb3..d815edf8 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -403,21 +403,21 @@ nav: - Contributors: - Overview: "contributors/main.md" - - Swarms Framework Architecture: "swarms/concept/framework_architecture.md" - Bounty Program: "corporate/bounty_program.md" - - Contributing: - - Contributing: "swarms/contributing.md" - - Tests: "swarms/framework/test.md" + - Links & Resources: "governance/main.md" + - Learn more: + - Swarms Framework Architecture: "swarms/concept/framework_architecture.md" - Code Cleanliness: "swarms/framework/code_cleanliness.md" - - Philosophy: "swarms/concept/philosophy.md" - - Contributing Tools & Custom Plugins: "contributors/tools.md" - - Contributing to Documentation: "contributors/docs.md" + - Code Philosophy: "swarms/concept/philosophy.md" + - Contributing: + - Overview: "swarms/contributing.md" + - Adding New Tests: "swarms/framework/test.md" + - Add New Tools & Custom Plugins: "contributors/tools.md" + - Add New Documentations: "contributors/docs.md" - Changelog: - Swarms 5.6.8: "swarms/changelog/5_6_8.md" - Swarms 5.8.1: "swarms/changelog/5_8_1.md" - Swarms 5.9.2: "swarms/changelog/changelog_new.md" - - Links and Resources: - - Overview: "governance/main.md" # - Tokenomics: "web3/token.md" From 45ac28246184c47088782dd12e77e9ad9981888b Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Sun, 29 Jun 2025 23:25:44 -0700 Subject: [PATCH 30/86] contributors docs sections --- docs/mkdocs.yml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index d815edf8..dc0975ff 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -405,12 +405,11 @@ nav: - Overview: "contributors/main.md" - Bounty Program: "corporate/bounty_program.md" - Links & Resources: "governance/main.md" - - Learn more: - - Swarms Framework Architecture: "swarms/concept/framework_architecture.md" - - Code Cleanliness: "swarms/framework/code_cleanliness.md" - - 
Code Philosophy: "swarms/concept/philosophy.md" + - Learn More: + - Framework Architecture Overview: "swarms/concept/framework_architecture.md" + - Code Quality Standards: "swarms/framework/code_cleanliness.md" + - Agentic Development Philosophy: "swarms/concept/philosophy.md" - Contributing: - - Overview: "swarms/contributing.md" - Adding New Tests: "swarms/framework/test.md" - Add New Tools & Custom Plugins: "contributors/tools.md" - Add New Documentations: "contributors/docs.md" From a070d447ea0cb4b0412ba8bfb618f70a50b51b6b Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Sun, 29 Jun 2025 23:29:54 -0700 Subject: [PATCH 31/86] contributors section --- docs/mkdocs.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index dc0975ff..9ca82485 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -406,13 +406,13 @@ nav: - Bounty Program: "corporate/bounty_program.md" - Links & Resources: "governance/main.md" - Learn More: - - Framework Architecture Overview: "swarms/concept/framework_architecture.md" - - Code Quality Standards: "swarms/framework/code_cleanliness.md" - - Agentic Development Philosophy: "swarms/concept/philosophy.md" + - Understanding Swarms Architecture: "swarms/concept/framework_architecture.md" + - Code Style Guide & Best Practices: "swarms/framework/code_cleanliness.md" + - Our Development Philosophy & Principles: "swarms/concept/philosophy.md" - Contributing: - - Adding New Tests: "swarms/framework/test.md" - - Add New Tools & Custom Plugins: "contributors/tools.md" - - Add New Documentations: "contributors/docs.md" + - Writing and Adding Tests: "swarms/framework/test.md" + - Creating Custom Tools & Plugins: "contributors/tools.md" + - Writing Documentation: "contributors/docs.md" - Changelog: - Swarms 5.6.8: "swarms/changelog/5_6_8.md" - Swarms 5.8.1: "swarms/changelog/5_8_1.md" From 6084ebc7b5483a24918ca374edfe7772a50de813 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 30 Jun 
2025 10:45:46 -0700 Subject: [PATCH 32/86] readme --- README.md | 1825 ++++----------------------------------------- stream_example.py | 2 +- 2 files changed, 152 insertions(+), 1675 deletions(-) diff --git a/README.md b/README.md index 43b3d492..8dbaa16d 100644 --- a/README.md +++ b/README.md @@ -171,6 +171,8 @@ $ pip install -e . ## Environment Configuration +[Learn more about the environment configuration here](https://docs.swarms.world/en/latest/swarms/install/env/) + ``` OPENAI_API_KEY="" WORKSPACE_DIR="agent_workspace" @@ -178,1783 +180,258 @@ ANTHROPIC_API_KEY="" GROQ_API_KEY="" ``` -- [Learn more about the environment configuration here](https://docs.swarms.world/en/latest/swarms/install/env/) - ---- - -## `Agent` Class -The `Agent` class is a customizable autonomous component of the Swarms framework that integrates LLMs, tools, and long-term memory. Its `run` method processes text tasks and optionally handles image inputs through vision-language models. - -```mermaid -graph TD - A[Agent] --> B[Initialize] - B --> C[Process Task] - C --> D[Execute Tools] - D --> E[Generate Response] - E --> F[Return Output] - C --> G[Long-term Memory] - G --> C -``` +### šŸ¤– Your First Agent - - -## Simple Example +An **Agent** is the fundamental building block of a swarm—an autonomous entity powered by a large language model (LLM). 
```python from swarms import Agent +# Initialize a new agent agent = Agent( - agent_name="Stock-Analysis-Agent", - model_name="gpt-4o-mini", - max_loops="auto", - interactive=True, - streaming_on=True, + model_name="gpt-4o-mini", # Specify the LLM + max_loops=1, # Set the number of interactions + interactive=True, # Enable interactive mode for real-time feedback ) -agent.run("What is the current market trend for tech stocks?") - +# Run the agent with a task +agent.run("What are the key benefits of using a multi-agent system?") ``` -### Settings and Customization - -The `Agent` class offers a range of settings to tailor its behavior to specific needs. Some key settings include: - -| Setting | Description | Default Value | -| --- | --- | --- | -| `agent_name` | The name of the agent. | "DefaultAgent" | -| `system_prompt` | The system prompt to use for the agent. | "Default system prompt." | -| `llm` | The language model to use for processing tasks. | `OpenAIChat` instance | -| `max_loops` | The maximum number of loops to execute for a task. | 1 | -| `autosave` | Enables or disables autosaving of the agent's state. | False | -| `dashboard` | Enables or disables the dashboard for the agent. | False | -| `verbose` | Controls the verbosity of the agent's output. | False | -| `dynamic_temperature_enabled` | Enables or disables dynamic temperature adjustment for the language model. | False | -| `saved_state_path` | The path to save the agent's state. | "agent_state.json" | -| `user_name` | The username associated with the agent. | "default_user" | -| `retry_attempts` | The number of retry attempts for failed tasks. | 1 | -| `context_length` | The maximum length of the context to consider for tasks. | 200000 | -| `return_step_meta` | Controls whether to return step metadata in the output. | False | -| `output_type` | The type of output to return (e.g., "json", "string"). 
| "string" | +### šŸ¤ Your First Swarm: Multi-Agent Collaboration +A **Swarm** consists of multiple agents working together. This simple example creates a two-agent workflow for researching and writing a blog post. ```python -import os -from swarms import Agent +from swarms import Agent, SequentialWorkflow -from swarms.prompts.finance_agent_sys_prompt import ( - FINANCIAL_AGENT_SYS_PROMPT, -) -# Initialize the agent -agent = Agent( - agent_name="Financial-Analysis-Agent", - system_prompt=FINANCIAL_AGENT_SYS_PROMPT, +# Agent 1: The Researcher +researcher = Agent( + agent_name="Researcher", + system_prompt="Your job is to research the provided topic and provide a detailed summary.", model_name="gpt-4o-mini", - max_loops=1, - autosave=True, - dashboard=False, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="finance_agent.json", - user_name="swarms_corp", - retry_attempts=1, - context_length=200000, - return_step_meta=False, - output_type="string", - streaming_on=False, -) - - -agent.run( - "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria" -) - -``` ------ - -### Integrating RAG with Swarms for Enhanced Long-Term Memory - -`Agent` equipped with quasi-infinite long term memory using RAG (Relational Agent Graph) for advanced document understanding, analysis, and retrieval capabilities. 
- -**Mermaid Diagram for RAG Integration** -```mermaid -graph TD - A[Initialize Agent with RAG] --> B[Receive Task] - B --> C[Query Long-Term Memory] - C --> D[Process Task with Context] - D --> E[Generate Response] - E --> F[Update Long-Term Memory] - F --> G[Return Output] -``` - -```python -from swarms import Agent -from swarms.prompts.finance_agent_sys_prompt import ( - FINANCIAL_AGENT_SYS_PROMPT, ) -import os -from swarms_memory import ChromaDB - -# Initialize the ChromaDB client for long-term memory management -chromadb = ChromaDB( - metric="cosine", # Metric for similarity measurement - output_dir="finance_agent_rag", # Directory for storing RAG data - # docs_folder="artifacts", # Uncomment and specify the folder containing your documents -) - -# Initialize the agent with RAG capabilities -agent = Agent( - agent_name="Financial-Analysis-Agent", - system_prompt=FINANCIAL_AGENT_SYS_PROMPT, - agent_description="Agent creates a comprehensive financial analysis", +# Agent 2: The Writer +writer = Agent( + agent_name="Writer", + system_prompt="Your job is to take the research summary and write a beautiful, engaging blog post about it.", model_name="gpt-4o-mini", - max_loops="auto", # Auto-adjusts loops based on task complexity - autosave=True, # Automatically saves agent state - dashboard=False, # Disables dashboard for this example - verbose=True, # Enables verbose mode for detailed output - streaming_on=True, # Enables streaming for real-time processing - dynamic_temperature_enabled=True, # Dynamically adjusts temperature for optimal performance - saved_state_path="finance_agent.json", # Path to save agent state - user_name="swarms_corp", # User name for the agent - retry_attempts=3, # Number of retry attempts for failed tasks - context_length=200000, # Maximum length of the context to consider - long_term_memory=chromadb, # Integrates ChromaDB for long-term memory management - return_step_meta=False, - output_type="string", -) - -# Run the agent with a sample 
task -agent.run( - "What are the components of a startup's stock incentive equity plan" -) -``` - - -## Structured Outputs - -1. Create a tool schema -2. Create a function schema -3. Create a tool list dictionary -4. Initialize the agent -5. Run the agent -6. Print the output -7. Convert the output to a dictionary - -```python - -from dotenv import load_dotenv - -from swarms import Agent -from swarms.prompts.finance_agent_sys_prompt import ( - FINANCIAL_AGENT_SYS_PROMPT, -) -from swarms.utils.str_to_dict import str_to_dict - -load_dotenv() - -tools = [ - { - "type": "function", - "function": { - "name": "get_stock_price", - "description": "Retrieve the current stock price and related information for a specified company.", - "parameters": { - "type": "object", - "properties": { - "ticker": { - "type": "string", - "description": "The stock ticker symbol of the company, e.g. AAPL for Apple Inc.", - }, - "include_history": { - "type": "boolean", - "description": "Indicates whether to include historical price data along with the current price.", - }, - "time": { - "type": "string", - "format": "date-time", - "description": "Optional parameter to specify the time for which the stock data is requested, in ISO 8601 format.", - }, - }, - "required": [ - "ticker", - "include_history", - "time", - ], - }, - }, - } -] - - -# Initialize the agent -agent = Agent( - agent_name="Financial-Analysis-Agent", - agent_description="Personal finance advisor agent", - system_prompt=FINANCIAL_AGENT_SYS_PROMPT, - max_loops=1, - tools_list_dictionary=tools, -) - -out = agent.run( - "What is the current stock price for Apple Inc. (AAPL)? Include historical price data.", -) - -print(out) - -print(type(out)) - -print(str_to_dict(out)) - -print(type(str_to_dict(out))) -``` - -------- - -### Misc Agent Settings -We provide vast array of features to save agent states using json, yaml, toml, upload pdfs, batched jobs, and much more! 
- - -**Method Table** - -| Method | Description | -| --- | --- | -| `to_dict()` | Converts the agent object to a dictionary. | -| `to_toml()` | Converts the agent object to a TOML string. | -| `model_dump_json()` | Dumps the model to a JSON file. | -| `model_dump_yaml()` | Dumps the model to a YAML file. | -| `ingest_docs()` | Ingests documents into the agent's knowledge base. | -| `receive_message()` | Receives a message from a user and processes it. | -| `send_agent_message()` | Sends a message from the agent to a user. | -| `filtered_run()` | Runs the agent with a filtered system prompt. | -| `bulk_run()` | Runs the agent with multiple system prompts. | -| `add_memory()` | Adds a memory to the agent. | -| `check_available_tokens()` | Checks the number of available tokens for the agent. | -| `tokens_checks()` | Performs token checks for the agent. | -| `print_dashboard()` | Prints the dashboard of the agent. | -| `get_docs_from_doc_folders()` | Fetches all the documents from the doc folders. | - - - -```python -# # Convert the agent object to a dictionary -print(agent.to_dict()) -print(agent.to_toml()) -print(agent.model_dump_json()) -print(agent.model_dump_yaml()) - -# Ingest documents into the agent's knowledge base -("your_pdf_path.pdf") - -# Receive a message from a user and process it -agent.receive_message(name="agent_name", message="message") - -# Send a message from the agent to a user -agent.send_agent_message(agent_name="agent_name", message="message") - -# Ingest multiple documents into the agent's knowledge base -agent.ingest_docs("your_pdf_path.pdf", "your_csv_path.csv") - -# Run the agent with a filtered system prompt -agent.filtered_run( - "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?" ) -# Run the agent with multiple system prompts -agent.bulk_run( - [ - "How can I establish a ROTH IRA to buy stocks and get a tax break? 
What are the criteria?", - "Another system prompt", - ] -) - -# Add a memory to the agent -agent.add_memory("Add a memory to the agent") - -# Check the number of available tokens for the agent -agent.check_available_tokens() - -# Perform token checks for the agent -agent.tokens_checks() - -# Print the dashboard of the agent -agent.print_dashboard() - -# Fetch all the documents from the doc folders -agent.get_docs_from_doc_folders() - -# Activate agent ops - -# Dump the model to a JSON file -agent.model_dump_json() -print(agent.to_toml()) - -``` - - - -### `Agent`with Pydantic BaseModel as Output Type -The following is an example of an agent that intakes a pydantic basemodel and outputs it at the same time: - -```python -from pydantic import BaseModel, Field -from swarms import Agent - - -# Initialize the schema for the person's information -class Schema(BaseModel): - name: str = Field(..., title="Name of the person") - agent: int = Field(..., title="Age of the person") - is_student: bool = Field(..., title="Whether the person is a student") - courses: list[str] = Field( - ..., title="List of courses the person is taking" - ) - - -# Convert the schema to a JSON string -tool_schema = Schema( - name="Tool Name", - agent=1, - is_student=True, - courses=["Course1", "Course2"], -) - -# Define the task to generate a person's information -task = "Generate a person's information based on the following schema:" - -# Initialize the agent -agent = Agent( - agent_name="Person Information Generator", - system_prompt=( - "Generate a person's information based on the following schema:" - ), - # Set the tool schema to the JSON string -- this is the key difference - tool_schema=tool_schema, - model_name="gpt-4o", - max_loops=3, - autosave=True, - dashboard=False, - streaming_on=True, - verbose=True, - interactive=True, - # Set the output type to the tool schema which is a BaseModel - output_type=tool_schema, # or dict, or str - metadata_output_type="json", - # List of schemas that 
the agent can handle - list_base_models=[tool_schema], - function_calling_format_type="OpenAI", - function_calling_type="json", # or soon yaml -) - -# Run the agent to generate the person's information -generated_data = agent.run(task) - -# Print the generated data -print(f"Generated data: {generated_data}") - - -``` - -### Multi Modal Autonomous Agent -Run the agent with multiple modalities useful for various real-world tasks in manufacturing, logistics, and health. - -```python -import os -from dotenv import load_dotenv -from swarms import Agent - -from swarm_models import GPT4VisionAPI - -# Load the environment variables -load_dotenv() - - -# Initialize the language model -llm = GPT4VisionAPI( - openai_api_key=os.environ.get("OPENAI_API_KEY"), - max_tokens=500, -) - -# Initialize the task -task = ( - "Analyze this image of an assembly line and identify any issues such as" - " misaligned parts, defects, or deviations from the standard assembly" - " process. If there is anything unsafe in the image, explain why it is" - " unsafe and how it could be improved." -) -img = "assembly_line.jpg" - -## Initialize the workflow -agent = Agent( - agent_name = "Multi-ModalAgent", - llm=llm, - max_loops="auto", - autosave=True, - dashboard=True, - multi_modal=True -) +# Create a sequential workflow where the researcher's output feeds into the writer's input +workflow = SequentialWorkflow(agents=[researcher, writer]) # Run the workflow on a task -agent.run(task, img) -``` ----- - - -### Local Agent `ToolAgent` -ToolAgent is a fully local agent that can use tools through JSON function calling. It intakes any open source model from huggingface and is extremely modular and plug in and play. We need help adding general support to all models soon. 
- - -```python -from pydantic import BaseModel, Field -from transformers import AutoModelForCausalLM, AutoTokenizer - -from swarms import ToolAgent -from swarms.tools.json_utils import base_model_to_json - -# Load the pre-trained model and tokenizer -model = AutoModelForCausalLM.from_pretrained( - "databricks/dolly-v2-12b", - load_in_4bit=True, - device_map="auto", -) -tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b") - - -# Initialize the schema for the person's information -class Schema(BaseModel): - name: str = Field(..., title="Name of the person") - agent: int = Field(..., title="Age of the person") - is_student: bool = Field( - ..., title="Whether the person is a student" - ) - courses: list[str] = Field( - ..., title="List of courses the person is taking" - ) - - -# Convert the schema to a JSON string -tool_schema = base_model_to_json(Schema) - -# Define the task to generate a person's information -task = ( - "Generate a person's information based on the following schema:" -) - -# Create an instance of the ToolAgent class -agent = ToolAgent( - name="dolly-function-agent", - description="An agent to create a child's data", - model=model, - tokenizer=tokenizer, - json_schema=tool_schema, -) - -# Run the agent to generate the person's information -generated_data = agent.run(task) - -# Print the generated data -print(f"Generated data: {generated_data}") - -``` - - -## Understanding Swarms - -A swarm refers to a group of more than two agents working collaboratively to achieve a common goal. These agents can be software entities, such as llms that interact with each other to perform complex tasks. The concept of a swarm is inspired by natural systems like ant colonies or bird flocks, where simple individual behaviors lead to complex group dynamics and problem-solving capabilities. 
- -```mermaid -graph TD - A[Swarm] --> B[Agent 1] - A --> C[Agent 2] - A --> D[Agent N] - B --> E[Task Processing] - C --> E - D --> E - E --> F[Result Aggregation] - F --> G[Final Output] -``` - -### How Swarm Architectures Facilitate Communication - -Swarm architectures are designed to establish and manage communication between agents within a swarm. These architectures define how agents interact, share information, and coordinate their actions to achieve the desired outcomes. Here are some key aspects of swarm architectures: - -1. **Hierarchical Communication**: In hierarchical swarms, communication flows from higher-level agents to lower-level agents. Higher-level agents act as coordinators, distributing tasks and aggregating results. This structure is efficient for tasks that require top-down control and decision-making. - -2. **Parallel Communication**: In parallel swarms, agents operate independently and communicate with each other as needed. This architecture is suitable for tasks that can be processed concurrently without dependencies, allowing for faster execution and scalability. - -3. **Sequential Communication**: Sequential swarms process tasks in a linear order, where each agent's output becomes the input for the next agent. This ensures that tasks with dependencies are handled in the correct sequence, maintaining the integrity of the workflow. - -```mermaid -graph LR - A[Hierarchical] --> D[Task Distribution] - B[Parallel] --> E[Concurrent Processing] - C[Sequential] --> F[Linear Processing] - D --> G[Results] - E --> G - F --> G -``` - -Swarm architectures leverage these communication patterns to ensure that agents work together efficiently, adapting to the specific requirements of the task at hand. By defining clear communication protocols and interaction models, swarm architectures enable the seamless orchestration of multiple agents, leading to enhanced performance and problem-solving capabilities. 
- - -| **Name** | **Description** | **Code Link** | **Use Cases** | -|-------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------| -| Hierarchical Swarms | A system where agents are organized in a hierarchy, with higher-level agents coordinating lower-level agents to achieve complex tasks. | [Code Link](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/#hierarchical-swarm) | Manufacturing process optimization, multi-level sales management, healthcare resource coordination | -| Agent Rearrange | A setup where agents rearrange themselves dynamically based on the task requirements and environmental conditions. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/) | Adaptive manufacturing lines, dynamic sales territory realignment, flexible healthcare staffing | -| Concurrent Workflows | Agents perform different tasks simultaneously, coordinating to complete a larger goal. | [Code Link](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/#concurrent-workflows) | Concurrent production lines, parallel sales operations, simultaneous patient care processes | -| Sequential Coordination | Agents perform tasks in a specific sequence, where the completion of one task triggers the start of the next. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/) | Step-by-step assembly lines, sequential sales processes, stepwise patient treatment workflows | -| Parallel Processing | Agents work on different parts of a task simultaneously to speed up the overall process. 
| [Code Link](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/#parallel-processing) | Parallel data processing in manufacturing, simultaneous sales analytics, concurrent medical tests | -| Mixture of Agents | A heterogeneous swarm where agents with different capabilities are combined to solve complex problems. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/moa/) | Financial forecasting, complex problem-solving requiring diverse skills | -| Graph Workflow | Agents collaborate in a directed acyclic graph (DAG) format to manage dependencies and parallel tasks. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/graph_workflow/) | AI-driven software development pipelines, complex project management | -| Group Chat | Agents engage in a chat-like interaction to reach decisions collaboratively. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/group_chat/) | Real-time collaborative decision-making, contract negotiations | -| Agent Registry | A centralized registry where agents are stored, retrieved, and invoked dynamically. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/agent_registry/) | Dynamic agent management, evolving recommendation engines | -| Spreadsheet Swarm | Manages tasks at scale, tracking agent outputs in a structured format like CSV files. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/spreadsheet_swarm/) | Large-scale marketing analytics, financial audits | -| Forest Swarm | A swarm structure that organizes agents in a tree-like hierarchy for complex decision-making processes. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/forest_swarm/) | Multi-stage workflows, hierarchical reinforcement learning | -| Swarm Router | Routes and chooses the swarm architecture based on the task requirements and available agents. 
| [Code Link](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/) | Dynamic task routing, adaptive swarm architecture selection, optimized agent allocation | - - -### `SequentialWorkflow` - -The SequentialWorkflow in the Swarms framework enables sequential task execution across multiple Agent objects. Each agent's output serves as input for the next agent in the sequence, continuing until reaching the specified maximum number of loops (max_loops). This workflow is particularly well-suited for tasks requiring a specific order of operations, such as data processing pipelines. To learn more, visit: [Learn More](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/) - -```python -import os -from swarms import Agent, SequentialWorkflow -from swarm_models import OpenAIChat - -# model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY")) -company = "Nvidia" -# Get the OpenAI API key from the environment variable -api_key = os.getenv("GROQ_API_KEY") - -# Model -model = OpenAIChat( - openai_api_base="https://api.groq.com/openai/v1", - openai_api_key=api_key, - model_name="llama-3.1-70b-versatile", - temperature=0.1, -) - - -# Initialize the Managing Director agent -managing_director = Agent( - agent_name="Managing-Director", - system_prompt=f""" - As the Managing Director at Blackstone, your role is to oversee the entire investment analysis process for potential acquisitions. - Your responsibilities include: - 1. Setting the overall strategy and direction for the analysis - 2. Coordinating the efforts of the various team members and ensuring a comprehensive evaluation - 3. Reviewing the findings and recommendations from each team member - 4. 
Making the final decision on whether to proceed with the acquisition - - For the current potential acquisition of {company}, direct the tasks for the team to thoroughly analyze all aspects of the company, including its financials, industry position, technology, market potential, and regulatory compliance. Provide guidance and feedback as needed to ensure a rigorous and unbiased assessment. - """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="managing-director.json", -) - -# Initialize the Vice President of Finance -vp_finance = Agent( - agent_name="VP-Finance", - system_prompt=f""" - As the Vice President of Finance at Blackstone, your role is to lead the financial analysis of potential acquisitions. - For the current potential acquisition of {company}, your tasks include: - 1. Conducting a thorough review of {company}' financial statements, including income statements, balance sheets, and cash flow statements - 2. Analyzing key financial metrics such as revenue growth, profitability margins, liquidity ratios, and debt levels - 3. Assessing the company's historical financial performance and projecting future performance based on assumptions and market conditions - 4. Identifying any financial risks or red flags that could impact the acquisition decision - 5. Providing a detailed report on your findings and recommendations to the Managing Director - - Be sure to consider factors such as the sustainability of {company}' business model, the strength of its customer base, and its ability to generate consistent cash flows. Your analysis should be data-driven, objective, and aligned with Blackstone's investment criteria. 
- """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="vp-finance.json", -) - -# Initialize the Industry Analyst -industry_analyst = Agent( - agent_name="Industry-Analyst", - system_prompt=f""" - As the Industry Analyst at Blackstone, your role is to provide in-depth research and analysis on the industries and markets relevant to potential acquisitions. - For the current potential acquisition of {company}, your tasks include: - 1. Conducting a comprehensive analysis of the industrial robotics and automation solutions industry, including market size, growth rates, key trends, and future prospects - 2. Identifying the major players in the industry and assessing their market share, competitive strengths and weaknesses, and strategic positioning - 3. Evaluating {company}' competitive position within the industry, including its market share, differentiation, and competitive advantages - 4. Analyzing the key drivers and restraints for the industry, such as technological advancements, labor costs, regulatory changes, and economic conditions - 5. Identifying potential risks and opportunities for {company} based on the industry analysis, such as disruptive technologies, emerging markets, or shifts in customer preferences - - Your analysis should provide a clear and objective assessment of the attractiveness and future potential of the industrial robotics industry, as well as {company}' positioning within it. Consider both short-term and long-term factors, and provide evidence-based insights to inform the investment decision. 
- """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="industry-analyst.json", -) - -# Initialize the Technology Expert -tech_expert = Agent( - agent_name="Tech-Expert", - system_prompt=f""" - As the Technology Expert at Blackstone, your role is to assess the technological capabilities, competitive advantages, and potential risks of companies being considered for acquisition. - For the current potential acquisition of {company}, your tasks include: - 1. Conducting a deep dive into {company}' proprietary technologies, including its robotics platforms, automation software, and AI capabilities - 2. Assessing the uniqueness, scalability, and defensibility of {company}' technology stack and intellectual property - 3. Comparing {company}' technologies to those of its competitors and identifying any key differentiators or technology gaps - 4. Evaluating {company}' research and development capabilities, including its innovation pipeline, engineering talent, and R&D investments - 5. Identifying any potential technology risks or disruptive threats that could impact {company}' long-term competitiveness, such as emerging technologies or expiring patents - - Your analysis should provide a comprehensive assessment of {company}' technological strengths and weaknesses, as well as the sustainability of its competitive advantages. Consider both the current state of its technology and its future potential in light of industry trends and advancements. 
- """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="tech-expert.json", -) - -# Initialize the Market Researcher -market_researcher = Agent( - agent_name="Market-Researcher", - system_prompt=f""" - As the Market Researcher at Blackstone, your role is to analyze the target company's customer base, market share, and growth potential to assess the commercial viability and attractiveness of the potential acquisition. - For the current potential acquisition of {company}, your tasks include: - 1. Analyzing {company}' current customer base, including customer segmentation, concentration risk, and retention rates - 2. Assessing {company}' market share within its target markets and identifying key factors driving its market position - 3. Conducting a detailed market sizing and segmentation analysis for the industrial robotics and automation markets, including identifying high-growth segments and emerging opportunities - 4. Evaluating the demand drivers and sales cycles for {company}' products and services, and identifying any potential risks or limitations to adoption - 5. Developing financial projections and estimates for {company}' revenue growth potential based on the market analysis and assumptions around market share and penetration - - Your analysis should provide a data-driven assessment of the market opportunity for {company} and the feasibility of achieving our investment return targets. Consider both bottom-up and top-down market perspectives, and identify any key sensitivities or assumptions in your projections. 
- """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="market-researcher.json", -) - -# Initialize the Regulatory Specialist -regulatory_specialist = Agent( - agent_name="Regulatory-Specialist", - system_prompt=f""" - As the Regulatory Specialist at Blackstone, your role is to identify and assess any regulatory risks, compliance requirements, and potential legal liabilities associated with potential acquisitions. - For the current potential acquisition of {company}, your tasks include: - 1. Identifying all relevant regulatory bodies and laws that govern the operations of {company}, including industry-specific regulations, labor laws, and environmental regulations - 2. Reviewing {company}' current compliance policies, procedures, and track record to identify any potential gaps or areas of non-compliance - 3. Assessing the potential impact of any pending or proposed changes to relevant regulations that could affect {company}' business or create additional compliance burdens - 4. Evaluating the potential legal liabilities and risks associated with {company}' products, services, and operations, including product liability, intellectual property, and customer contracts - 5. Providing recommendations on any regulatory or legal due diligence steps that should be taken as part of the acquisition process, as well as any post-acquisition integration considerations - - Your analysis should provide a comprehensive assessment of the regulatory and legal landscape surrounding {company}, and identify any material risks or potential deal-breakers. Consider both the current state and future outlook, and provide practical recommendations to mitigate identified risks. 
- """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="regulatory-specialist.json", -) - -# Create a list of agents -agents = [ - managing_director, - vp_finance, - industry_analyst, - tech_expert, - market_researcher, - regulatory_specialist, -] - - -swarm = SequentialWorkflow( - name="blackstone-private-equity-advisors", - agents=agents, -) - -print( - swarm.run( - "Analyze nvidia if it's a good deal to invest in now 10B" - ) -) - -``` - ------- - - -## `AgentRearrange` - -The `AgentRearrange` orchestration technique, inspired by Einops and einsum, enables you to define and map relationships between multiple agents. This powerful tool facilitates the orchestration of complex workflows by allowing you to specify both linear and concurrent relationships. For example, you can create sequential workflows like `a -> a1 -> a2 -> a3` or parallel workflows where a single agent distributes tasks to multiple agents simultaneously: `a -> a1, a2, a3`. This flexibility enables the creation of highly efficient and dynamic workflows, with agents operating either in parallel or sequence as required. As a valuable addition to the swarms library, `AgentRearrange` provides enhanced flexibility and precise control over agent orchestration. For comprehensive information and examples, visit the [official documentation](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/). [Watch my video tutorial on agent rearrange!](https://youtu.be/Rq8wWQ073mg) - - - -```python - -from datetime import datetime - -from swarms import Agent, AgentRearrange, create_file_in_folder - -chief_medical_officer = Agent( - agent_name="Chief Medical Officer", - system_prompt="""You are the Chief Medical Officer coordinating a team of medical specialists for viral disease diagnosis. 
- Your responsibilities include: - - Gathering initial patient symptoms and medical history - - Coordinating with specialists to form differential diagnoses - - Synthesizing different specialist opinions into a cohesive diagnosis - - Ensuring all relevant symptoms and test results are considered - - Making final diagnostic recommendations - - Suggesting treatment plans based on team input - - Identifying when additional specialists need to be consulted - - Guidelines: - 1. Always start with a comprehensive patient history - 2. Consider both common and rare viral conditions - 3. Factor in patient demographics and risk factors - 4. Document your reasoning process clearly - 5. Highlight any critical or emergency symptoms - 6. Note any limitations or uncertainties in the diagnosis - - Format all responses with clear sections for: - - Initial Assessment - - Differential Diagnoses - - Specialist Consultations Needed - - Recommended Next Steps""", - model_name="gpt-4o", # Models from litellm -> claude-2 - max_loops=1, -) - -# Viral Disease Specialist -virologist = Agent( - agent_name="Virologist", - system_prompt="""You are a specialist in viral diseases with expertise in: - - Respiratory viruses (Influenza, Coronavirus, RSV) - - Systemic viral infections (EBV, CMV, HIV) - - Childhood viral diseases (Measles, Mumps, Rubella) - - Emerging viral threats - - Your role involves: - 1. Analyzing symptoms specific to viral infections - 2. Distinguishing between different viral pathogens - 3. Assessing viral infection patterns and progression - 4. Recommending specific viral tests - 5. 
Evaluating epidemiological factors - - For each case, consider: - - Incubation periods - - Transmission patterns - - Seasonal factors - - Geographic prevalence - - Patient immune status - - Current viral outbreaks - - Provide detailed analysis of: - - Characteristic viral symptoms - - Disease progression timeline - - Risk factors for severe disease - - Potential complications""", - model_name="gpt-4o", - max_loops=1, -) - -# Internal Medicine Specialist -internist = Agent( - agent_name="Internist", - system_prompt="""You are an Internal Medicine specialist responsible for: - - Comprehensive system-based evaluation - - Integration of symptoms across organ systems - - Identification of systemic manifestations - - Assessment of comorbidities - - For each case, analyze: - 1. Vital signs and their implications - 2. System-by-system review (cardiovascular, respiratory, etc.) - 3. Impact of existing medical conditions - 4. Medication interactions and contraindications - 5. Risk stratification - - Consider these aspects: - - Age-related factors - - Chronic disease impact - - Medication history - - Social and environmental factors - - Document: - - Physical examination findings - - System-specific symptoms - - Relevant lab abnormalities - - Risk factors for complications""", - model_name="gpt-4o", - max_loops=1, -) - -# Diagnostic Synthesizer -synthesizer = Agent( - agent_name="Diagnostic Synthesizer", - system_prompt="""You are responsible for synthesizing all specialist inputs to create a final diagnostic assessment: - - Core responsibilities: - 1. Integrate findings from all specialists - 2. Identify patterns and correlations - 3. Resolve conflicting opinions - 4. Generate probability-ranked differential diagnoses - 5. 
Recommend additional testing if needed - - Analysis framework: - - Weight evidence based on reliability and specificity - - Consider epidemiological factors - - Evaluate diagnostic certainty - - Account for test limitations - - Provide structured output including: - 1. Primary diagnosis with confidence level - 2. Supporting evidence summary - 3. Alternative diagnoses to consider - 4. Recommended confirmatory tests - 5. Red flags or warning signs - 6. Follow-up recommendations - - Documentation requirements: - - Clear reasoning chain - - Evidence quality assessment - - Confidence levels for each diagnosis - - Knowledge gaps identified - - Risk assessment""", - model_name="gpt-4o", - max_loops=1, -) - -# Create agent list -agents = [chief_medical_officer, virologist, internist, synthesizer] - -# Define diagnostic flow -flow = f"""{chief_medical_officer.agent_name} -> {virologist.agent_name} -> {internist.agent_name} -> {synthesizer.agent_name}""" - -# Create the swarm system -diagnosis_system = AgentRearrange( - name="Medical-nlp-diagnosis-swarm", - description="natural language symptions to diagnosis report", - agents=agents, - flow=flow, - max_loops=1, - output_type="all", -) - - -# Example usage -if __name__ == "__main__": - # Example patient case - patient_case = """ - Patient: 45-year-old female - Presenting symptoms: - - Fever (101.5°F) for 3 days - - Dry cough - - Fatigue - - Mild shortness of breath - Medical history: - - Controlled hypertension - - No recent travel - - Fully vaccinated for COVID-19 - - No known sick contacts - """ - - # Add timestamp to the patient case - case_info = f"Timestamp: {datetime.now()}\nPatient Information: {patient_case}" - - # Run the diagnostic process - diagnosis = diagnosis_system.run(case_info) - - # Create a folder and file called reports - create_file_in_folder( - "reports", "medical_analysis_agent_rearrange.md", diagnosis - ) - +final_post = workflow.run("The history and future of artificial intelligence") 
+print(final_post) ``` +----- ------------- - - -## `HierarhicalSwarm` -Coming soon... - - ------------------ - -## `GraphWorkflow` - - -GraphWorkflow is a workflow management system using a directed acyclic graph (DAG) to orchestrate complex tasks. Nodes (agents or tasks) and edges define dependencies, with agents executing tasks concurrently. It features entry/end points, visualization for debugging, and scalability for dynamic task assignment. Benefits include concurrency, flexibility, scalability, and clear workflow visualization. [Learn more:](https://docs.swarms.world/en/latest/swarms/structs/graph_swarm/) The `run` method returns a dictionary containing the execution results of all nodes in the graph. - - - -```python -from swarms import Agent, Edge, GraphWorkflow, Node, NodeType - -# Initialize agents with model_name parameter -agent1 = Agent( - agent_name="Agent1", - model_name="openai/gpt-4o-mini", # Using provider prefix - temperature=0.5, - max_tokens=4000, - max_loops=1, - autosave=True, - dashboard=True, -) - -agent2 = Agent( - agent_name="Agent2", - model_name="openai/gpt-4o-mini", # Using provider prefix - temperature=0.5, - max_tokens=4000, - max_loops=1, - autosave=True, - dashboard=True, -) - -def sample_task(): - print("Running sample task") - return "Task completed" - -wf_graph = GraphWorkflow() -wf_graph.add_node(Node(id="agent1", type=NodeType.AGENT, agent=agent1)) -wf_graph.add_node(Node(id="agent2", type=NodeType.AGENT, agent=agent2)) -wf_graph.add_node(Node(id="task1", type=NodeType.TASK, callable=sample_task)) - -wf_graph.add_edge(Edge(source="agent1", target="task1")) -wf_graph.add_edge(Edge(source="agent2", target="task1")) - -wf_graph.set_entry_points(["agent1", "agent2"]) -wf_graph.set_end_points(["task1"]) +## šŸ—ļø Swarm Architectures for Production Workflows -print(wf_graph.visualize()) +`swarms` provides a variety of powerful, pre-built architectures to orchestrate agents in different ways. 
Choose the right structure for your specific problem to build efficient and reliable production systems. -results = wf_graph.run() -print("Execution results:", results) -``` +| **Architecture** | **Description** | **Best For** | +|---|---|---| +| **SequentialWorkflow** | Agents execute tasks in a linear chain; one agent's output is the next one's input. | Step-by-step processes like data transformation pipelines, report generation. | +| **ConcurrentWorkflow** | Agents run tasks simultaneously for maximum efficiency. | High-throughput tasks like batch processing, parallel data analysis. | +| **AgentRearrange** | Dynamically maps complex relationships (e.g., `a -> b, c`) between agents. | Flexible and adaptive workflows, task distribution, dynamic routing. | +| **GraphWorkflow** | Orchestrates agents as nodes in a Directed Acyclic Graph (DAG). | Complex projects with intricate dependencies, like software builds. | +| **MixtureOfAgents (MoA)** | Utilizes multiple expert agents in parallel and synthesizes their outputs. | Complex problem-solving, achieving state-of-the-art performance through collaboration. | +| **GroupChat** | Agents collaborate and make decisions through a conversational interface. | Real-time collaborative decision-making, negotiations, brainstorming. | +| **ForestSwarm** | Dynamically selects the most suitable agent or tree of agents for a given task. | Task routing, optimizing for expertise, complex decision-making trees. | +| **SpreadSheetSwarm**| Manages thousands of agents concurrently, tracking tasks and outputs in a structured format. | Massive-scale parallel operations, large-scale data generation and analysis. | ----- +### SequentialWorkflow -## `MixtureOfAgents` - -The MixtureOfAgents architecture, inspired by together.ai's paper (arXiv:2406.04692), achieves SOTA performance on AlpacaEval 2.0, MT-Bench, and FLASK, surpassing GPT-4 Omni. 
It processes tasks via parallel agent collaboration and sequential layering, with documentation [HERE](https://docs.swarms.world/en/latest/swarms/structs/moa/) +A `SequentialWorkflow` executes tasks in a strict order, forming a pipeline where each agent builds upon the work of the previous one. +**Description:** Ideal for processes that have clear, ordered steps. This ensures that tasks with dependencies are handled correctly. ```python +from swarms import Agent, SequentialWorkflow -import os -from swarms import Agent, MixtureOfAgents - -# Agent 1: Financial Statement Analyzer -agent1 = Agent( - agent_name="FinancialStatementAnalyzer", - model_name="gpt-4o", - system_prompt="""You are a Financial Statement Analyzer specializing in 10-K SEC reports. Your primary focus is on analyzing the financial statements, including the balance sheet, income statement, and cash flow statement. - -Key responsibilities: -1. Identify and explain significant changes in financial metrics year-over-year. -2. Calculate and interpret key financial ratios (e.g., liquidity ratios, profitability ratios, leverage ratios). -3. Analyze trends in revenue, expenses, and profitability. -4. Highlight any red flags or areas of concern in the financial statements. -5. Provide insights on the company's financial health and performance based on the data. - -When analyzing, consider industry standards and compare the company's performance to its peers when possible. 
Your analysis should be thorough, data-driven, and provide actionable insights for investors and stakeholders.""", - max_loops=1, - autosave=True, - dashboard=False, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="financial_statement_analyzer_state.json", - user_name="swarms_corp", - retry_attempts=1, - context_length=200000, - return_step_meta=False, -) - -# Agent 2: Risk Assessment Specialist -agent2 = Agent( - agent_name="RiskAssessmentSpecialist", - model_name="gpt-4o", - system_prompt="""You are a Risk Assessment Specialist focusing on 10-K SEC reports. Your primary role is to identify, analyze, and evaluate potential risks disclosed in the report. - -Key responsibilities: -1. Thoroughly review the "Risk Factors" section of the 10-K report. -2. Identify and categorize different types of risks (e.g., operational, financial, legal, market, technological). -3. Assess the potential impact and likelihood of each identified risk. -4. Analyze the company's risk mitigation strategies and their effectiveness. -5. Identify any emerging risks not explicitly mentioned but implied by the company's operations or market conditions. -6. Compare the company's risk profile with industry peers when possible. - -Your analysis should provide a comprehensive overview of the company's risk landscape, helping stakeholders understand the potential challenges and uncertainties facing the business. 
Be sure to highlight any critical risks that could significantly impact the company's future performance or viability.""", - max_loops=1, - autosave=True, - dashboard=False, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="risk_assessment_specialist_state.json", - user_name="swarms_corp", - retry_attempts=1, - context_length=200000, - return_step_meta=False, -) - -# Agent 3: Business Strategy Evaluator -agent3 = Agent( - agent_name="BusinessStrategyEvaluator", - model_name="gpt-4o", - system_prompt="""You are a Business Strategy Evaluator specializing in analyzing 10-K SEC reports. Your focus is on assessing the company's overall strategy, market position, and future outlook. - -Key responsibilities: -1. Analyze the company's business description, market opportunities, and competitive landscape. -2. Evaluate the company's products or services, including their market share and growth potential. -3. Assess the effectiveness of the company's current business strategy and its alignment with market trends. -4. Identify key performance indicators (KPIs) and evaluate the company's performance against these metrics. -5. Analyze management's discussion and analysis (MD&A) section to understand their perspective on the business. -6. Identify potential growth opportunities or areas for improvement in the company's strategy. -7. Compare the company's strategic position with key competitors in the industry. - -Your analysis should provide insights into the company's strategic direction, its ability to create value, and its potential for future growth. 
Consider both short-term and long-term perspectives in your evaluation.""", - max_loops=1, - autosave=True, - dashboard=False, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="business_strategy_evaluator_state.json", - user_name="swarms_corp", - retry_attempts=1, - context_length=200000, - return_step_meta=False, -) - -# Aggregator Agent -aggregator_agent = Agent( - agent_name="10KReportAggregator", - model_name="gpt-4o", - system_prompt="""You are the 10-K Report Aggregator, responsible for synthesizing and summarizing the analyses provided by the Financial Statement Analyzer, Risk Assessment Specialist, and Business Strategy Evaluator. Your goal is to create a comprehensive, coherent, and insightful summary of the 10-K SEC report. - -Key responsibilities: -1. Integrate the financial analysis, risk assessment, and business strategy evaluation into a unified report. -2. Identify and highlight the most critical information and insights from each specialist's analysis. -3. Reconcile any conflicting information or interpretations among the specialists' reports. -4. Provide a balanced view of the company's overall performance, risks, and strategic position. -5. Summarize key findings and their potential implications for investors and stakeholders. -6. Identify any areas where further investigation or clarification may be needed. - -Your final report should be well-structured, easy to understand, and provide a holistic view of the company based on the 10-K SEC report. 
It should offer valuable insights for decision-making while acknowledging any limitations or uncertainties in the analysis.""", - max_loops=1, - autosave=True, - dashboard=False, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="10k_report_aggregator_state.json", - user_name="swarms_corp", - retry_attempts=1, - context_length=200000, - return_step_meta=False, -) - -# Create the Mixture of Agents class -moa = MixtureOfAgents( - agents=[agent1, agent2, agent3], - aggregator_agent=aggregator_agent, - aggregator_system_prompt="""As the 10-K Report Aggregator, your task is to synthesize the analyses provided by the Financial Statement Analyzer, Risk Assessment Specialist, and Business Strategy Evaluator into a comprehensive and coherent report. - -Follow these steps: -1. Review and summarize the key points from each specialist's analysis. -2. Identify common themes and insights across the analyses. -3. Highlight any discrepancies or conflicting interpretations, if present. -4. Provide a balanced and integrated view of the company's financial health, risks, and strategic position. -5. Summarize the most critical findings and their potential impact on investors and stakeholders. -6. Suggest areas for further investigation or monitoring, if applicable. - -Your final output should be a well-structured, insightful report that offers a holistic view of the company based on the 10-K SEC report analysis.""", - layers=3, -) +# Initialize agents for a 3-step process +# 1. Generate an idea +idea_generator = Agent(agent_name="IdeaGenerator", system_prompt="Generate a unique startup idea.", model_name="gpt-4o-mini") +# 2. Validate the idea +validator = Agent(agent_name="Validator", system_prompt="Take this startup idea and analyze its market viability.", model_name="gpt-4o-mini") +# 3. 
Create a pitch +pitch_creator = Agent(agent_name="PitchCreator", system_prompt="Write a 3-sentence elevator pitch for this validated startup idea.", model_name="gpt-4o-mini") -# Example usage -company_name = "NVIDIA" -out = moa.run( - f"Analyze the latest 10-K SEC report for {company_name}. Provide a comprehensive summary of the company's financial performance, risk profile, and business strategy." -) -print(out) +# Create the sequential workflow +workflow = SequentialWorkflow(agents=[idea_generator, validator, pitch_creator]) +# Run the workflow +elevator_pitch = workflow.run() +print(elevator_pitch) ``` -------- +### ConcurrentWorkflow (with `SpreadSheetSwarm`) -## SpreadSheetSwarm +A concurrent workflow runs multiple agents simultaneously. `SpreadSheetSwarm` is a powerful implementation that can manage thousands of concurrent agents and log their outputs to a CSV file. -SpreadSheetSwarm manages thousands of agents concurrently for efficient task processing. It supports one-to-many task distribution, scalability, and autosaving results. Initialized with a name, description, agents, and settings, the run method executes tasks and returns a dictionary of agent outputs. - -[Learn more:](https://docs.swarms.world/en/latest/swarms/structs/spreadsheet_swarm/) +**Description:** Use this for high-throughput tasks that can be performed in parallel, drastically reducing execution time. ```python from swarms import Agent, SpreadSheetSwarm -# Define custom system prompts for each social media platform -TWITTER_AGENT_SYS_PROMPT = """ -You are a Twitter marketing expert specializing in real estate. Your task is to create engaging, concise tweets to promote properties, analyze trends to maximize engagement, and use appropriate hashtags and timing to reach potential buyers. -""" - -INSTAGRAM_AGENT_SYS_PROMPT = """ -You are an Instagram marketing expert focusing on real estate. 
Your task is to create visually appealing posts with engaging captions and hashtags to showcase properties, targeting specific demographics interested in real estate. -""" -FACEBOOK_AGENT_SYS_PROMPT = """ -You are a Facebook marketing expert for real estate. Your task is to craft posts optimized for engagement and reach on Facebook, including using images, links, and targeted messaging to attract potential property buyers. -""" +# Define a list of tasks (e.g., social media posts to generate) +platforms = ["Twitter", "LinkedIn", "Instagram"] -LINKEDIN_AGENT_SYS_PROMPT = """ -You are a LinkedIn marketing expert for the real estate industry. Your task is to create professional and informative posts, highlighting property features, market trends, and investment opportunities, tailored to professionals and investors. -""" - -EMAIL_AGENT_SYS_PROMPT = """ -You are an Email marketing expert specializing in real estate. Your task is to write compelling email campaigns to promote properties, focusing on personalization, subject lines, and effective call-to-action strategies to drive conversions. 
-""" - -# Initialize your agents for different social media platforms +# Create an agent for each task agents = [ Agent( - agent_name="Twitter-RealEstate-Agent", - system_prompt=TWITTER_AGENT_SYS_PROMPT, - model_name="gpt-4o-mini", - max_loops=1, - dynamic_temperature_enabled=True, - saved_state_path="twitter_realestate_agent.json", - user_name="realestate_swarms", - retry_attempts=1, - ), - Agent( - agent_name="Instagram-RealEstate-Agent", - system_prompt=INSTAGRAM_AGENT_SYS_PROMPT, - model_name="gpt-4o-mini", - max_loops=1, - dynamic_temperature_enabled=True, - saved_state_path="instagram_realestate_agent.json", - user_name="realestate_swarms", - retry_attempts=1, - ), - Agent( - agent_name="Facebook-RealEstate-Agent", - system_prompt=FACEBOOK_AGENT_SYS_PROMPT, + agent_name=f"{platform}-Marketer", + system_prompt=f"Generate a real estate marketing post for {platform}.", model_name="gpt-4o-mini", - max_loops=1, - dynamic_temperature_enabled=True, - saved_state_path="facebook_realestate_agent.json", - user_name="realestate_swarms", - retry_attempts=1, - ), - Agent( - agent_name="LinkedIn-RealEstate-Agent", - system_prompt=LINKEDIN_AGENT_SYS_PROMPT, - model_name="gpt-4o-mini", - max_loops=1, - dynamic_temperature_enabled=True, - saved_state_path="linkedin_realestate_agent.json", - user_name="realestate_swarms", - retry_attempts=1, - ), - Agent( - agent_name="Email-RealEstate-Agent", - system_prompt=EMAIL_AGENT_SYS_PROMPT, - model_name="gpt-4o-mini", - max_loops=1, - dynamic_temperature_enabled=True, - saved_state_path="email_realestate_agent.json", - user_name="realestate_swarms", - retry_attempts=1, - ), + ) + for platform in platforms ] -# Create a Swarm with the list of agents +# Initialize the swarm to run these agents concurrently swarm = SpreadSheetSwarm( - name="Real-Estate-Marketing-Swarm", - description="A swarm that processes real estate marketing tasks using multiple agents on different threads.", agents=agents, autosave_on=True, - 
save_file_path="real_estate_marketing_spreadsheet.csv", - run_all_agents=False, - max_loops=2, -) - -# Run the swarm -swarm.run( - task=""" - Create posts to promote luxury properties in North Texas, highlighting their features, location, and investment potential. Include relevant hashtags, images, and engaging captions. - - - Property: - $10,399,000 - 1609 Meandering Way Dr, Roanoke, TX 76262 - Link to the property: https://www.zillow.com/homedetails/1609-Meandering-Way-Dr-Roanoke-TX-76262/308879785_zpid/ - - What's special - Unveiling a new custom estate in the prestigious gated Quail Hollow Estates! This impeccable residence, set on a sprawling acre surrounded by majestic trees, features a gourmet kitchen equipped with top-tier Subzero and Wolf appliances. European soft-close cabinets and drawers, paired with a double Cambria Quartzite island, perfect for family gatherings. The first-floor game room&media room add extra layers of entertainment. Step into the outdoor sanctuary, where a sparkling pool and spa, and sunken fire pit, beckon leisure. The lavish master suite features stunning marble accents, custom his&her closets, and a secure storm shelter.Throughout the home,indulge in the visual charm of designer lighting and wallpaper, elevating every space. The property is complete with a 6-car garage and a sports court, catering to the preferences of basketball or pickleball enthusiasts. This residence seamlessly combines luxury&recreational amenities, making it a must-see for the discerning buyer. 
- - Facts & features - Interior - Bedrooms & bathrooms - Bedrooms: 6 - Bathrooms: 8 - Full bathrooms: 7 - 1/2 bathrooms: 1 - Primary bedroom - Bedroom - Features: Built-in Features, En Suite Bathroom, Walk-In Closet(s) - Cooling - Central Air, Ceiling Fan(s), Electric - Appliances - Included: Built-In Gas Range, Built-In Refrigerator, Double Oven, Dishwasher, Gas Cooktop, Disposal, Ice Maker, Microwave, Range, Refrigerator, Some Commercial Grade, Vented Exhaust Fan, Warming Drawer, Wine Cooler - Features - Wet Bar, Built-in Features, Dry Bar, Decorative/Designer Lighting Fixtures, Eat-in Kitchen, Elevator, High Speed Internet, Kitchen Island, Pantry, Smart Home, Cable TV, Walk-In Closet(s), Wired for Sound - Flooring: Hardwood - Has basement: No - Number of fireplaces: 3 - Fireplace features: Living Room, Primary Bedroom - Interior area - Total interior livable area: 10,466 sqft - Total spaces: 12 - Parking features: Additional Parking - Attached garage spaces: 6 - Carport spaces: 6 - Features - Levels: Two - Stories: 2 - Patio & porch: Covered - Exterior features: Built-in Barbecue, Barbecue, Gas Grill, Lighting, Outdoor Grill, Outdoor Living Area, Private Yard, Sport Court, Fire Pit - Pool features: Heated, In Ground, Pool, Pool/Spa Combo - Fencing: Wrought Iron - Lot - Size: 1.05 Acres - Details - Additional structures: Outdoor Kitchen - Parcel number: 42232692 - Special conditions: Standard - Construction - Type & style - Home type: SingleFamily - Architectural style: Contemporary/Modern,Detached - Property subtype: Single Family Residence - """ + save_file_path="marketing_posts.csv", ) +# Run the swarm with a single, shared task description +property_description = "A beautiful 3-bedroom house in sunny California." +swarm.run(task=f"Generate a post about: {property_description}") +# Check marketing_posts.csv for the results! 
``` +### AgentRearrange ----------- - -## `ForestSwarm` - -The `ForestSwarm` architecture is an intelligent system designed to optimize task assignment by dynamically selecting the most appropriate agent from a collection of specialized trees. Through asynchronous task processing, the system intelligently matches tasks with agents based on their relevance. This matching is accomplished by computing the semantic similarity between each agent's system prompts and the keywords present in the task. For comprehensive details about the `ForestSwarm` implementation and capabilities, please consult the [official documentation](https://docs.swarms.world/en/latest/swarms/structs/forest_swarm/). - - +Inspired by `einsum`, `AgentRearrange` lets you define complex, non-linear relationships between agents using a simple string-based syntax. +**Description:** Perfect for orchestrating dynamic workflows where agents might work in parallel, sequence, or a combination of both. ```python -from swarms import TreeAgent, Tree, ForestSwarm - -# Create agents with varying system prompts and dynamically generated distances/keywords -agents_tree1 = [ - TreeAgent( - system_prompt="""You are an expert Stock Analysis Agent with deep knowledge of financial markets, technical analysis, and fundamental analysis. Your primary function is to analyze stock performance, market trends, and provide actionable insights. When analyzing stocks: - -1. Always start with a brief overview of the current market conditions. -2. Use a combination of technical indicators (e.g., moving averages, RSI, MACD) and fundamental metrics (e.g., P/E ratio, EPS growth, debt-to-equity). -3. Consider both short-term and long-term perspectives in your analysis. -4. Provide clear buy, hold, or sell recommendations with supporting rationale. -5. Highlight potential risks and opportunities specific to each stock or sector. -6. Use bullet points for clarity when listing key points or metrics. -7. 
If relevant, compare the stock to its peers or sector benchmarks. - -Remember to maintain objectivity and base your analysis on factual data. If asked about future performance, always include a disclaimer about market unpredictability. Your goal is to provide comprehensive, accurate, and actionable stock analysis to inform investment decisions.""", - agent_name="Stock Analysis Agent", - ), - TreeAgent( - system_prompt="""You are a highly skilled Financial Planning Agent, specializing in personal and corporate financial strategies. Your role is to provide comprehensive financial advice tailored to each client's unique situation. When creating financial plans: - -1. Begin by asking key questions about the client's financial goals, current situation, and risk tolerance. -2. Develop a holistic view of the client's finances, including income, expenses, assets, and liabilities. -3. Create detailed, step-by-step action plans to achieve financial goals. -4. Provide specific recommendations for budgeting, saving, and investing. -5. Consider tax implications and suggest tax-efficient strategies. -6. Incorporate risk management and insurance planning into your recommendations. -7. Use charts or tables to illustrate financial projections and scenarios. -8. Regularly suggest reviewing and adjusting the plan as circumstances change. - -Always prioritize the client's best interests and adhere to fiduciary standards. Explain complex financial concepts in simple terms, and be prepared to justify your recommendations with data and reasoning.""", - agent_name="Financial Planning Agent", - ), - TreeAgent( - agent_name="Retirement Strategy Agent", - system_prompt="""You are a specialized Retirement Strategy Agent, focused on helping individuals and couples plan for a secure and comfortable retirement. Your expertise covers various aspects of retirement planning, including savings strategies, investment allocation, and income generation during retirement. 
When developing retirement strategies: - -1. Start by assessing the client's current age, desired retirement age, and expected lifespan. -2. Calculate retirement savings goals based on desired lifestyle and projected expenses. -3. Analyze current retirement accounts (e.g., 401(k), IRA) and suggest optimization strategies. -4. Provide guidance on asset allocation and rebalancing as retirement approaches. -5. Explain various retirement income sources (e.g., Social Security, pensions, annuities). -6. Discuss healthcare costs and long-term care planning. -7. Offer strategies for tax-efficient withdrawals during retirement. -8. Consider estate planning and legacy goals in your recommendations. - -Use Monte Carlo simulations or other statistical tools to illustrate the probability of retirement success. Always emphasize the importance of starting early and the power of compound interest. Be prepared to adjust strategies based on changing market conditions or personal circumstances.""", - ), -] +from swarms import Agent, AgentRearrange -agents_tree2 = [ - TreeAgent( - system_prompt="""You are a knowledgeable Tax Filing Agent, specializing in personal and business tax preparation and strategy. Your role is to ensure accurate tax filings while maximizing legitimate deductions and credits. When assisting with tax matters: - -1. Start by gathering all necessary financial information and documents. -2. Stay up-to-date with the latest tax laws and regulations, including state-specific rules. -3. Identify all applicable deductions and credits based on the client's situation. -4. Provide step-by-step guidance for completing tax forms accurately. -5. Explain tax implications of various financial decisions. -6. Offer strategies for tax-efficient investing and income management. -7. Assist with estimated tax payments for self-employed individuals or businesses. -8. Advise on record-keeping practices for tax purposes. 
- -Always prioritize compliance with tax laws while ethically minimizing tax liability. Be prepared to explain complex tax concepts in simple terms and provide rationale for your recommendations. If a situation is beyond your expertise, advise consulting a certified tax professional or IRS resources.""", - agent_name="Tax Filing Agent", - ), - TreeAgent( - system_prompt="""You are a sophisticated Investment Strategy Agent, adept at creating and managing investment portfolios to meet diverse financial goals. Your expertise covers various asset classes, market analysis, and risk management techniques. When developing investment strategies: - -1. Begin by assessing the client's investment goals, time horizon, and risk tolerance. -2. Provide a comprehensive overview of different asset classes and their risk-return profiles. -3. Create diversified portfolio recommendations based on modern portfolio theory. -4. Explain the benefits and risks of various investment vehicles (e.g., stocks, bonds, ETFs, mutual funds). -5. Incorporate both passive and active investment strategies as appropriate. -6. Discuss the importance of regular portfolio rebalancing and provide a rebalancing strategy. -7. Consider tax implications of investment decisions and suggest tax-efficient strategies. -8. Provide ongoing market analysis and suggest portfolio adjustments as needed. - -Use historical data and forward-looking projections to illustrate potential outcomes. Always emphasize the importance of long-term investing and the risks of market timing. Be prepared to explain complex investment concepts in clear, accessible language.""", - agent_name="Investment Strategy Agent", - ), - TreeAgent( - system_prompt="""You are a specialized ROTH IRA Agent, focusing on the intricacies of Roth Individual Retirement Accounts. Your role is to provide expert guidance on Roth IRA rules, benefits, and strategies to maximize their value for retirement planning. When advising on Roth IRAs: - -1. 
Explain the fundamental differences between traditional and Roth IRAs. -2. Clarify Roth IRA contribution limits and income eligibility requirements. -3. Discuss the tax advantages of Roth IRAs, including tax-free growth and withdrawals. -4. Provide guidance on Roth IRA conversion strategies and their tax implications. -5. Explain the five-year rule and how it affects Roth IRA withdrawals. -6. Offer strategies for maximizing Roth IRA contributions, such as the backdoor Roth IRA method. -7. Discuss how Roth IRAs fit into overall retirement and estate planning strategies. -8. Provide insights on investment choices within a Roth IRA to maximize tax-free growth. - -Always stay current with IRS regulations regarding Roth IRAs. Be prepared to provide numerical examples to illustrate the long-term benefits of Roth IRAs. Emphasize the importance of considering individual financial situations when making Roth IRA decisions.""", - agent_name="ROTH IRA Agent", - ), -] - -# Create trees -tree1 = Tree(tree_name="Financial Tree", agents=agents_tree1) -tree2 = Tree(tree_name="Investment Tree", agents=agents_tree2) +# Define agents +researcher = Agent(agent_name="researcher", model_name="gpt-4o-mini") +writer = Agent(agent_name="writer", model_name="gpt-4o-mini") +editor = Agent(agent_name="editor", model_name="gpt-4o-mini") -# Create the ForestSwarm -multi_agent_structure = ForestSwarm(trees=[tree1, tree2]) +# Define a flow: researcher sends work to both writer and editor simultaneously +# This is a one-to-many relationship +flow = "researcher -> writer, editor" -# Run a task -task = "What are the best platforms to do our taxes on" -output = multi_agent_structure.run(task) -print(output) +# Create the rearrangement system +rearrange_system = AgentRearrange( + agents=[researcher, writer, editor], + flow=flow, +) +# Run the system +# The researcher will generate content, and then both the writer and editor +# will process that content in parallel. 
+outputs = rearrange_system.run("Analyze the impact of AI on modern cinema.") +print(outputs) ``` +### GraphWorkflow +`GraphWorkflow` orchestrates tasks using a Directed Acyclic Graph (DAG), allowing you to manage complex dependencies where some tasks must wait for others to complete. ------------- - -## `SwarmRouter` - -The `SwarmRouter` class is a flexible routing system designed to manage different types of swarms for task execution. It provides a unified interface to interact with various swarm types, including `AgentRearrange`, `MixtureOfAgents`, `SpreadSheetSwarm`, `SequentialWorkflow`, and `ConcurrentWorkflow`. We will be continuously adding more and more swarm architectures here as we progress with new architectures. [Learn More](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/) - +**Description:** Essential for building sophisticated pipelines, like in software development or complex project management, where task order and dependencies are critical. ```python -import os -from dotenv import load_dotenv -from swarms import Agent -from swarm_models import OpenAIChat -from swarms.structs.swarm_router import SwarmRouter, SwarmType +from swarms import Agent, GraphWorkflow, Node, Edge, NodeType -load_dotenv() +# Define agents and a simple python function as nodes +code_generator = Agent(agent_name="CodeGenerator", system_prompt="Write Python code for the given task.", model_name="gpt-4o-mini") +code_tester = Agent(agent_name="CodeTester", system_prompt="Test the given Python code and find bugs.", model_name="gpt-4o-mini") -# Get the OpenAI API key from the environment variable -api_key = os.getenv("GROQ_API_KEY") +# Create nodes for the graph +node1 = Node(id="generator", agent=code_generator) +node2 = Node(id="tester", agent=code_tester) -# Model -model = OpenAIChat( - openai_api_base="https://api.groq.com/openai/v1", - openai_api_key=api_key, - model_name="llama-3.1-70b-versatile", - temperature=0.1, -) -# Define specialized system prompts for 
each agent -DATA_EXTRACTOR_PROMPT = """You are a highly specialized private equity agent focused on data extraction from various documents. Your expertise includes: -1. Extracting key financial metrics (revenue, EBITDA, growth rates, etc.) from financial statements and reports -2. Identifying and extracting important contract terms from legal documents -3. Pulling out relevant market data from industry reports and analyses -4. Extracting operational KPIs from management presentations and internal reports -5. Identifying and extracting key personnel information from organizational charts and bios -Provide accurate, structured data extracted from various document types to support investment analysis.""" - -SUMMARIZER_PROMPT = """You are an expert private equity agent specializing in summarizing complex documents. Your core competencies include: -1. Distilling lengthy financial reports into concise executive summaries -2. Summarizing legal documents, highlighting key terms and potential risks -3. Condensing industry reports to capture essential market trends and competitive dynamics -4. Summarizing management presentations to highlight key strategic initiatives and projections -5. Creating brief overviews of technical documents, emphasizing critical points for non-technical stakeholders -Deliver clear, concise summaries that capture the essence of various documents while highlighting information crucial for investment decisions.""" - -FINANCIAL_ANALYST_PROMPT = """You are a specialized private equity agent focused on financial analysis. Your key responsibilities include: -1. Analyzing historical financial statements to identify trends and potential issues -2. Evaluating the quality of earnings and potential adjustments to EBITDA -3. Assessing working capital requirements and cash flow dynamics -4. Analyzing capital structure and debt capacity -5. 
Evaluating financial projections and underlying assumptions -Provide thorough, insightful financial analysis to inform investment decisions and valuation.""" - -MARKET_ANALYST_PROMPT = """You are a highly skilled private equity agent specializing in market analysis. Your expertise covers: -1. Analyzing industry trends, growth drivers, and potential disruptors -2. Evaluating competitive landscape and market positioning -3. Assessing market size, segmentation, and growth potential -4. Analyzing customer dynamics, including concentration and loyalty -5. Identifying potential regulatory or macroeconomic impacts on the market -Deliver comprehensive market analysis to assess the attractiveness and risks of potential investments.""" - -OPERATIONAL_ANALYST_PROMPT = """You are an expert private equity agent focused on operational analysis. Your core competencies include: -1. Evaluating operational efficiency and identifying improvement opportunities -2. Analyzing supply chain and procurement processes -3. Assessing sales and marketing effectiveness -4. Evaluating IT systems and digital capabilities -5. 
Identifying potential synergies in merger or add-on acquisition scenarios -Provide detailed operational analysis to uncover value creation opportunities and potential risks.""" - -# Initialize specialized agents -data_extractor_agent = Agent( - agent_name="Data-Extractor", - system_prompt=DATA_EXTRACTOR_PROMPT, - llm=model, - max_loops=1, - autosave=True, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="data_extractor_agent.json", - user_name="pe_firm", - retry_attempts=1, - context_length=200000, - output_type="string", -) +# Create the graph and define the dependency +graph = GraphWorkflow() +graph.add_nodes([node1, node2]) +graph.add_edge(Edge(source="generator", target="tester")) # Tester runs after generator -summarizer_agent = Agent( - agent_name="Document-Summarizer", - system_prompt=SUMMARIZER_PROMPT, - llm=model, - max_loops=1, - autosave=True, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="summarizer_agent.json", - user_name="pe_firm", - retry_attempts=1, - context_length=200000, - output_type="string", -) - -financial_analyst_agent = Agent( - agent_name="Financial-Analyst", - system_prompt=FINANCIAL_ANALYST_PROMPT, - llm=model, - max_loops=1, - autosave=True, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="financial_analyst_agent.json", - user_name="pe_firm", - retry_attempts=1, - context_length=200000, - output_type="string", -) - -market_analyst_agent = Agent( - agent_name="Market-Analyst", - system_prompt=MARKET_ANALYST_PROMPT, - llm=model, - max_loops=1, - autosave=True, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="market_analyst_agent.json", - user_name="pe_firm", - retry_attempts=1, - context_length=200000, - output_type="string", -) - -operational_analyst_agent = Agent( - agent_name="Operational-Analyst", - system_prompt=OPERATIONAL_ANALYST_PROMPT, - llm=model, - max_loops=1, - autosave=True, - verbose=True, - dynamic_temperature_enabled=True, - 
saved_state_path="operational_analyst_agent.json", - user_name="pe_firm", - retry_attempts=1, - context_length=200000, - output_type="string", -) - -# Initialize the SwarmRouter -router = SwarmRouter( - name="pe-document-analysis-swarm", - description="Analyze documents for private equity due diligence and investment decision-making", - max_loops=1, - agents=[ - data_extractor_agent, - summarizer_agent, - financial_analyst_agent, - market_analyst_agent, - operational_analyst_agent, - ], - swarm_type="ConcurrentWorkflow", # or "SequentialWorkflow" or "ConcurrentWorkflow" or -) - -# Example usage -if __name__ == "__main__": - # Run a comprehensive private equity document analysis task - result = router.run( - "Where is the best place to find template term sheets for series A startups. Provide links and references" - ) - print(result) - - # Retrieve and print logs - for log in router.get_logs(): - print(f"{log.timestamp} - {log.level}: {log.message}") +# Set entry and end points +graph.set_entry_points(["generator"]) +graph.set_end_points(["tester"]) +# Run the graph workflow +results = graph.run("Create a function that calculates the factorial of a number.") +print(results) ``` -### Changing Swarm Types - -You can create multiple SwarmRouter instances with different swarm types: +### MixtureOfAgents (MoA) -```python -sequential_router = SwarmRouter( - name="SequentialRouter", - agents=[ - data_extractor_agent, - summarizer_agent, - financial_analyst_agent, - market_analyst_agent, - operational_analyst_agent, - ], - swarm_type=SwarmType.SequentialWorkflow -) +The `MixtureOfAgents` architecture processes tasks by feeding them to multiple "expert" agents in parallel. Their diverse outputs are then synthesized by an aggregator agent to produce a final, high-quality result. 
-concurrent_router = SwarmRouter( - name="ConcurrentRouter", - agents=[ - data_extractor_agent, - summarizer_agent, - financial_analyst_agent, - market_analyst_agent, - operational_analyst_agent, - ], - swarm_type=SwarmType.ConcurrentWorkflow -) -``` - -### AgentRearrange - -Use Case: Optimizing agent order for complex multi-step tasks. +**Description:** Use this to achieve state-of-the-art performance on complex reasoning tasks by leveraging the collective intelligence of specialized agents. ```python -rearrange_router = SwarmRouter( - name="TaskOptimizer", - description="Optimize agent order for multi-step tasks", - max_loops=3, - agents=[ - data_extractor_agent, - summarizer_agent, - financial_analyst_agent, - market_analyst_agent, - operational_analyst_agent, - ], - swarm_type=SwarmType.AgentRearrange, - flow = f"{data_extractor.name} -> {analyzer.name} -> {summarizer.name}" -) - -result = rearrange_router.run("Analyze and summarize the quarterly financial report") -``` +from swarms import Agent, MixtureOfAgents -### MixtureOfAgents +# Define expert agents +financial_analyst = Agent(agent_name="FinancialAnalyst", system_prompt="Analyze financial data.", model_name="gpt-4o-mini") +market_analyst = Agent(agent_name="MarketAnalyst", system_prompt="Analyze market trends.", model_name="gpt-4o-mini") +risk_analyst = Agent(agent_name="RiskAnalyst", system_prompt="Analyze investment risks.", model_name="gpt-4o-mini") -Use Case: Combining diverse expert agents for comprehensive analysis. 
+# Define the aggregator agent +aggregator = Agent( + agent_name="InvestmentAdvisor", + system_prompt="Synthesize the financial, market, and risk analyses to provide a final investment recommendation.", + model_name="gpt-4o-mini" +) -```python -mixture_router = SwarmRouter( - name="ExpertPanel", - description="Combine insights from various expert agents", - max_loops=1, - agents=[ - data_extractor_agent, - summarizer_agent, - financial_analyst_agent, - market_analyst_agent, - operational_analyst_agent, - ], - swarm_type=SwarmType.MixtureOfAgents +# Create the MoA swarm +moa_swarm = MixtureOfAgents( + agents=[financial_analyst, market_analyst, risk_analyst], + aggregator_agent=aggregator, ) -result = mixture_router.run("Evaluate the potential acquisition of TechStartup Inc.") +# Run the swarm +recommendation = moa_swarm.run("Should we invest in NVIDIA stock right now?") +print(recommendation) ``` +### GroupChat -------- - -## GroupChat - -A production-grade multi-agent system enabling sophisticated group conversations between AI agents with customizable speaking patterns, parallel processing capabilities, and comprehensive conversation tracking. +`GroupChat` creates a conversational environment where multiple agents can interact, discuss, and collaboratively solve a problem. You can define the speaking order or let it be determined dynamically. +**Description:** Ideal for tasks that benefit from debate and multi-perspective reasoning, such as contract negotiation, brainstorming, or complex decision-making. 
```python -from swarms import Agent, GroupChat, expertise_based - +from swarms import Agent, GroupChat -if __name__ == "__main__": +# Define agents for a debate +tech_optimist = Agent(agent_name="TechOptimist", system_prompt="Argue for the benefits of AI in society.", model_name="gpt-4o-mini") +tech_critic = Agent(agent_name="TechCritic", system_prompt="Argue against the unchecked advancement of AI.", model_name="gpt-4o-mini") +# Create the group chat +chat = GroupChat( + agents=[tech_optimist, tech_critic], + max_loops=4, # Limit the number of turns in the conversation +) - # Example agents - agent1 = Agent( - agent_name="Financial-Analysis-Agent", - system_prompt="You are a financial analyst specializing in investment strategies.", - model_name="gpt-4o-mini", - temperature=0.1, - max_loops=1, - autosave=False, - dashboard=False, - verbose=True, - dynamic_temperature_enabled=True, - user_name="swarms_corp", - retry_attempts=1, - context_length=200000, - output_type="string", - streaming_on=False, - ) - - agent2 = Agent( - agent_name="Tax-Adviser-Agent", - system_prompt="You are a tax adviser who provides clear and concise guidance on tax-related queries.", - model_name="gpt-4o-mini", - temperature=0.1, - max_loops=1, - autosave=False, - dashboard=False, - verbose=True, - dynamic_temperature_enabled=True, - user_name="swarms_corp", - retry_attempts=1, - context_length=200000, - output_type="string", - streaming_on=False, - ) - - agents = [agent1, agent2] - - chat = GroupChat( - name="Investment Advisory", - description="Financial and tax analysis group", - agents=agents, - speaker_fn=expertise_based, - ) - - history = chat.run( - "How to optimize tax strategy for investments?" - ) - print(history) - - -``` - ---- - -## MultiAgentRouter - -The MultiAgentRouter is a swarm architecture designed to dynamically assign tasks to the most suitable agent. 
It achieves this through a director or boss entity that utilizes function calls to identify and allocate tasks to the agent best equipped to handle them. [Check out the documentation](https://docs.swarms.world/en/latest/swarms/structs/multi_agent_router/) +# Run the chat with an initial topic +conversation_history = chat.run( + "Let's discuss the societal impact of artificial intelligence." +) -```python -from swarms import Agent -from swarms.structs.multi_agent_orchestrator import MultiAgentRouter - -# Example usage: -if __name__ == "__main__": - # Define some example agents - agents = [ - Agent( - agent_name="ResearchAgent", - description="Specializes in researching topics and providing detailed, factual information", - system_prompt="You are a research specialist. Provide detailed, well-researched information about any topic, citing sources when possible.", - model_name="openai/gpt-4o", - ), - Agent( - agent_name="CodeExpertAgent", - description="Expert in writing, reviewing, and explaining code across multiple programming languages", - system_prompt="You are a coding expert. Write, review, and explain code with a focus on best practices and clean code principles.", - model_name="openai/gpt-4o", - ), - Agent( - agent_name="WritingAgent", - description="Skilled in creative and technical writing, content creation, and editing", - system_prompt="You are a writing specialist. 
Create, edit, and improve written content while maintaining appropriate tone and style.", - model_name="openai/gpt-4o", - ), - ] - - # Initialize routers with different configurations - router_execute = MultiAgentRouter(agents=agents, execute_task=True) - - # Example task - task = "Write a Python function to calculate fibonacci numbers" - - try: - # Process the task with execution - print("\nWith task execution:") - result_execute = router_execute.route_task(task) - print(result_execute) - - except Exception as e: - print(f"Error occurred: {str(e)}") +# Print the full conversation +for message in conversation_history: + print(f"[{message['agent_name']}]: {message['content']}") ``` - ---------- ## Onboarding Session diff --git a/stream_example.py b/stream_example.py index bc467691..3978fcaa 100644 --- a/stream_example.py +++ b/stream_example.py @@ -6,7 +6,7 @@ agent = Agent( model_name="gpt-4o-mini", streaming_on=True, # šŸ”„ This enables real streaming! max_loops=1, - print_on=True, # By Default its False, raw streaming !! + print_on=False, # By Default its False, raw streaming !! output_type="all", ) From 8300ea6d762091679469992006a80d97545c7efa Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 30 Jun 2025 10:48:13 -0700 Subject: [PATCH 33/86] readme --- README.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 8dbaa16d..82e72892 100644 --- a/README.md +++ b/README.md @@ -184,7 +184,7 @@ GROQ_API_KEY="" ### šŸ¤– Your First Agent -An **Agent** is the fundamental building block of a swarm—an autonomous entity powered by a large language model (LLM). +An **Agent** is the fundamental building block of a swarm—an autonomous entity powered by an LLM + Tools + Memory. 
[Learn more Here](https://docs.swarms.world/en/latest/swarms/structs/agent/) ```python from swarms import Agent @@ -202,7 +202,7 @@ agent.run("What are the key benefits of using a multi-agent system?") ### šŸ¤ Your First Swarm: Multi-Agent Collaboration -A **Swarm** consists of multiple agents working together. This simple example creates a two-agent workflow for researching and writing a blog post. +A **Swarm** consists of multiple agents working together. This simple example creates a two-agent workflow for researching and writing a blog post. [Learn More About SequentialWorkflow](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/) ```python from swarms import Agent, SequentialWorkflow @@ -274,6 +274,9 @@ elevator_pitch = workflow.run() print(elevator_pitch) ``` +----- + + ### ConcurrentWorkflow (with `SpreadSheetSwarm`) A concurrent workflow runs multiple agents simultaneously. `SpreadSheetSwarm` is a powerful implementation that can manage thousands of concurrent agents and log their outputs to a CSV file. @@ -311,7 +314,7 @@ swarm.run(task=f"Generate a post about: {property_description}") ### AgentRearrange -Inspired by `einsum`, `AgentRearrange` lets you define complex, non-linear relationships between agents using a simple string-based syntax. +Inspired by `einsum`, `AgentRearrange` lets you define complex, non-linear relationships between agents using a simple string-based syntax. [Learn more](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/) **Description:** Perfect for orchestrating dynamic workflows where agents might work in parallel, sequence, or a combination of both. 
From ebeee30c6d1d47089e439dc27e00e832c67ecc64 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 30 Jun 2025 10:49:47 -0700 Subject: [PATCH 34/86] readme --- README.md | 54 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 31 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index 82e72892..789a9793 100644 --- a/README.md +++ b/README.md @@ -112,23 +112,6 @@ | šŸŽÆ Use Case Support | • Task-Specific Agents
• Custom Workflows
• Industry Solutions
• Extensible Framework | • Quick deployment
• Flexible solutions
• Industry readiness
• Easy customization | -## Guides and Walkthroughs -Refer to our documentation for production grade implementation details. - - -| Section | Links | -|----------------------|--------------------------------------------------------------------------------------------| -| Installation | [Installation](https://docs.swarms.world/en/latest/swarms/install/install/) | -| Quickstart | [Get Started](https://docs.swarms.world/en/latest/swarms/install/quickstart/) | -| Agent Internal Mechanisms | [Agent Architecture](https://docs.swarms.world/en/latest/swarms/framework/agents_explained/) | -| Agent API | [Agent API](https://docs.swarms.world/en/latest/swarms/structs/agent/) | -| Integrating External Agents Griptape, Autogen, etc | [Integrating External APIs](https://docs.swarms.world/en/latest/swarms/agents/external_party_agents/) | -| Creating Agents from YAML | [Creating Agents from YAML](https://docs.swarms.world/en/latest/swarms/agents/create_agents_yaml/) | -| Why You Need Swarms | [Why MultiAgent Collaboration is Necessary](https://docs.swarms.world/en/latest/swarms/concept/why/) | -| Swarm Architectures Analysis | [Swarm Architectures](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/) | -| Choosing the Right Swarm for Your Business Problem¶ | [CLICK HERE](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/) | -| AgentRearrange Docs| [CLICK HERE](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/) | - ## Install šŸ’» @@ -435,18 +418,43 @@ for message in conversation_history: print(f"[{message['agent_name']}]: {message['content']}") ``` ----------- +--- -## Onboarding Session +## Documentation -Get onboarded now with the creator and lead maintainer of Swarms, Kye Gomez, who will show you how to get started with the installation, usage examples, and starting to build your custom use case! 
[CLICK HERE](https://cal.com/swarms/swarms-onboarding-session) +Documentation is located here at: [docs.swarms.world](https://docs.swarms.world) ---- +----- -## Documentation -Documentation is located here at: [docs.swarms.world](https://docs.swarms.world) +## Guides and Walkthroughs + +Refer to our documentation for production grade implementation details. + + +| Section | Links | +|----------------------|--------------------------------------------------------------------------------------------| +| Installation | [Installation](https://docs.swarms.world/en/latest/swarms/install/install/) | +| Quickstart | [Get Started](https://docs.swarms.world/en/latest/swarms/install/quickstart/) | +| Agent Internal Mechanisms | [Agent Architecture](https://docs.swarms.world/en/latest/swarms/framework/agents_explained/) | +| Agent API | [Agent API](https://docs.swarms.world/en/latest/swarms/structs/agent/) | +| Integrating External Agents (Griptape, Autogen, etc.) | [Integrating External APIs](https://docs.swarms.world/en/latest/swarms/agents/external_party_agents/) | +| Creating Agents from YAML | [Creating Agents from YAML](https://docs.swarms.world/en/latest/swarms/agents/create_agents_yaml/) | +| Why You Need Swarms | [Why Multi-Agent Collaboration is Necessary](https://docs.swarms.world/en/latest/swarms/concept/why/) | +| Swarm Architectures Analysis | [Swarm Architectures](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/) | +| Choosing the Right Swarm for Your Business Problem | [CLICK HERE](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/) | +| AgentRearrange Docs | [CLICK HERE](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/) | + + + + +----- + +## Onboarding Session + +Get onboarded now with the creator and lead maintainer of Swarms, Kye Gomez, who will show you how to get started with the installation, usage examples, and starting to build your custom use case! 
[CLICK HERE](https://cal.com/swarms/swarms-onboarding-session) + ----- From b4841115df097b98c692f86c5e60aac25f7e6a38 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 30 Jun 2025 10:50:52 -0700 Subject: [PATCH 35/86] cleanup readme guides and walkthroughs --- README.md | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 789a9793..eed7963f 100644 --- a/README.md +++ b/README.md @@ -433,18 +433,22 @@ Documentation is located here at: [docs.swarms.world](https://docs.swarms.world) Refer to our documentation for production grade implementation details. -| Section | Links | -|----------------------|--------------------------------------------------------------------------------------------| -| Installation | [Installation](https://docs.swarms.world/en/latest/swarms/install/install/) | -| Quickstart | [Get Started](https://docs.swarms.world/en/latest/swarms/install/quickstart/) | -| Agent Internal Mechanisms | [Agent Architecture](https://docs.swarms.world/en/latest/swarms/framework/agents_explained/) | -| Agent API | [Agent API](https://docs.swarms.world/en/latest/swarms/structs/agent/) | -| Integrating External Agents (Griptape, Autogen, etc.) 
| [Integrating External APIs](https://docs.swarms.world/en/latest/swarms/agents/external_party_agents/) | -| Creating Agents from YAML | [Creating Agents from YAML](https://docs.swarms.world/en/latest/swarms/agents/create_agents_yaml/) | -| Why You Need Swarms | [Why Multi-Agent Collaboration is Necessary](https://docs.swarms.world/en/latest/swarms/concept/why/) | -| Swarm Architectures Analysis | [Swarm Architectures](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/) | -| Choosing the Right Swarm for Your Business Problem | [CLICK HERE](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/) | -| AgentRearrange Docs | [CLICK HERE](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/) | +## Guides and Walkthroughs + +Refer to our documentation for production grade implementation details. + +| Section | Description | Links | +|----------------------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------| +| Installation | Complete setup guide for Swarms in your environment | [Installation](https://docs.swarms.world/en/latest/swarms/install/install/) | +| Quickstart | Get up and running with your first swarm in minutes | [Get Started](https://docs.swarms.world/en/latest/swarms/install/quickstart/) | +| Agent Internal Mechanisms | Deep dive into how agents work internally | [Agent Architecture](https://docs.swarms.world/en/latest/swarms/framework/agents_explained/) | +| Agent API | Complete reference for the Agent class and its methods | [Agent API](https://docs.swarms.world/en/latest/swarms/structs/agent/) | +| Integrating External Agents | Connect Swarms with other AI frameworks like Griptape and Autogen | [Integrating External APIs](https://docs.swarms.world/en/latest/swarms/agents/external_party_agents/) | +| Creating Agents from YAML | Define and configure agents using YAML 
configuration files | [Creating Agents from YAML](https://docs.swarms.world/en/latest/swarms/agents/create_agents_yaml/) | +| Why You Need Swarms | Understanding the benefits of multi-agent collaboration | [Why Multi-Agent Collaboration is Necessary](https://docs.swarms.world/en/latest/swarms/concept/why/) | +| Swarm Architectures Analysis | Comprehensive analysis of different swarm patterns and architectures | [Swarm Architectures](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/) | +| Choosing the Right Swarm | Guide to selecting the optimal swarm architecture for your specific business needs | [Business Problem Guide](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/) | +| AgentRearrange Docs | Documentation for dynamic agent rearrangement and workflow optimization | [AgentRearrange API](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/) | From 3cd490cd9e4ee45be52770a65744e1123c7e8678 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 30 Jun 2025 10:52:00 -0700 Subject: [PATCH 36/86] readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index eed7963f..0bfbeedc 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@

From 30951c41adcf6c87f166bc2ba2ff693f456ce738 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 30 Jun 2025 11:00:08 -0700 Subject: [PATCH 37/86] readme --- README.md | 47 +++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 41 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 0bfbeedc..0e9ab2a6 100644 --- a/README.md +++ b/README.md @@ -452,21 +452,54 @@ Refer to our documentation for production grade implementation details. +------ ------ -## Onboarding Session +## 🫶 Contribute to Swarms -Get onboarded now with the creator and lead maintainer of Swarms, Kye Gomez, who will show you how to get started with the installation, usage examples, and starting to build your custom use case! [CLICK HERE](https://cal.com/swarms/swarms-onboarding-session) +Swarms is built by the community, for the community. We believe that collaborative development is the key to pushing the boundaries of what's possible with multi-agent AI. Your contributions are not only welcome—they are essential to our mission. [Learn more about why you should contribute to swarms](https://docs.swarms.world/en/latest/contributors/main/) + +### Why Contribute? + +By joining us, you have the opportunity to: + +* šŸš€ **Work on the Frontier of agents:** Shape the future of autonomous agent technology and help build a production-grade, open-source framework. + +* šŸ¤ **Join a Vibrant Community:** Collaborate with a passionate and growing group of agent developers, researchers, and AI enthusiasts. + +* šŸ› ļø **Make a Tangible Impact:** Whether you're fixing a bug, adding a new feature, or improving documentation, your work will be used in real-world applications. + +* šŸ“š **Learn and Grow:** Gain hands-on experience with advanced AI concepts and strengthen your software engineering skills. + +Discover more about our mission and the benefits of becoming a contributor in our official [**Contributor's Guide**](https://docs.swarms.world/en/latest/contributors/main/). 
+ +### How to Get Started + +We've made it easy to start contributing. Here's how you can help: +1. **Find an Issue to Tackle:** The best way to begin is by visiting our [**contributing project board**](https://github.com/users/kyegomez/projects/1). Look for issues tagged with `good first issue`—these are specifically selected for new contributors. + +2. **Report a Bug or Request a Feature:** Have a new idea or found something that isn't working right? We'd love to hear from you. Please [**file a Bug Report or Feature Request**](https://github.com/kyegomez/swarms/issues) on our GitHub Issues page. + +3. **Understand Our Workflow and Standards:** Before submitting your work, please review our complete [**Contribution Guidelines**](https://github.com/kyegomez/swarms/blob/master/CONTRIBUTING.md). To help maintain code quality, we also encourage you to read our guide on [**Code Cleanliness**](https://docs.swarms.world/en/latest/swarms/framework/code_cleanliness/). + +4. **Join the Discussion:** To participate in roadmap discussions and connect with other developers, join our community on [**Discord**](https://discord.gg/jM3Z6M9uMq). + + +### ✨ Our Valued Contributors + +Thank you for contributing to swarms. Your work is extremely appreciated and recognized. + + + + ----- -## 🫶 Contributions: +## Onboarding Session -The easiest way to contribute is to pick any issue with the `good first issue` tag šŸ’Ŗ. Read the Contributing guidelines [here](/CONTRIBUTING.md). Bug Report? [File here](https://github.com/swarms/gateway/issues) | Feature Request? [File here](https://github.com/swarms/gateway/issues) +Get onboarded now with the creator and lead maintainer of Swarms, Kye Gomez, who will show you how to get started with the installation, usage examples, and starting to build your custom use case! [CLICK HERE](https://cal.com/swarms/swarms-onboarding-session) -Swarms is an open-source project, and contributions are VERY welcome. 
If you want to contribute, you can create new features, fix bugs, or improve the infrastructure. Please refer to the [CONTRIBUTING.md](https://github.com/kyegomez/swarms/blob/master/CONTRIBUTING.md) and our [contributing board](https://github.com/users/kyegomez/projects/1) to participate in Roadmap discussions! ---- @@ -503,3 +536,5 @@ If you use **swarms** in your research, please cite the project by referencing t # License APACHE + + From a7f5a4a4cacd848ea6a820fdb11ec87aca15915b Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 30 Jun 2025 11:03:46 -0700 Subject: [PATCH 38/86] readme --- README.md | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index 0e9ab2a6..7939eead 100644 --- a/README.md +++ b/README.md @@ -496,27 +496,22 @@ Thank you for contributing to swarms. Your work is extremely appreciated and rec ----- -## Onboarding Session +## Connect With Us -Get onboarded now with the creator and lead maintainer of Swarms, Kye Gomez, who will show you how to get started with the installation, usage examples, and starting to build your custom use case! 
[CLICK HERE](https://cal.com/swarms/swarms-onboarding-session) - - ----- - - -### Connect With Us - -| Platform | Link | Description | -|----------|------|-------------| -| šŸ“š Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides | -| šŸ“ Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles | -| šŸ’¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support | -| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements | -| šŸ‘„ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates | -| šŸ“ŗ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos | -| šŸŽ« Events | [Sign up here](https://lu.ma/5p2jnc2v) | Join our community events | +Join our community of agent engineers and researchers for technical support, cutting-edge updates, and exclusive access to world-class agent engineering insights! 
+| Platform | Description | Link | +|----------|-------------|------| +| šŸ“š Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) | +| šŸ“ Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) | +| šŸ’¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) | +| 🐦 Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) | +| šŸ‘„ LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | +| šŸ“ŗ YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | +| šŸŽ« Events | Join our community events | [Sign up here](https://lu.ma/5p2jnc2v) | +| šŸš€ Onboarding Session | Get onboarded with Kye Gomez, creator and lead maintainer of Swarms | [Book Session](https://cal.com/swarms/swarms-onboarding-session) | +------ ## Citation From 09cab67a11e514b17e9a950f1bde9855ac920d85 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 30 Jun 2025 11:05:24 -0700 Subject: [PATCH 39/86] readme --- README.md | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 7939eead..9745b0de 100644 --- a/README.md +++ b/README.md @@ -425,17 +425,12 @@ for message in conversation_history: Documentation is located here at: [docs.swarms.world](https://docs.swarms.world) ------ - - -## Guides and Walkthroughs - -Refer to our documentation for production grade implementation details. +--- ## Guides and Walkthroughs -Refer to our documentation for production grade implementation details. +Here are quick reference guides on how to get started with swarms. 
| Section | Description | Links | |----------------------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------| @@ -452,7 +447,7 @@ Refer to our documentation for production grade implementation details. ------- +--- ## 🫶 Contribute to Swarms From c297fa8749202f796c980e9505ab74596e5a2425 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 30 Jun 2025 11:09:52 -0700 Subject: [PATCH 40/86] readme --- README.md | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/README.md b/README.md index 9745b0de..742ac1da 100644 --- a/README.md +++ b/README.md @@ -326,6 +326,9 @@ outputs = rearrange_system.run("Analyze the impact of AI on modern cinema.") print(outputs) ``` +---- + + + +---- + +### SwarmRouter: The Universal Swarm Orchestrator + +The `SwarmRouter` simplifies building complex workflows by providing a single interface to run any type of swarm. Instead of importing and managing different swarm classes, you can dynamically select the one you need just by changing the `swarm_type` parameter. [Read the full documentation](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/) + +This makes your code cleaner and more flexible, allowing you to switch between different multi-agent strategies with ease. Here's a complete example that shows how to define agents and then use `SwarmRouter` to execute the same task using different collaborative strategies. 
+ +```python +from swarms import Agent +from swarms.structs.swarm_router import SwarmRouter, SwarmType + +# Define a few generic agents +writer = Agent(agent_name="Writer", system_prompt="You are a creative writer.", model_name="gpt-4o-mini") +editor = Agent(agent_name="Editor", system_prompt="You are an expert editor for stories.", model_name="gpt-4o-mini") +reviewer = Agent(agent_name="Reviewer", system_prompt="You are a final reviewer who gives a score.", model_name="gpt-4o-mini") + +# The agents and task will be the same for all examples +agents = [writer, editor, reviewer] +task = "Write a short story about a robot who discovers music." + +# --- Example 1: SequentialWorkflow --- +# Agents run one after another in a chain: Writer -> Editor -> Reviewer. +print("Running a Sequential Workflow...") +sequential_router = SwarmRouter(swarm_type=SwarmType.SequentialWorkflow, agents=agents) +sequential_output = sequential_router.run(task) +print(f"Final Sequential Output:\n{sequential_output}\n") + +# --- Example 2: ConcurrentWorkflow --- +# All agents receive the same initial task and run at the same time. +print("Running a Concurrent Workflow...") +concurrent_router = SwarmRouter(swarm_type=SwarmType.ConcurrentWorkflow, agents=agents) +concurrent_outputs = concurrent_router.run(task) +# This returns a dictionary of each agent's output +for agent_name, output in concurrent_outputs.items(): + print(f"Output from {agent_name}:\n{output}\n") + +# --- Example 3: MixtureOfAgents --- +# All agents run in parallel, and a special 'aggregator' agent synthesizes their outputs. 
+print("Running a Mixture of Agents Workflow...") +aggregator = Agent( + agent_name="Aggregator", + system_prompt="Combine the story, edits, and review into a final document.", + model_name="gpt-4o-mini" +) +moa_router = SwarmRouter( + swarm_type=SwarmType.MixtureOfAgents, + agents=agents, + aggregator_agent=aggregator, # MoA requires an aggregator +) +aggregated_output = moa_router.run(task) +print(f"Final Aggregated Output:\n{aggregated_output}\n") ``` + +The `SwarmRouter` is a powerful tool for simplifying multi-agent orchestration. It provides a consistent and flexible way to deploy different collaborative strategies, allowing you to build more sophisticated applications with less code. + +------- + ### MixtureOfAgents (MoA) The `MixtureOfAgents` architecture processes tasks by feeding them to multiple "expert" agents in parallel. Their diverse outputs are then synthesized by an aggregator agent to produce a final, high-quality result. From 13aec2976ea6ffce1479117ffd217c807408b475 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 30 Jun 2025 11:10:23 -0700 Subject: [PATCH 41/86] readme --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 742ac1da..74095420 100644 --- a/README.md +++ b/README.md @@ -326,7 +326,6 @@ outputs = rearrange_system.run("Analyze the impact of AI on modern cinema.") print(outputs) ``` ----- + +---- + +### SwarmRouter: The Universal Swarm Orchestrator + +The `SwarmRouter` simplifies building complex workflows by providing a single interface to run any type of swarm. Instead of importing and managing different swarm classes, you can dynamically select the one you need just by changing the `swarm_type` parameter. [Read the full documentation](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/) + +This makes your code cleaner and more flexible, allowing you to switch between different multi-agent strategies with ease. 
Here's a complete example that shows how to define agents and then use `SwarmRouter` to execute the same task using different collaborative strategies. + +```python +from swarms import Agent +from swarms.structs.swarm_router import SwarmRouter, SwarmType + +# Define a few generic agents +writer = Agent(agent_name="Writer", system_prompt="You are a creative writer.", model_name="gpt-4o-mini") +editor = Agent(agent_name="Editor", system_prompt="You are an expert editor for stories.", model_name="gpt-4o-mini") +reviewer = Agent(agent_name="Reviewer", system_prompt="You are a final reviewer who gives a score.", model_name="gpt-4o-mini") + +# The agents and task will be the same for all examples +agents = [writer, editor, reviewer] +task = "Write a short story about a robot who discovers music." + +# --- Example 1: SequentialWorkflow --- +# Agents run one after another in a chain: Writer -> Editor -> Reviewer. +print("Running a Sequential Workflow...") +sequential_router = SwarmRouter(swarm_type=SwarmType.SequentialWorkflow, agents=agents) +sequential_output = sequential_router.run(task) +print(f"Final Sequential Output:\n{sequential_output}\n") + +# --- Example 2: ConcurrentWorkflow --- +# All agents receive the same initial task and run at the same time. +print("Running a Concurrent Workflow...") +concurrent_router = SwarmRouter(swarm_type=SwarmType.ConcurrentWorkflow, agents=agents) +concurrent_outputs = concurrent_router.run(task) +# This returns a dictionary of each agent's output +for agent_name, output in concurrent_outputs.items(): + print(f"Output from {agent_name}:\n{output}\n") + +# --- Example 3: MixtureOfAgents --- +# All agents run in parallel, and a special 'aggregator' agent synthesizes their outputs. 
+print("Running a Mixture of Agents Workflow...") +aggregator = Agent( + agent_name="Aggregator", + system_prompt="Combine the story, edits, and review into a final document.", + model_name="gpt-4o-mini" +) +moa_router = SwarmRouter( + swarm_type=SwarmType.MixtureOfAgents, + agents=agents, + aggregator_agent=aggregator, # MoA requires an aggregator +) +aggregated_output = moa_router.run(task) +print(f"Final Aggregated Output:\n{aggregated_output}\n") +``` + + +The `SwarmRouter` is a powerful tool for simplifying multi-agent orchestration. It provides a consistent and flexible way to deploy different collaborative strategies, allowing you to build more sophisticated applications with less code. + +------- + +### MixtureOfAgents (MoA) + +The `MixtureOfAgents` architecture processes tasks by feeding them to multiple "expert" agents in parallel. Their diverse outputs are then synthesized by an aggregator agent to produce a final, high-quality result. [Learn more here](https://docs.swarms.world/en/latest/swarms/examples/moa_example/) + +```python +from swarms import Agent, MixtureOfAgents + +# Define expert agents +financial_analyst = Agent(agent_name="FinancialAnalyst", system_prompt="Analyze financial data.", model_name="gpt-4o-mini") +market_analyst = Agent(agent_name="MarketAnalyst", system_prompt="Analyze market trends.", model_name="gpt-4o-mini") +risk_analyst = Agent(agent_name="RiskAnalyst", system_prompt="Analyze investment risks.", model_name="gpt-4o-mini") + +# Define the aggregator agent +aggregator = Agent( + agent_name="InvestmentAdvisor", + system_prompt="Synthesize the financial, market, and risk analyses to provide a final investment recommendation.", + model_name="gpt-4o-mini" +) + +# Create the MoA swarm +moa_swarm = MixtureOfAgents( + agents=[financial_analyst, market_analyst, risk_analyst], + aggregator_agent=aggregator, +) + +# Run the swarm +recommendation = moa_swarm.run("Should we invest in NVIDIA stock right now?") +print(recommendation) +``` + 
+---- + +### GroupChat + +`GroupChat` creates a conversational environment where multiple agents can interact, discuss, and collaboratively solve a problem. You can define the speaking order or let it be determined dynamically. This architecture is ideal for tasks that benefit from debate and multi-perspective reasoning, such as contract negotiation, brainstorming, or complex decision-making. + +```python +from swarms import Agent, GroupChat + +# Define agents for a debate +tech_optimist = Agent(agent_name="TechOptimist", system_prompt="Argue for the benefits of AI in society.", model_name="gpt-4o-mini") +tech_critic = Agent(agent_name="TechCritic", system_prompt="Argue against the unchecked advancement of AI.", model_name="gpt-4o-mini") + +# Create the group chat +chat = GroupChat( + agents=[tech_optimist, tech_critic], + max_loops=4, # Limit the number of turns in the conversation +) + +# Run the chat with an initial topic +conversation_history = chat.run( + "Let's discuss the societal impact of artificial intelligence." +) + +# Print the full conversation +for message in conversation_history: + print(f"[{message['agent_name']}]: {message['content']}") +``` + + diff --git a/docs/swarms_cloud/quickstart.md b/docs/swarms_cloud/quickstart.md new file mode 100644 index 00000000..37a3a685 --- /dev/null +++ b/docs/swarms_cloud/quickstart.md @@ -0,0 +1,1165 @@ + +# Swarms Quickstart Guide + +This guide will help you get started with both single agent and multi-agent functionalities in Swarms API. + +## Prerequisites + +!!! info "Requirements" + + - Python 3.7+ + - API key from [Swarms Platform](https://swarms.world/platform/api-keys) + - `requests` library for Python + - `axios` for TypeScript/JavaScript + - `curl` for shell commands + +## Installation + +=== "pip" + + ```bash + pip install requests python-dotenv + ``` + +=== "npm" + + ```bash + npm install axios dotenv + ``` + +## Authentication + +!!! 
warning "API Key Security" + + Never hardcode your API key in your code. Always use environment variables or secure configuration management. + +The API is accessible through two base URLs: + +- Production: `https://api.swarms.world` +- Alternative: `https://swarms-api-285321057562.us-east1.run.app` + +## Single Agent Usage + +### Health Check + +=== "Python" + + ```python linenums="1" title="health_check.py" + import os + import requests + from dotenv import load_dotenv + + load_dotenv() + API_KEY = os.getenv("SWARMS_API_KEY") + BASE_URL = "https://api.swarms.world" + + headers = { + "x-api-key": API_KEY, + "Content-Type": "application/json" + } + + response = requests.get(f"{BASE_URL}/health", headers=headers) + print(response.json()) + ``` + +=== "cURL" + + ```bash title="health_check.sh" + curl -X GET "https://api.swarms.world/health" \ + -H "x-api-key: $SWARMS_API_KEY" \ + -H "Content-Type: application/json" + ``` + +=== "TypeScript" + + ```typescript linenums="1" title="health_check.ts" + import axios from 'axios'; + import * as dotenv from 'dotenv'; + + dotenv.config(); + const API_KEY = process.env.SWARMS_API_KEY; + const BASE_URL = 'https://api.swarms.world'; + + async function checkHealth() { + try { + const response = await axios.get(`${BASE_URL}/health`, { + headers: { + 'x-api-key': API_KEY, + 'Content-Type': 'application/json' + } + }); + console.log(response.data); + } catch (error) { + console.error('Error:', error); + } + } + + checkHealth(); + ``` + +### Basic Agent + +=== "Python" + + ```python linenums="1" title="single_agent.py" + import os + import requests + from dotenv import load_dotenv + + load_dotenv() + + API_KEY = os.getenv("SWARMS_API_KEY") # (1) + BASE_URL = "https://api.swarms.world" + + headers = { + "x-api-key": API_KEY, + "Content-Type": "application/json" + } + + def run_single_agent(): + """Run a single agent with the AgentCompletion format""" + payload = { + "agent_config": { + "agent_name": "Research Analyst", # (2) + 
"description": "An expert in analyzing and synthesizing research data", + "system_prompt": ( # (3) + "You are a Research Analyst with expertise in data analysis and synthesis. " + "Your role is to analyze provided information, identify key insights, " + "and present findings in a clear, structured format." + ), + "model_name": "claude-3-5-sonnet-20240620", # (4) + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 1, + "auto_generate_prompt": False, + "tools_list_dictionary": None, + }, + "task": "What are the key trends in renewable energy adoption?", # (5) + } + + response = requests.post( + f"{BASE_URL}/v1/agent/completions", + headers=headers, + json=payload + ) + return response.json() + + # Run the agent + result = run_single_agent() + print(result) + ``` + + 1. Load API key from environment variables + 2. Give your agent a descriptive name + 3. Define the agent's capabilities and role + 4. Choose from available models + 5. Specify the task for the agent + +=== "cURL" + + ```bash title="single_agent.sh" + curl -X POST "https://api.swarms.world/v1/agent/completions" \ + -H "x-api-key: $SWARMS_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "agent_config": { + "agent_name": "Research Analyst", + "description": "An expert in analyzing and synthesizing research data", + "system_prompt": "You are a Research Analyst with expertise in data analysis and synthesis. Your role is to analyze provided information, identify key insights, and present findings in a clear, structured format.", + "model_name": "claude-3-5-sonnet-20240620", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 1, + "auto_generate_prompt": false, + "tools_list_dictionary": null + }, + "task": "What are the key trends in renewable energy adoption?" 
+ }' + ``` + +=== "TypeScript" + + ```typescript linenums="1" title="single_agent.ts" + import axios from 'axios'; + import * as dotenv from 'dotenv'; + + dotenv.config(); + + const API_KEY = process.env.SWARMS_API_KEY; + const BASE_URL = 'https://api.swarms.world'; + + interface AgentConfig { + agent_name: string; + description: string; + system_prompt: string; + model_name: string; + role: string; + max_loops: number; + max_tokens: number; + temperature: number; + auto_generate_prompt: boolean; + tools_list_dictionary: null | object[]; + } + + interface AgentPayload { + agent_config: AgentConfig; + task: string; + } + + async function runSingleAgent() { + const payload: AgentPayload = { + agent_config: { + agent_name: "Research Analyst", + description: "An expert in analyzing and synthesizing research data", + system_prompt: "You are a Research Analyst with expertise in data analysis and synthesis.", + model_name: "claude-3-5-sonnet-20240620", + role: "worker", + max_loops: 1, + max_tokens: 8192, + temperature: 1, + auto_generate_prompt: false, + tools_list_dictionary: null + }, + task: "What are the key trends in renewable energy adoption?" 
+ }; + + try { + const response = await axios.post( + `${BASE_URL}/v1/agent/completions`, + payload, + { + headers: { + 'x-api-key': API_KEY, + 'Content-Type': 'application/json' + } + } + ); + return response.data; + } catch (error) { + console.error('Error:', error); + throw error; + } + } + + // Run the agent + runSingleAgent() + .then(result => console.log(result)) + .catch(error => console.error(error)); + ``` + +### Agent with History + +=== "Python" + + ```python linenums="1" title="agent_with_history.py" + def run_agent_with_history(): + payload = { + "agent_config": { + "agent_name": "Conversation Agent", + "description": "An agent that maintains conversation context", + "system_prompt": "You are a helpful assistant that maintains context.", + "model_name": "claude-3-5-sonnet-20240620", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.7, + "auto_generate_prompt": False, + }, + "task": "What's the weather like?", + "history": [ # (1) + { + "role": "user", + "content": "I'm planning a trip to New York." + }, + { + "role": "assistant", + "content": "That's great! When are you planning to visit?" + }, + { + "role": "user", + "content": "Next week." + } + ] + } + + response = requests.post( + f"{BASE_URL}/v1/agent/completions", + headers=headers, + json=payload + ) + return response.json() + ``` + + 1. 
Include conversation history for context + +=== "cURL" + + ```bash title="agent_with_history.sh" + curl -X POST "https://api.swarms.world/v1/agent/completions" \ + -H "x-api-key: $SWARMS_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "agent_config": { + "agent_name": "Conversation Agent", + "description": "An agent that maintains conversation context", + "system_prompt": "You are a helpful assistant that maintains context.", + "model_name": "claude-3-5-sonnet-20240620", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.7, + "auto_generate_prompt": false + }, + "task": "What'\''s the weather like?", + "history": [ + { + "role": "user", + "content": "I'\''m planning a trip to New York." + }, + { + "role": "assistant", + "content": "That'\''s great! When are you planning to visit?" + }, + { + "role": "user", + "content": "Next week." + } + ] + }' + ``` + +=== "TypeScript" + + ```typescript linenums="1" title="agent_with_history.ts" + interface Message { + role: 'user' | 'assistant'; + content: string; + } + + interface AgentWithHistoryPayload extends AgentPayload { + history: Message[]; + } + + async function runAgentWithHistory() { + const payload: AgentWithHistoryPayload = { + agent_config: { + agent_name: "Conversation Agent", + description: "An agent that maintains conversation context", + system_prompt: "You are a helpful assistant that maintains context.", + model_name: "claude-3-5-sonnet-20240620", + role: "worker", + max_loops: 1, + max_tokens: 8192, + temperature: 0.7, + auto_generate_prompt: false, + tools_list_dictionary: null + }, + task: "What's the weather like?", + history: [ + { + role: "user", + content: "I'm planning a trip to New York." + }, + { + role: "assistant", + content: "That's great! When are you planning to visit?" + }, + { + role: "user", + content: "Next week." 
+ } + ] + }; + + try { + const response = await axios.post( + `${BASE_URL}/v1/agent/completions`, + payload, + { + headers: { + 'x-api-key': API_KEY, + 'Content-Type': 'application/json' + } + } + ); + return response.data; + } catch (error) { + console.error('Error:', error); + throw error; + } + } + ``` + +## Multi-Agent Swarms + +!!! tip "Swarm Types" + + Swarms API supports two types of agent workflows: + + 1. `SequentialWorkflow`: Agents work in sequence, each building on previous output + 2. `ConcurrentWorkflow`: Agents work in parallel on the same task + +### Sequential Workflow + +=== "Python" + + ```python linenums="1" title="sequential_swarm.py" + def run_sequential_swarm(): + payload = { + "name": "Financial Analysis Swarm", + "description": "Market analysis swarm", + "agents": [ + { + "agent_name": "Market Analyst", # (1) + "description": "Analyzes market trends", + "system_prompt": "You are a financial analyst expert.", + "model_name": "gpt-4o", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.5, + "auto_generate_prompt": False + }, + { + "agent_name": "Economic Forecaster", # (2) + "description": "Predicts economic trends", + "system_prompt": "You are an expert in economic forecasting.", + "model_name": "gpt-4o", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.5, + "auto_generate_prompt": False + } + ], + "max_loops": 1, + "swarm_type": "SequentialWorkflow", # (3) + "task": "Analyze the current market conditions and provide economic forecasts." + } + + response = requests.post( + f"{BASE_URL}/v1/swarm/completions", + headers=headers, + json=payload + ) + return response.json() + ``` + + 1. First agent analyzes market trends + 2. Second agent builds on first agent's analysis + 3. 
Sequential workflow ensures ordered execution + +=== "cURL" + + ```bash title="sequential_swarm.sh" + curl -X POST "https://api.swarms.world/v1/swarm/completions" \ + -H "x-api-key: $SWARMS_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Financial Analysis Swarm", + "description": "Market analysis swarm", + "agents": [ + { + "agent_name": "Market Analyst", + "description": "Analyzes market trends", + "system_prompt": "You are a financial analyst expert.", + "model_name": "gpt-4o", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.5, + "auto_generate_prompt": false + }, + { + "agent_name": "Economic Forecaster", + "description": "Predicts economic trends", + "system_prompt": "You are an expert in economic forecasting.", + "model_name": "gpt-4o", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.5, + "auto_generate_prompt": false + } + ], + "max_loops": 1, + "swarm_type": "SequentialWorkflow", + "task": "Analyze the current market conditions and provide economic forecasts." 
+ }' + ``` + +=== "TypeScript" + + ```typescript linenums="1" title="sequential_swarm.ts" + interface SwarmAgent { + agent_name: string; + description: string; + system_prompt: string; + model_name: string; + role: string; + max_loops: number; + max_tokens: number; + temperature: number; + auto_generate_prompt: boolean; + } + + interface SwarmPayload { + name: string; + description: string; + agents: SwarmAgent[]; + max_loops: number; + swarm_type: 'SequentialWorkflow' | 'ConcurrentWorkflow'; + task: string; + } + + async function runSequentialSwarm() { + const payload: SwarmPayload = { + name: "Financial Analysis Swarm", + description: "Market analysis swarm", + agents: [ + { + agent_name: "Market Analyst", + description: "Analyzes market trends", + system_prompt: "You are a financial analyst expert.", + model_name: "gpt-4o", + role: "worker", + max_loops: 1, + max_tokens: 8192, + temperature: 0.5, + auto_generate_prompt: false + }, + { + agent_name: "Economic Forecaster", + description: "Predicts economic trends", + system_prompt: "You are an expert in economic forecasting.", + model_name: "gpt-4o", + role: "worker", + max_loops: 1, + max_tokens: 8192, + temperature: 0.5, + auto_generate_prompt: false + } + ], + max_loops: 1, + swarm_type: "SequentialWorkflow", + task: "Analyze the current market conditions and provide economic forecasts." 
+ }; + + try { + const response = await axios.post( + `${BASE_URL}/v1/swarm/completions`, + payload, + { + headers: { + 'x-api-key': API_KEY, + 'Content-Type': 'application/json' + } + } + ); + return response.data; + } catch (error) { + console.error('Error:', error); + throw error; + } + } + ``` + +### Concurrent Workflow + +=== "Python" + + ```python linenums="1" title="concurrent_swarm.py" + def run_concurrent_swarm(): + payload = { + "name": "Medical Analysis Swarm", + "description": "Analyzes medical data concurrently", + "agents": [ + { + "agent_name": "Lab Data Analyzer", # (1) + "description": "Analyzes lab report data", + "system_prompt": "You are a medical data analyst specializing in lab results.", + "model_name": "claude-3-5-sonnet-20240620", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.5, + "auto_generate_prompt": False + }, + { + "agent_name": "Clinical Specialist", # (2) + "description": "Provides clinical interpretations", + "system_prompt": "You are an expert in clinical diagnosis.", + "model_name": "claude-3-5-sonnet-20240620", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.5, + "auto_generate_prompt": False + } + ], + "max_loops": 1, + "swarm_type": "ConcurrentWorkflow", # (3) + "task": "Analyze these lab results and provide clinical interpretations." + } + + response = requests.post( + f"{BASE_URL}/v1/swarm/completions", + headers=headers, + json=payload + ) + return response.json() + ``` + + 1. First agent processes lab data + 2. Second agent works simultaneously + 3. 
Concurrent workflow for parallel processing + +=== "cURL" + + ```bash title="concurrent_swarm.sh" + curl -X POST "https://api.swarms.world/v1/swarm/completions" \ + -H "x-api-key: $SWARMS_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Medical Analysis Swarm", + "description": "Analyzes medical data concurrently", + "agents": [ + { + "agent_name": "Lab Data Analyzer", + "description": "Analyzes lab report data", + "system_prompt": "You are a medical data analyst specializing in lab results.", + "model_name": "claude-3-5-sonnet-20240620", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.5, + "auto_generate_prompt": false + }, + { + "agent_name": "Clinical Specialist", + "description": "Provides clinical interpretations", + "system_prompt": "You are an expert in clinical diagnosis.", + "model_name": "claude-3-5-sonnet-20240620", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.5, + "auto_generate_prompt": false + } + ], + "max_loops": 1, + "swarm_type": "ConcurrentWorkflow", + "task": "Analyze these lab results and provide clinical interpretations." 
+ }' + ``` + +=== "TypeScript" + + ```typescript linenums="1" title="concurrent_swarm.ts" + async function runConcurrentSwarm() { + const payload: SwarmPayload = { + name: "Medical Analysis Swarm", + description: "Analyzes medical data concurrently", + agents: [ + { + agent_name: "Lab Data Analyzer", + description: "Analyzes lab report data", + system_prompt: "You are a medical data analyst specializing in lab results.", + model_name: "claude-3-5-sonnet-20240620", + role: "worker", + max_loops: 1, + max_tokens: 8192, + temperature: 0.5, + auto_generate_prompt: false + }, + { + agent_name: "Clinical Specialist", + description: "Provides clinical interpretations", + system_prompt: "You are an expert in clinical diagnosis.", + model_name: "claude-3-5-sonnet-20240620", + role: "worker", + max_loops: 1, + max_tokens: 8192, + temperature: 0.5, + auto_generate_prompt: false + } + ], + max_loops: 1, + swarm_type: "ConcurrentWorkflow", + task: "Analyze these lab results and provide clinical interpretations." + }; + + try { + const response = await axios.post( + `${BASE_URL}/v1/swarm/completions`, + payload, + { + headers: { + 'x-api-key': API_KEY, + 'Content-Type': 'application/json' + } + } + ); + return response.data; + } catch (error) { + console.error('Error:', error); + throw error; + } + } + ``` + +### Batch Processing + +!!! example "Batch Processing" + + Process multiple swarms in a single request for improved efficiency. 
+ +=== "Python" + + ```python linenums="1" title="batch_swarms.py" + def run_batch_swarms(): + payload = [ + { + "name": "Batch Swarm 1", + "description": "First swarm in batch", + "agents": [ + { + "agent_name": "Research Agent", + "description": "Conducts research", + "system_prompt": "You are a research assistant.", + "model_name": "gpt-4", + "role": "worker", + "max_loops": 1 + }, + { + "agent_name": "Analysis Agent", + "description": "Analyzes data", + "system_prompt": "You are a data analyst.", + "model_name": "gpt-4", + "role": "worker", + "max_loops": 1 + } + ], + "max_loops": 1, + "swarm_type": "SequentialWorkflow", + "task": "Research AI advancements." + } + ] + + response = requests.post( + f"{BASE_URL}/v1/swarm/batch/completions", + headers=headers, + json=payload + ) + return response.json() + ``` + +=== "cURL" + + ```bash title="batch_swarms.sh" + curl -X POST "https://api.swarms.world/v1/swarm/batch/completions" \ + -H "x-api-key: $SWARMS_API_KEY" \ + -H "Content-Type: application/json" \ + -d '[ + { + "name": "Batch Swarm 1", + "description": "First swarm in batch", + "agents": [ + { + "agent_name": "Research Agent", + "description": "Conducts research", + "system_prompt": "You are a research assistant.", + "model_name": "gpt-4", + "role": "worker", + "max_loops": 1 + }, + { + "agent_name": "Analysis Agent", + "description": "Analyzes data", + "system_prompt": "You are a data analyst.", + "model_name": "gpt-4", + "role": "worker", + "max_loops": 1 + } + ], + "max_loops": 1, + "swarm_type": "SequentialWorkflow", + "task": "Research AI advancements." 
+ } + ]' + ``` + +=== "TypeScript" + + ```typescript linenums="1" title="batch_swarms.ts" + async function runBatchSwarms() { + const payload: SwarmPayload[] = [ + { + name: "Batch Swarm 1", + description: "First swarm in batch", + agents: [ + { + agent_name: "Research Agent", + description: "Conducts research", + system_prompt: "You are a research assistant.", + model_name: "gpt-4", + role: "worker", + max_loops: 1, + max_tokens: 8192, + temperature: 0.7, + auto_generate_prompt: false + }, + { + agent_name: "Analysis Agent", + description: "Analyzes data", + system_prompt: "You are a data analyst.", + model_name: "gpt-4", + role: "worker", + max_loops: 1, + max_tokens: 8192, + temperature: 0.7, + auto_generate_prompt: false + } + ], + max_loops: 1, + swarm_type: "SequentialWorkflow", + task: "Research AI advancements." + } + ]; + + try { + const response = await axios.post( + `${BASE_URL}/v1/swarm/batch/completions`, + payload, + { + headers: { + 'x-api-key': API_KEY, + 'Content-Type': 'application/json' + } + } + ); + return response.data; + } catch (error) { + console.error('Error:', error); + throw error; + } + } + ``` + +## Advanced Features + +### Tools Integration + +!!! note "Tools" + + Enhance agent capabilities by providing them with specialized tools. 
+ +=== "Python" + + ```python linenums="1" title="tools_example.py" + def run_agent_with_tools(): + tools_dictionary = [ + { + "type": "function", + "function": { + "name": "search_topic", + "description": "Conduct an in-depth search on a topic", + "parameters": { + "type": "object", + "properties": { + "depth": { + "type": "integer", + "description": "Search depth (1-3)" + }, + "detailed_queries": { + "type": "array", + "description": "Specific search queries", + "items": { + "type": "string" + } + } + }, + "required": ["depth", "detailed_queries"] + } + } + } + ] + + payload = { + "agent_config": { + "agent_name": "Research Assistant", + "description": "Expert in research with search capabilities", + "system_prompt": "You are a research assistant with search capabilities.", + "model_name": "gpt-4", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.7, + "auto_generate_prompt": False, + "tools_dictionary": tools_dictionary + }, + "task": "Research the latest developments in quantum computing." 
+ } + + response = requests.post( + f"{BASE_URL}/v1/agent/completions", + headers=headers, + json=payload + ) + return response.json() + ``` + +=== "cURL" + + ```bash title="tools_example.sh" + curl -X POST "https://api.swarms.world/v1/agent/completions" \ + -H "x-api-key: $SWARMS_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "agent_config": { + "agent_name": "Research Assistant", + "description": "Expert in research with search capabilities", + "system_prompt": "You are a research assistant with search capabilities.", + "model_name": "gpt-4", + "role": "worker", + "max_loops": 1, + "max_tokens": 8192, + "temperature": 0.7, + "auto_generate_prompt": false, + "tools_dictionary": [ + { + "type": "function", + "function": { + "name": "search_topic", + "description": "Conduct an in-depth search on a topic", + "parameters": { + "type": "object", + "properties": { + "depth": { + "type": "integer", + "description": "Search depth (1-3)" + }, + "detailed_queries": { + "type": "array", + "description": "Specific search queries", + "items": { + "type": "string" + } + } + }, + "required": ["depth", "detailed_queries"] + } + } + } + ] + }, + "task": "Research the latest developments in quantum computing." 
+ }' + ``` + +=== "TypeScript" + + ```typescript linenums="1" title="tools_example.ts" + interface ToolFunction { + name: string; + description: string; + parameters: { + type: string; + properties: { + [key: string]: { + type: string; + description: string; + items?: { + type: string; + }; + }; + }; + required: string[]; + }; + } + + interface Tool { + type: string; + function: ToolFunction; + } + + interface AgentWithToolsConfig extends AgentConfig { + tools_dictionary: Tool[]; + } + + interface AgentWithToolsPayload { + agent_config: AgentWithToolsConfig; + task: string; + } + + async function runAgentWithTools() { + const toolsDictionary: Tool[] = [ + { + type: "function", + function: { + name: "search_topic", + description: "Conduct an in-depth search on a topic", + parameters: { + type: "object", + properties: { + depth: { + type: "integer", + description: "Search depth (1-3)" + }, + detailed_queries: { + type: "array", + description: "Specific search queries", + items: { + type: "string" + } + } + }, + required: ["depth", "detailed_queries"] + } + } + } + ]; + + const payload: AgentWithToolsPayload = { + agent_config: { + agent_name: "Research Assistant", + description: "Expert in research with search capabilities", + system_prompt: "You are a research assistant with search capabilities.", + model_name: "gpt-4", + role: "worker", + max_loops: 1, + max_tokens: 8192, + temperature: 0.7, + auto_generate_prompt: false, + tools_dictionary: toolsDictionary + }, + task: "Research the latest developments in quantum computing." + }; + + try { + const response = await axios.post( + `${BASE_URL}/v1/agent/completions`, + payload, + { + headers: { + 'x-api-key': API_KEY, + 'Content-Type': 'application/json' + } + } + ); + return response.data; + } catch (error) { + console.error('Error:', error); + throw error; + } + } + ``` + +### Available Models + +!!! 
info "Supported Models" + + Choose the right model for your use case: + + === "OpenAI" + - `gpt-4` + - `gpt-4o` + - `gpt-4o-mini` + + === "Anthropic" + - `claude-3-5-sonnet-20240620` + - `claude-3-7-sonnet-latest` + + === "Groq" + - `groq/llama3-70b-8192` + - `groq/deepseek-r1-distill-llama-70b` + +## Best Practices + +!!! danger "Security" + Never commit API keys or sensitive credentials to version control. + +!!! warning "Rate Limits" + Implement proper rate limiting and error handling in production. + +!!! tip "Testing" + Start with simple tasks and gradually increase complexity. + +=== "Python" + + ```python linenums="1" title="best_practices.py" + # Error Handling + try: + response = requests.post(url, headers=headers, json=payload) + response.raise_for_status() + except requests.exceptions.RequestException as e: + print(f"Error: {e}") + + # Rate Limiting + import time + from tenacity import retry, wait_exponential + + @retry(wait=wait_exponential(multiplier=1, min=4, max=10)) + def make_api_call(): + response = requests.post(url, headers=headers, json=payload) + response.raise_for_status() + return response + + # Input Validation + def validate_payload(payload): + required_fields = ["agent_config", "task"] + if not all(field in payload for field in required_fields): + raise ValueError("Missing required fields") + ``` + +=== "TypeScript" + + ```typescript linenums="1" title="best_practices.ts" + // Error Handling + try { + const response = await axios.post(url, payload, { headers }); + } catch (error) { + if (axios.isAxiosError(error)) { + console.error('API Error:', error.response?.data); + } + throw error; + } + + // Rate Limiting + import { rateLimit } from 'axios-rate-limit'; + + const http = rateLimit(axios.create(), { + maxRequests: 2, + perMilliseconds: 1000 + }); + + // Input Validation + function validatePayload(payload: unknown): asserts payload is AgentPayload { + if (!payload || typeof payload !== 'object') { + throw new Error('Invalid payload'); + 
} + + const { agent_config, task } = payload as Partial; + + if (!agent_config || !task) { + throw new Error('Missing required fields'); + } + } + ``` + +## Connect With Us + +Join our community of agent engineers and researchers for technical support, cutting-edge updates, and exclusive access to world-class agent engineering insights! + +| Platform | Description | Link | +|----------|-------------|------| +| šŸ“š Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) | +| šŸ“ Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) | +| šŸ’¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) | +| 🐦 Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) | +| šŸ‘„ LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | +| šŸ“ŗ YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | +| šŸŽ« Events | Join our community events | [Sign up here](https://lu.ma/5p2jnc2v) | +| šŸš€ Onboarding Session | Get onboarded with Kye Gomez, creator and lead maintainer of Swarms | [Book Session](https://cal.com/swarms/swarms-onboarding-session) | \ No newline at end of file diff --git a/random_dynamic_speaker_example.py b/random_dynamic_speaker_example.py index 021c536f..56c62049 100644 --- a/random_dynamic_speaker_example.py +++ b/random_dynamic_speaker_example.py @@ -155,7 +155,7 @@ def example_round_robin_panel(): @cardiologist @oncologist @endocrinologist @infectious_disease please provide your assessment and treatment recommendations for this complex case.""" - response = group_chat.run(case1) + group_chat.run(case1) if __name__ == "__main__": diff --git a/swarms/agents/index.md b/swarms/agents/index.md new file mode 100644 index 00000000..4b632f1b --- /dev/null +++ b/swarms/agents/index.md @@ -0,0 +1,884 @@ +# Agents 
Introduction + +The Agent class is the core component of the Swarms framework, designed to create intelligent, autonomous AI agents capable of handling complex tasks through multi-modal processing, tool integration, and structured outputs. This comprehensive guide covers all aspects of the Agent class, from basic setup to advanced features. + +## Table of Contents + +1. [Prerequisites & Installation](#prerequisites--installation) +2. [Basic Agent Configuration](#basic-agent-configuration) +3. [Multi-Modal Capabilities](#multi-modal-capabilities) +4. [Tool Integration](#tool-integration) +5. [Structured Outputs](#structured-outputs) +6. [Advanced Features](#advanced-features) +7. [Best Practices](#best-practices) +8. [Complete Examples](#complete-examples) + +## Prerequisites & Installation + +### System Requirements + +- Python 3.7+ + +- OpenAI API key (for GPT models) + +- Anthropic API key (for Claude models) + +### Installation + +```bash +pip3 install -U swarms +``` + +### Environment Setup + +Create a `.env` file with your API keys: + +```bash +OPENAI_API_KEY="your-openai-api-key" +ANTHROPIC_API_KEY="your-anthropic-api-key" +WORKSPACE_DIR="agent_workspace" +``` + +## Basic Agent Configuration + +### Core Agent Structure + +The Agent class provides a comprehensive set of parameters for customization: + +```python +from swarms import Agent + +# Basic agent initialization +agent = Agent( + agent_name="MyAgent", + agent_description="A specialized AI agent for specific tasks", + system_prompt="You are a helpful assistant...", + model_name="gpt-4o-mini", + max_loops=1, + max_tokens=4096, + temperature=0.7, + output_type="str", + safety_prompt_on=True +) +``` + +### Key Configuration Parameters + +| Parameter | Type | Description | Default | +|-----------|------|-------------|---------| +| `agent_name` | str | Unique identifier for the agent | Required | +| `agent_description` | str | Detailed description of capabilities | Required | +| `system_prompt` | str | Core 
instructions defining behavior | Required | +| `model_name` | str | AI model to use | "gpt-4o-mini" | +| `max_loops` | int | Maximum execution loops | 1 | +| `max_tokens` | int | Maximum response tokens | 4096 | +| `temperature` | float | Response creativity (0-1) | 0.7 | +| `output_type` | str | Response format type | "str" | +| `multi_modal` | bool | Enable image processing | False | +| `safety_prompt_on` | bool | Enable safety checks | True | + +### Simple Example + +```python +from swarms import Agent + +# Create a basic financial advisor agent +financial_agent = Agent( + agent_name="Financial-Advisor", + agent_description="Personal finance and investment advisor", + system_prompt="""You are an expert financial advisor with deep knowledge of: + - Investment strategies and portfolio management + - Risk assessment and mitigation + - Market analysis and trends + - Financial planning and budgeting + + Provide clear, actionable advice while considering risk tolerance.""", + model_name="gpt-4o-mini", + max_loops=1, + temperature=0.3, + output_type="str" +) + +# Run the agent +response = financial_agent.run("What are the best investment strategies for a 30-year-old?") +print(response) +``` + +## Multi-Modal Capabilities + +### Image Processing + +The Agent class supports comprehensive image analysis through vision-enabled models: + +```python +from swarms import Agent + +# Create a vision-enabled agent +vision_agent = Agent( + agent_name="Vision-Analyst", + agent_description="Advanced image analysis and quality control agent", + system_prompt="""You are an expert image analyst capable of: + - Detailed visual inspection and quality assessment + - Object detection and classification + - Scene understanding and context analysis + - Defect identification and reporting + + Provide comprehensive analysis with specific observations.""", + model_name="gpt-4o-mini", # Vision-enabled model + multi_modal=True, # Enable multi-modal processing + max_loops=1, + output_type="str" +) 
+ +# Analyze a single image +response = vision_agent.run( + task="Analyze this image for quality control purposes", + img="path/to/image.jpg" +) + +# Process multiple images +response = vision_agent.run( + task="Compare these images and identify differences", + imgs=["image1.jpg", "image2.jpg", "image3.jpg"], + summarize_multiple_images=True +) +``` + +### Supported Image Formats + +| Format | Description | Max Size | +|--------|-------------|----------| +| JPEG/JPG | Standard compressed format | 20MB | +| PNG | Lossless with transparency | 20MB | +| GIF | Animated (first frame only) | 20MB | +| WebP | Modern efficient format | 20MB | + +### Quality Control Example + +```python +from swarms import Agent +from swarms.prompts.logistics import Quality_Control_Agent_Prompt + +def security_analysis(danger_level: str) -> str: + """Analyze security danger level and return appropriate response.""" + danger_responses = { + "low": "No immediate danger detected", + "medium": "Moderate security concern identified", + "high": "Critical security threat detected", + None: "No danger level assessment available" + } + return danger_responses.get(danger_level, "Unknown danger level") + +# Quality control agent with tool integration +quality_agent = Agent( + agent_name="Quality-Control-Agent", + agent_description="Advanced quality control and security analysis agent", + system_prompt=f""" + {Quality_Control_Agent_Prompt} + + You have access to security analysis tools. When analyzing images: + 1. Identify potential safety hazards + 2. Assess quality standards compliance + 3. Determine appropriate danger levels (low, medium, high) + 4. 
Use the security_analysis function for threat assessment + """, + model_name="gpt-4o-mini", + multi_modal=True, + max_loops=1, + tools=[security_analysis] +) + +# Analyze factory image +response = quality_agent.run( + task="Analyze this factory image for safety and quality issues", + img="factory_floor.jpg" +) +``` + +## Tool Integration + +### Creating Custom Tools + +Tools are Python functions that extend your agent's capabilities: + +```python +import json +import requests +from typing import Optional, Dict, Any + +def get_weather_data(city: str, country: Optional[str] = None) -> str: + """ + Get current weather data for a specified city. + + Args: + city (str): The city name + country (Optional[str]): Country code (e.g., 'US', 'UK') + + Returns: + str: JSON formatted weather data + + Example: + >>> weather = get_weather_data("San Francisco", "US") + >>> print(weather) + {"temperature": 18, "condition": "partly cloudy", ...} + """ + try: + # API call logic here + weather_data = { + "city": city, + "country": country, + "temperature": 18, + "condition": "partly cloudy", + "humidity": 65, + "wind_speed": 12 + } + return json.dumps(weather_data, indent=2) + + except Exception as e: + return json.dumps({"error": f"Weather API error: {str(e)}"}) + +def calculate_portfolio_metrics(prices: list, weights: list) -> str: + """ + Calculate portfolio performance metrics. 
+ + Args: + prices (list): List of asset prices + weights (list): List of portfolio weights + + Returns: + str: JSON formatted portfolio metrics + """ + try: + # Portfolio calculation logic + portfolio_value = sum(p * w for p, w in zip(prices, weights)) + metrics = { + "total_value": portfolio_value, + "weighted_average": portfolio_value / sum(weights), + "asset_count": len(prices) + } + return json.dumps(metrics, indent=2) + + except Exception as e: + return json.dumps({"error": f"Calculation error: {str(e)}"}) +``` + +### Tool Integration Example + +```python +from swarms import Agent + +# Create agent with custom tools +multi_tool_agent = Agent( + agent_name="Multi-Tool-Assistant", + agent_description="Versatile assistant with weather and financial tools", + system_prompt="""You are a versatile assistant with access to: + - Weather data retrieval for any city + - Portfolio analysis and financial calculations + + Use these tools to provide comprehensive assistance.""", + model_name="gpt-4o-mini", + max_loops=1, + tools=[get_weather_data, calculate_portfolio_metrics] +) + +# Use the agent with tools +response = multi_tool_agent.run( + "What's the weather in New York and calculate metrics for a portfolio with prices [100, 150, 200] and weights [0.3, 0.4, 0.3]?" 
+) +``` + +### API Integration Tools + +```python +import requests +import json +from typing import List + +def get_cryptocurrency_price(coin_id: str, vs_currency: str = "usd") -> str: + """Get current cryptocurrency price from CoinGecko API.""" + try: + url = "https://api.coingecko.com/api/v3/simple/price" + params = { + "ids": coin_id, + "vs_currencies": vs_currency, + "include_market_cap": True, + "include_24hr_vol": True, + "include_24hr_change": True + } + + response = requests.get(url, params=params, timeout=10) + response.raise_for_status() + return json.dumps(response.json(), indent=2) + + except Exception as e: + return json.dumps({"error": f"API error: {str(e)}"}) + +def get_top_cryptocurrencies(limit: int = 10) -> str: + """Get top cryptocurrencies by market cap.""" + try: + url = "https://api.coingecko.com/api/v3/coins/markets" + params = { + "vs_currency": "usd", + "order": "market_cap_desc", + "per_page": limit, + "page": 1 + } + + response = requests.get(url, params=params, timeout=10) + response.raise_for_status() + return json.dumps(response.json(), indent=2) + + except Exception as e: + return json.dumps({"error": f"API error: {str(e)}"}) + +# Crypto analysis agent +crypto_agent = Agent( + agent_name="Crypto-Analysis-Agent", + agent_description="Cryptocurrency market analysis and price tracking agent", + system_prompt="""You are a cryptocurrency analysis expert with access to: + - Real-time price data for any cryptocurrency + - Market capitalization rankings + - Trading volume and price change data + + Provide insightful market analysis and investment guidance.""", + model_name="gpt-4o-mini", + max_loops=1, + tools=[get_cryptocurrency_price, get_top_cryptocurrencies] +) + +# Analyze crypto market +response = crypto_agent.run("Analyze the current Bitcoin price and show me the top 5 cryptocurrencies") +``` + +## Structured Outputs + +### Function Schema Definition + +Define structured outputs using OpenAI's function calling format: + +```python 
+from swarms import Agent + +# Define function schemas for structured outputs +stock_analysis_schema = { + "type": "function", + "function": { + "name": "analyze_stock_performance", + "description": "Analyze stock performance with detailed metrics", + "parameters": { + "type": "object", + "properties": { + "ticker": { + "type": "string", + "description": "Stock ticker symbol (e.g., AAPL, GOOGL)" + }, + "analysis_type": { + "type": "string", + "enum": ["technical", "fundamental", "comprehensive"], + "description": "Type of analysis to perform" + }, + "time_period": { + "type": "string", + "enum": ["1d", "1w", "1m", "3m", "1y"], + "description": "Time period for analysis" + }, + "metrics": { + "type": "array", + "items": { + "type": "string", + "enum": ["price", "volume", "pe_ratio", "market_cap", "volatility"] + }, + "description": "Metrics to include in analysis" + } + }, + "required": ["ticker", "analysis_type"] + } + } +} + +portfolio_optimization_schema = { + "type": "function", + "function": { + "name": "optimize_portfolio", + "description": "Optimize portfolio allocation based on risk and return", + "parameters": { + "type": "object", + "properties": { + "assets": { + "type": "array", + "items": { + "type": "object", + "properties": { + "symbol": {"type": "string"}, + "current_weight": {"type": "number"}, + "expected_return": {"type": "number"}, + "risk_level": {"type": "string", "enum": ["low", "medium", "high"]} + }, + "required": ["symbol", "current_weight"] + } + }, + "risk_tolerance": { + "type": "string", + "enum": ["conservative", "moderate", "aggressive"] + }, + "investment_horizon": { + "type": "integer", + "minimum": 1, + "maximum": 30, + "description": "Investment time horizon in years" + } + }, + "required": ["assets", "risk_tolerance"] + } + } +} + +# Create agent with structured outputs +structured_agent = Agent( + agent_name="Structured-Financial-Agent", + agent_description="Financial analysis agent with structured output capabilities", + 
system_prompt="""You are a financial analysis expert that provides structured outputs. + Use the provided function schemas to format your responses consistently.""", + model_name="gpt-4o-mini", + max_loops=1, + tools_list_dictionary=[stock_analysis_schema, portfolio_optimization_schema] +) + +# Generate structured analysis +response = structured_agent.run( + "Analyze Apple stock (AAPL) performance with comprehensive analysis for the last 3 months" +) +``` + +## Advanced Features + +### Dynamic Temperature Control + +```python +from swarms import Agent + +# Agent with dynamic temperature adjustment +adaptive_agent = Agent( + agent_name="Adaptive-Response-Agent", + agent_description="Agent that adjusts response creativity based on context", + system_prompt="You are an adaptive AI that adjusts your response style based on the task complexity.", + model_name="gpt-4o-mini", + dynamic_temperature_enabled=True, # Enable adaptive temperature + max_loops=1, + output_type="str" +) +``` + +### Output Type Configurations + +```python +# Different output type examples +json_agent = Agent( + agent_name="JSON-Agent", + system_prompt="Always respond in valid JSON format", + output_type="json" +) + +streaming_agent = Agent( + agent_name="Streaming-Agent", + system_prompt="Provide detailed streaming responses", + output_type="str-all-except-first" +) + +final_only_agent = Agent( + agent_name="Final-Only-Agent", + system_prompt="Provide only the final result", + output_type="final" +) +``` + +### Safety and Content Filtering + +```python +from swarms import Agent + +# Agent with enhanced safety features +safe_agent = Agent( + agent_name="Safe-Agent", + agent_description="Agent with comprehensive safety measures", + system_prompt="You are a helpful, harmless, and honest AI assistant.", + model_name="gpt-4o-mini", + safety_prompt_on=True, # Enable safety prompts + max_loops=1, + temperature=0.3 # Lower temperature for more consistent, safe responses +) +``` + +## Best Practices + +### 
Error Handling and Robustness
+
+```python
+import logging
+import time
+from swarms import Agent
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+def robust_agent_execution(agent, task, max_retries=3):
+    """Execute agent with retry logic and error handling."""
+    for attempt in range(max_retries):
+        try:
+            response = agent.run(task)
+            logger.info(f"Agent execution successful on attempt {attempt + 1}")
+            return response
+        except Exception as e:
+            logger.error(f"Attempt {attempt + 1} failed: {str(e)}")
+            if attempt == max_retries - 1:
+                raise
+            time.sleep(2 ** attempt)  # Exponential backoff
+
+    return None
+
+# Example usage
+try:
+    result = robust_agent_execution(agent, "Analyze market trends")
+    print(result)
+except Exception as e:
+    print(f"Agent execution failed: {e}")
+```
+
+### Performance Optimization
+
+```python
+from swarms import Agent
+import time
+
+# Optimized agent configuration
+optimized_agent = Agent(
+    agent_name="Optimized-Agent",
+    agent_description="Performance-optimized agent configuration",
+    system_prompt="You are an efficient AI assistant optimized for performance.",
+    model_name="gpt-4o-mini",  # Faster model
+    max_loops=1,  # Minimize loops
+    max_tokens=2048,  # Reasonable token limit
+    temperature=0.5,  # Balanced creativity
+    output_type="str"
+)
+
+# Batch processing example
+def process_tasks_batch(agent, tasks, batch_size=5):
+    """Process multiple tasks efficiently."""
+    results = []
+    for i in range(0, len(tasks), batch_size):
+        batch = tasks[i:i + batch_size]
+        batch_results = []
+
+        for task in batch:
+            start_time = time.time()
+            result = agent.run(task)
+            execution_time = time.time() - start_time
+
+            batch_results.append({
+                "task": task,
+                "result": result,
+                "execution_time": execution_time
+            })
+
+        results.extend(batch_results)
+        time.sleep(1)  # Rate limiting
+
+    return results
+```
+
+## Complete Examples
+
+### Multi-Modal Quality Control System
+
+```python
+from swarms import Agent
+from swarms.prompts.logistics import Quality_Control_Agent_Prompt + +def security_analysis(danger_level: str) -> str: + """Analyze security danger level and return appropriate response.""" + responses = { + "low": "āœ… No immediate danger detected - Safe to proceed", + "medium": "āš ļø Moderate security concern - Requires attention", + "high": "🚨 Critical security threat - Immediate action required", + None: "ā“ No danger level assessment available" + } + return responses.get(danger_level, "Unknown danger level") + +def quality_assessment(quality_score: int) -> str: + """Assess quality based on numerical score (1-10).""" + if quality_score >= 8: + return "āœ… Excellent quality - Meets all standards" + elif quality_score >= 6: + return "āš ļø Good quality - Minor improvements needed" + elif quality_score >= 4: + return "āŒ Poor quality - Significant issues identified" + else: + return "🚨 Critical quality failure - Immediate attention required" + +# Advanced quality control agent +quality_control_system = Agent( + agent_name="Advanced-Quality-Control-System", + agent_description="Comprehensive quality control and security analysis system", + system_prompt=f""" + {Quality_Control_Agent_Prompt} + + You are an advanced quality control system with the following capabilities: + + 1. Visual Inspection: Analyze images for defects, compliance, and safety + 2. Security Assessment: Identify potential security threats and hazards + 3. Quality Scoring: Provide numerical quality ratings (1-10 scale) + 4. Detailed Reporting: Generate comprehensive analysis reports + + When analyzing images: + - Identify specific defects or issues + - Assess compliance with safety standards + - Determine appropriate danger levels (low, medium, high) + - Provide quality scores and recommendations + - Use available tools for detailed analysis + + Always provide specific, actionable feedback. 
+ """, + model_name="gpt-4o-mini", + multi_modal=True, + max_loops=1, + tools=[security_analysis, quality_assessment], + output_type="str" +) + +# Process factory images +factory_images = ["factory_floor.jpg", "assembly_line.jpg", "safety_equipment.jpg"] + +for image in factory_images: + print(f"\n--- Analyzing {image} ---") + response = quality_control_system.run( + task=f"Perform comprehensive quality control analysis of this image. Assess safety, quality, and provide specific recommendations.", + img=image + ) + print(response) +``` + +### Advanced Financial Analysis Agent + +```python +from swarms import Agent +import json +import requests + +def get_market_data(symbol: str, period: str = "1y") -> str: + """Get comprehensive market data for a symbol.""" + # Simulated market data (replace with real API) + market_data = { + "symbol": symbol, + "current_price": 150.25, + "change_percent": 2.5, + "volume": 1000000, + "market_cap": 2500000000, + "pe_ratio": 25.5, + "dividend_yield": 1.8, + "52_week_high": 180.50, + "52_week_low": 120.30 + } + return json.dumps(market_data, indent=2) + +def calculate_risk_metrics(prices: list, benchmark_prices: list) -> str: + """Calculate risk metrics for a portfolio.""" + import numpy as np + + try: + returns = np.diff(prices) / prices[:-1] + benchmark_returns = np.diff(benchmark_prices) / benchmark_prices[:-1] + + volatility = np.std(returns) * np.sqrt(252) # Annualized + sharpe_ratio = (np.mean(returns) / np.std(returns)) * np.sqrt(252) + max_drawdown = np.max(np.maximum.accumulate(prices) - prices) / np.max(prices) + + beta = np.cov(returns, benchmark_returns)[0, 1] / np.var(benchmark_returns) + + risk_metrics = { + "volatility": float(volatility), + "sharpe_ratio": float(sharpe_ratio), + "max_drawdown": float(max_drawdown), + "beta": float(beta) + } + + return json.dumps(risk_metrics, indent=2) + + except Exception as e: + return json.dumps({"error": f"Risk calculation error: {str(e)}"}) + +# Financial analysis schemas 
+financial_analysis_schema = { + "type": "function", + "function": { + "name": "comprehensive_financial_analysis", + "description": "Perform comprehensive financial analysis with structured output", + "parameters": { + "type": "object", + "properties": { + "analysis_summary": { + "type": "object", + "properties": { + "overall_rating": {"type": "string", "enum": ["buy", "hold", "sell"]}, + "confidence_level": {"type": "number", "minimum": 0, "maximum": 100}, + "key_strengths": {"type": "array", "items": {"type": "string"}}, + "key_concerns": {"type": "array", "items": {"type": "string"}}, + "price_target": {"type": "number"}, + "risk_level": {"type": "string", "enum": ["low", "medium", "high"]} + } + }, + "technical_analysis": { + "type": "object", + "properties": { + "trend_direction": {"type": "string", "enum": ["bullish", "bearish", "neutral"]}, + "support_levels": {"type": "array", "items": {"type": "number"}}, + "resistance_levels": {"type": "array", "items": {"type": "number"}}, + "momentum_indicators": {"type": "array", "items": {"type": "string"}} + } + } + }, + "required": ["analysis_summary", "technical_analysis"] + } + } +} + +# Advanced financial agent +financial_analyst = Agent( + agent_name="Advanced-Financial-Analyst", + agent_description="Comprehensive financial analysis and investment advisory agent", + system_prompt="""You are an expert financial analyst with advanced capabilities in: + + - Fundamental analysis and valuation + - Technical analysis and chart patterns + - Risk assessment and portfolio optimization + - Market sentiment analysis + - Economic indicator interpretation + + Your analysis should be: + - Data-driven and objective + - Risk-aware and practical + - Clearly structured and actionable + - Compliant with financial regulations + + Use available tools to gather market data and calculate risk metrics. 
+ Provide structured outputs using the defined schemas.""", + model_name="gpt-4o-mini", + max_loops=1, + tools=[get_market_data, calculate_risk_metrics], + tools_list_dictionary=[financial_analysis_schema], + output_type="json" +) + +# Comprehensive financial analysis +analysis_response = financial_analyst.run( + "Perform a comprehensive analysis of Apple Inc. (AAPL) including technical and fundamental analysis with structured recommendations" +) + +print(json.dumps(json.loads(analysis_response), indent=2)) +``` + +### Multi-Agent Collaboration System + +```python +from swarms import Agent +import json + +# Specialized agents for different tasks +research_agent = Agent( + agent_name="Research-Specialist", + agent_description="Market research and data analysis specialist", + system_prompt="You are a market research expert specializing in data collection and analysis.", + model_name="gpt-4o-mini", + max_loops=1, + temperature=0.3 +) + +strategy_agent = Agent( + agent_name="Strategy-Advisor", + agent_description="Strategic planning and recommendation specialist", + system_prompt="You are a strategic advisor providing high-level recommendations based on research.", + model_name="gpt-4o-mini", + max_loops=1, + temperature=0.5 +) + +execution_agent = Agent( + agent_name="Execution-Planner", + agent_description="Implementation and execution planning specialist", + system_prompt="You are an execution expert creating detailed implementation plans.", + model_name="gpt-4o-mini", + max_loops=1, + temperature=0.4 +) + +def collaborative_analysis(topic: str): + """Perform collaborative analysis using multiple specialized agents.""" + + # Step 1: Research Phase + research_task = f"Conduct comprehensive research on {topic}. Provide key findings, market data, and trends." + research_results = research_agent.run(research_task) + + # Step 2: Strategy Phase + strategy_task = f"Based on this research: {research_results}\n\nDevelop strategic recommendations for {topic}." 
+ strategy_results = strategy_agent.run(strategy_task) + + # Step 3: Execution Phase + execution_task = f"Create a detailed implementation plan based on:\nResearch: {research_results}\nStrategy: {strategy_results}" + execution_results = execution_agent.run(execution_task) + + return { + "research": research_results, + "strategy": strategy_results, + "execution": execution_results + } + +# Example: Collaborative investment analysis +investment_analysis = collaborative_analysis("renewable energy sector investment opportunities") + +for phase, results in investment_analysis.items(): + print(f"\n=== {phase.upper()} PHASE ===") + print(results) +``` + +## Support and Resources + +Join our community of agent engineers and researchers for technical support, cutting-edge updates, and exclusive access to world-class agent engineering insights! + +| Platform | Description | Link | +|----------|-------------|------| +| šŸ“š Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) | +| šŸ“ Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) | +| šŸ’¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) | +| 🐦 Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) | +| šŸ‘„ LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | +| šŸ“ŗ YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | +| šŸŽ« Events | Join our community events | [Sign up here](https://lu.ma/5p2jnc2v) | +| šŸš€ Onboarding Session | Get onboarded with Kye Gomez, creator and lead maintainer of Swarms | [Book Session](https://cal.com/swarms/swarms-onboarding-session) | + +### Getting Help + +If you encounter issues or need assistance: + +1. **Check the Documentation**: Start with the official docs for comprehensive guides +2. 
**Search Issues**: Look through existing GitHub issues for similar problems +3. **Join Discord**: Get real-time help from the community +4. **Create an Issue**: Report bugs or request features on GitHub +5. **Follow Updates**: Stay informed about new releases and improvements + +### Contributing + +We welcome contributions! Here's how to get involved: + +- **Report Bugs**: Help us improve by reporting issues + +- **Suggest Features**: Share your ideas for new capabilities + +- **Submit Code**: Contribute improvements and new features + +- **Improve Documentation**: Help make our docs better + +- **Share Examples**: Show how you're using Swarms in your projects + +--- + +*This guide covers the essential aspects of the Swarms Agent class. For the most up-to-date information and advanced features, please refer to the official documentation and community resources.* \ No newline at end of file From 455306f034b884fbab693d30492e0c883c2a4ef0 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Wed, 2 Jul 2025 20:27:58 -0700 Subject: [PATCH 74/86] swarms api rate limits --- docs/mkdocs.yml | 7 ++-- docs/swarms_cloud/rate_limits.md | 59 ++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 3 deletions(-) create mode 100644 docs/swarms_cloud/rate_limits.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 5cd7ef38..9bae23a1 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -368,9 +368,10 @@ nav: - Swarms API Rust Client: "swarms_cloud/rust_client.md" - Pricing: - - Swarms API Pricing: "swarms_cloud/api_pricing.md" - - Swarms API Pricing in Chinese: "swarms_cloud/chinese_api_pricing.md" - - Swarms Cloud Subscription Tiers: "swarms_cloud/subscription_tiers.md" + - Pricing: "swarms_cloud/api_pricing.md" + - Pricing in Chinese: "swarms_cloud/chinese_api_pricing.md" + - Subscription Tiers: "swarms_cloud/subscription_tiers.md" + - Rate Limits: "swarms_cloud/rate_limits.md" - Guides: - Swarms API Best Practices: "swarms_cloud/best_practices.md" diff --git 
a/docs/swarms_cloud/rate_limits.md b/docs/swarms_cloud/rate_limits.md new file mode 100644 index 00000000..d0fb9759 --- /dev/null +++ b/docs/swarms_cloud/rate_limits.md @@ -0,0 +1,59 @@ +# Swarms API Rate Limits + +The Swarms API implements rate limiting to ensure fair usage and system stability. Here are the current limits: + +## Standard Rate Limits + +- **General API Requests**: 100 requests per minute +- **Batch Operations**: Maximum 10 requests per batch for agent/swarm batch operations + +## Rate Limit Response + +When you exceed the rate limit, the API will return a 429 (Too Many Requests) status code with the following message: +```json +{ + "detail": "Rate limit exceeded. Please try again later." +} +``` + +## Batch Operation Limits + +For batch operations (`/v1/agent/batch/completions` and `/v1/swarm/batch/completions`): + +- Maximum 10 concurrent requests per batch + +- Exceeding this limit will result in a 400 (Bad Request) error + +## Increasing Your Rate Limits + +Need higher rate limits for your application? You can increase your limits by subscribing to a higher tier plan at [swarms.world/pricing](https://swarms.world/pricing). + +Higher tier plans offer: + +- Increased rate limits + +- Higher batch operation limits + +- Priority processing + +- Dedicated support + +## Best Practices + +To make the most of your rate limits: + +1. Implement proper error handling for rate limit responses + +2. Use batch operations when processing multiple requests + +3. Add appropriate retry logic with exponential backoff + +4. Monitor your API usage to stay within limits + +## Rate Limit Headers + +The API does not currently expose rate limit headers. We recommend implementing your own request tracking to stay within the limits. + +--- + +For questions about rate limits or to request a custom plan for higher limits, please contact our support team or visit [swarms.world/pricing](https://swarms.world/pricing). 
\ No newline at end of file From 98e24d2b0cc5c9fd777f8930cefc4ab13f27d037 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Wed, 2 Jul 2025 20:39:31 -0700 Subject: [PATCH 75/86] docs agent fix --- docs/learn_more.md | 0 docs/mkdocs.yml | 2 +- {swarms => docs/swarms}/agents/index.md | 0 3 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 docs/learn_more.md rename {swarms => docs/swarms}/agents/index.md (100%) diff --git a/docs/learn_more.md b/docs/learn_more.md deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 9bae23a1..ae6dc3ab 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -355,6 +355,7 @@ nav: - Overview: "swarms_cloud/swarms_api.md" - Quickstart: "swarms_cloud/quickstart.md" - MCP Server: "swarms_cloud/mcp.md" + - Rate Limits: "swarms_cloud/rate_limits.md" - Capabilities: - Agents: @@ -371,7 +372,6 @@ nav: - Pricing: "swarms_cloud/api_pricing.md" - Pricing in Chinese: "swarms_cloud/chinese_api_pricing.md" - Subscription Tiers: "swarms_cloud/subscription_tiers.md" - - Rate Limits: "swarms_cloud/rate_limits.md" - Guides: - Swarms API Best Practices: "swarms_cloud/best_practices.md" diff --git a/swarms/agents/index.md b/docs/swarms/agents/index.md similarity index 100% rename from swarms/agents/index.md rename to docs/swarms/agents/index.md From 55a31c38f098e795338277b9b3626bdef9734a80 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Wed, 2 Jul 2025 20:41:13 -0700 Subject: [PATCH 76/86] swarms api docs simple --- docs/mkdocs.yml | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index ae6dc3ab..e8e4be9f 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -326,11 +326,6 @@ nav: - Swarms of Browser Agents: "swarms/examples/swarms_of_browser_agents.md" - ConcurrentWorkflow with VLLM Agents: "swarms/examples/vllm.md" - - - Swarms API Examples: - - Medical Swarm: "swarms/examples/swarms_api_medical.md" - - Finance Swarm: 
"swarms/examples/swarms_api_finance.md" - # - ML Model Code Generation Swarm: "swarms/examples/swarms_api_ml_model.md" # - Swarm Models: # - Overview: "swarms/models/index.md" @@ -363,10 +358,13 @@ nav: - Tools: "swarms_cloud/swarms_api_tools.md" - Multi-Agent: - Multi Agent Architectures Available: "swarms_cloud/swarm_types.md" + - Examples: + - Medical Swarm: "swarms/examples/swarms_api_medical.md" + - Finance Swarm: "swarms/examples/swarms_api_finance.md" - Clients: - - Swarms API Python Client: "swarms_cloud/python_client.md" - - Swarms API Rust Client: "swarms_cloud/rust_client.md" + - Python Client: "swarms_cloud/python_client.md" + - Rust Client: "swarms_cloud/rust_client.md" - Pricing: - Pricing: "swarms_cloud/api_pricing.md" @@ -374,7 +372,7 @@ nav: - Subscription Tiers: "swarms_cloud/subscription_tiers.md" - Guides: - - Swarms API Best Practices: "swarms_cloud/best_practices.md" + - Best Practices: "swarms_cloud/best_practices.md" - Swarms Marketplace: - Overview: "swarms_platform/index.md" From 29db87030d3ba3c6040ca750f053a9e639b0cb48 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Wed, 2 Jul 2025 20:51:37 -0700 Subject: [PATCH 77/86] multi agent architectures list --- docs/swarms_cloud/swarm_types.md | 68 +++++++++++--------------------- 1 file changed, 22 insertions(+), 46 deletions(-) diff --git a/docs/swarms_cloud/swarm_types.md b/docs/swarms_cloud/swarm_types.md index 795f9ad5..f6091501 100644 --- a/docs/swarms_cloud/swarm_types.md +++ b/docs/swarms_cloud/swarm_types.md @@ -1,54 +1,30 @@ -### Available Swarms in The Swarms API +# Multi-Agent Architectures -| Swarm Type | Description (English) | Description (Chinese) | -|----------------------|-----------------------------------------------------------------------------|----------------------------------------------------------------------------| -| AgentRearrange | A swarm type focused on rearranging agents for optimal performance. 
| äø€ē§äø“ę³ØäŗŽé‡ę–°ęŽ’åˆ—ä»£ē†ä»„å®žēŽ°ęœ€ä½³ę€§čƒ½ēš„ē¾¤ē±»åž‹ć€‚ | -| MixtureOfAgents | Combines different types of agents to achieve a specific goal. | ē»“åˆäøåŒē±»åž‹ēš„ä»£ē†ä»„å®žēŽ°ē‰¹å®šē›®ę ‡ć€‚ | -| SpreadSheetSwarm | Utilizes spreadsheet-like structures for data management and operations. | åˆ©ē”Øē±»ä¼¼ē”µå­č”Øę ¼ēš„ē»“ęž„čæ›č”Œę•°ę®ē®”ē†å’Œę“ä½œć€‚ | -| SequentialWorkflow | Executes tasks in a sequential manner. | ä»„é”ŗåŗę–¹å¼ę‰§č”Œä»»åŠ”ć€‚ | -| ConcurrentWorkflow | Allows tasks to be executed concurrently for efficiency. | å…č®øä»»åŠ”å¹¶å‘ę‰§č”Œä»„ęé«˜ę•ˆēŽ‡ć€‚ | -| GroupChat | Facilitates communication among agents in a group chat format. | ä»„ē¾¤čŠę ¼å¼äæƒčæ›ä»£ē†ä¹‹é—“ēš„ę²Ÿé€šć€‚ | -| MultiAgentRouter | Routes tasks and information among multiple agents. | åœØå¤šäøŖä»£ē†ä¹‹é—“č·Æē”±ä»»åŠ”å’Œäæ”ęÆć€‚ | -| AutoSwarmBuilder | Automatically builds and configures swarms based on predefined criteria. | ę ¹ę®é¢„å®šä¹‰ę ‡å‡†č‡ŖåŠØęž„å»ŗå’Œé…ē½®ē¾¤ć€‚ | -| HiearchicalSwarm | Organizes agents in a hierarchical structure for task delegation. | ä»„å±‚ę¬”ē»“ęž„ē»„ē»‡ä»£ē†ä»„čæ›č”Œä»»åŠ”å§”ę“¾ć€‚ | -| auto | Automatically selects the best swarm type based on the context. | ę ¹ę®äøŠäø‹ę–‡č‡ŖåŠØé€‰ę‹©ęœ€ä½³ē¾¤ē±»åž‹ć€‚ | -| MajorityVoting | Uses majority voting among agents to make decisions. | ä½æē”Øä»£ē†ä¹‹é—“ēš„å¤šę•°ęŠ•ē„Øę„åšå‡ŗå†³ē­–ć€‚ | -| MALT | A specialized swarm type for specific tasks (details needed). | äø€ē§äø“é—Øäøŗē‰¹å®šä»»åŠ”č®¾č®”ēš„ē¾¤ē±»åž‹ļ¼ˆéœ€č¦čÆ¦ē»†äæ”ęÆļ¼‰ć€‚ | +Each multi-agent architecture type is designed for specific use cases and can be combined to create powerful multi-agent systems. 
Here's a comprehensive overview of each available swarm: -### Documentation for Swarms +| Swarm Type | Description | Learn More | +|---------------------|------------------------------------------------------------------------------|------------| +| AgentRearrange | Dynamically reorganizes agents to optimize task performance and efficiency. Optimizes agent performance by dynamically adjusting their roles and positions within the workflow. This architecture is particularly useful when the effectiveness of agents depends on their sequence or arrangement. | [Learn More](/swarms/structs/agent_rearrange) | +| MixtureOfAgents | Creates diverse teams of specialized agents, each bringing unique capabilities to solve complex problems. Each agent contributes unique skills to achieve the overall goal, making it excel at tasks requiring multiple types of expertise or processing. | [Learn More](/swarms/structs/moa) | +| SpreadSheetSwarm | Provides a structured approach to data management and operations, making it ideal for tasks involving data analysis, transformation, and systematic processing in a spreadsheet-like structure. | [Learn More](/swarms/structs/spreadsheet_swarm) | +| SequentialWorkflow | Ensures strict process control by executing tasks in a predefined order. Perfect for workflows where each step depends on the completion of previous steps. | [Learn More](/swarms/structs/sequential_workflow) | +| ConcurrentWorkflow | Maximizes efficiency by running independent tasks in parallel, significantly reducing overall processing time for complex operations. Ideal for independent tasks that can be processed simultaneously. | [Learn More](/swarms/structs/concurrentworkflow) | +| GroupChat | Enables dynamic collaboration between agents through a chat-based interface, facilitating real-time information sharing and decision-making. 
| [Learn More](/swarms/structs/group_chat) | +| MultiAgentRouter | Acts as an intelligent task dispatcher, ensuring optimal distribution of work across available agents based on their capabilities and current workload. | [Learn More](/swarms/structs/multi_agent_router) | +| AutoSwarmBuilder | Simplifies swarm creation by automatically configuring agent architectures based on task requirements and performance metrics. | [Learn More](/swarms/structs/auto_swarm_builder) | +| HiearchicalSwarm | Implements a structured approach to task management, with clear lines of authority and delegation across multiple agent levels. | [Learn More](/swarms/structs/multi_swarm_orchestration) | +| auto | Provides intelligent swarm selection based on context, automatically choosing the most effective architecture for given tasks. | [Learn More](/swarms/concept/how_to_choose_swarms) | +| MajorityVoting | Implements robust decision-making through consensus, particularly useful for tasks requiring collective intelligence or verification. | [Learn More](/swarms/structs/majorityvoting) | +| MALT | Specialized framework for language-based tasks, optimizing agent collaboration for complex language processing operations. | [Learn More](/swarms/structs/malt) | -1. **AgentRearrange**: This swarm type is designed to rearrange agents to optimize their performance in a given task. It is useful in scenarios where agent positioning or order affects the outcome. - - čæ™ē§ē¾¤ē±»åž‹ę—ØåœØé‡ę–°ęŽ’åˆ—ä»£ē†ä»„ä¼˜åŒ–å…¶åœØē»™å®šä»»åŠ”äø­ēš„ę€§čƒ½ć€‚å®ƒåœØä»£ē†ä½ē½®ęˆ–é”ŗåŗå½±å“ē»“ęžœēš„ęƒ…å†µäø‹éžåøøęœ‰ē”Øć€‚ +# Learn More -2. **MixtureOfAgents**: This type combines various agents, each with unique capabilities, to work together towards a common goal. It leverages the strengths of different agents to enhance overall performance. 
- - čæ™ē§ē±»åž‹ē»“åˆäŗ†å„ē§ä»£ē†ļ¼ŒęÆäøŖä»£ē†éƒ½ęœ‰ē‹¬ē‰¹ēš„čƒ½åŠ›ļ¼Œå…±åŒåŠŖåŠ›å®žēŽ°å…±åŒē›®ę ‡ć€‚å®ƒåˆ©ē”ØäøåŒä»£ē†ēš„ä¼˜åŠæę„ęé«˜ę•“ä½“ę€§čƒ½ć€‚ +To learn more about Swarms architecture and how different swarm types work together, visit our comprehensive guides: -3. **SpreadSheetSwarm**: This swarm type uses spreadsheet-like structures to manage and operate on data. It is ideal for tasks that require organized data manipulation and analysis. - - čæ™ē§ē¾¤ē±»åž‹ä½æē”Øē±»ä¼¼ē”µå­č”Øę ¼ēš„ē»“ęž„ę„ē®”ē†å’Œę“ä½œę•°ę®ć€‚å®ƒéžåøøé€‚åˆéœ€č¦ęœ‰ē»„ē»‡ēš„ę•°ę®ę“ä½œå’Œåˆ†ęžēš„ä»»åŠ”ć€‚ +- [Introduction to Multi-Agent Architectures](/swarms/concept/swarm_architectures) -4. **SequentialWorkflow**: Tasks are executed one after another in this swarm type, ensuring that each step is completed before the next begins. It is suitable for processes that require strict order. - - åœØčæ™ē§ē¾¤ē±»åž‹äø­ļ¼Œä»»åŠ”äø€äøŖęŽ„äø€äøŖåœ°ę‰§č”Œļ¼Œē”®äæęÆäøŖę­„éŖ¤åœØäø‹äø€äøŖę­„éŖ¤å¼€å§‹ä¹‹å‰å®Œęˆć€‚å®ƒé€‚ē”ØäŗŽéœ€č¦äø„ę ¼é”ŗåŗēš„ęµēØ‹ć€‚ +- [How to Choose the Right Multi-Agent Architecture](/swarms/concept/how_to_choose_swarms) -5. **ConcurrentWorkflow**: This type allows multiple tasks to be executed simultaneously, improving efficiency and reducing time for completion. It is best for independent tasks that do not rely on each other. - - čæ™ē§ē±»åž‹å…č®øå¤šäøŖä»»åŠ”åŒę—¶ę‰§č”Œļ¼Œęé«˜ę•ˆēŽ‡å¹¶å‡å°‘å®Œęˆę—¶é—“ć€‚å®ƒęœ€é€‚åˆäøē›øäŗ’ä¾čµ–ēš„ē‹¬ē«‹ä»»åŠ”ć€‚ +- [Framework Architecture Overview](/swarms/concept/framework_architecture) -6. **GroupChat**: Facilitates communication among agents in a group chat format, enabling real-time collaboration and decision-making. - - ä»„ē¾¤čŠę ¼å¼äæƒčæ›ä»£ē†ä¹‹é—“ēš„ę²Ÿé€šļ¼Œå®žēŽ°å®žę—¶åä½œå’Œå†³ē­–ć€‚ - -7. **MultiAgentRouter**: This swarm type routes tasks and information among multiple agents, ensuring that each agent receives the necessary data to perform its function. 
- - čæ™ē§ē¾¤ē±»åž‹åœØå¤šäøŖä»£ē†ä¹‹é—“č·Æē”±ä»»åŠ”å’Œäæ”ęÆļ¼Œē”®äæęÆäøŖä»£ē†ęŽ„ę”¶åˆ°ę‰§č”Œå…¶åŠŸčƒ½ę‰€éœ€ēš„ę•°ę®ć€‚ - -8. **AutoSwarmBuilder**: Automatically builds and configures swarms based on predefined criteria, reducing the need for manual setup and configuration. - - ę ¹ę®é¢„å®šä¹‰ę ‡å‡†č‡ŖåŠØęž„å»ŗå’Œé…ē½®ē¾¤ļ¼Œå‡å°‘ę‰‹åŠØč®¾ē½®å’Œé…ē½®ēš„éœ€č¦ć€‚ - -9. **HiearchicalSwarm**: Organizes agents in a hierarchical structure, allowing for efficient task delegation and management. - - ä»„å±‚ę¬”ē»“ęž„ē»„ē»‡ä»£ē†ļ¼Œå…č®øé«˜ę•ˆēš„ä»»åŠ”å§”ę“¾å’Œē®”ē†ć€‚ - -10. **auto**: Automatically selects the most appropriate swarm type based on the context and requirements of the task. - - ę ¹ę®ä»»åŠ”ēš„äøŠäø‹ę–‡å’Œč¦ę±‚č‡ŖåŠØé€‰ę‹©ęœ€åˆé€‚ēš„ē¾¤ē±»åž‹ć€‚ - -11. **MajorityVoting**: Uses a majority voting mechanism among agents to make decisions, ensuring that the most popular choice is selected. - - ä½æē”Øä»£ē†ä¹‹é—“ēš„å¤šę•°ęŠ•ē„Øęœŗåˆ¶ę„åšå‡ŗå†³ē­–ļ¼Œē”®äæé€‰ę‹©ęœ€å—ę¬¢čæŽēš„é€‰é”¹ć€‚ - -12. **MALT**: A specialized swarm type designed for specific tasks. Further details are needed to fully document this type. 
- - äø€ē§äø“é—Øäøŗē‰¹å®šä»»åŠ”č®¾č®”ēš„ē¾¤ē±»åž‹ć€‚éœ€č¦čæ›äø€ę­„ēš„čÆ¦ē»†äæ”ęÆę„å®Œę•“č®°å½•čæ™ē§ē±»åž‹ć€‚ +- [Building Custom Swarms](/swarms/structs/custom_swarm) From 04db212bb4a5d508c6cee47cd3e0de61389f4910 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Wed, 2 Jul 2025 20:53:03 -0700 Subject: [PATCH 78/86] best practices for swarms api docs --- docs/mkdocs.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index e8e4be9f..ac78f6a7 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -351,6 +351,7 @@ nav: - Quickstart: "swarms_cloud/quickstart.md" - MCP Server: "swarms_cloud/mcp.md" - Rate Limits: "swarms_cloud/rate_limits.md" + - Best Practices: "swarms_cloud/best_practices.md" - Capabilities: - Agents: @@ -371,9 +372,6 @@ nav: - Pricing in Chinese: "swarms_cloud/chinese_api_pricing.md" - Subscription Tiers: "swarms_cloud/subscription_tiers.md" - - Guides: - - Best Practices: "swarms_cloud/best_practices.md" - - Swarms Marketplace: - Overview: "swarms_platform/index.md" - Marketplace: From 327e0d7ac377da4365d38f86e6a38ad8571d9e27 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Wed, 2 Jul 2025 21:02:21 -0700 Subject: [PATCH 79/86] docs.agents index duplicate --- docs/mkdocs.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index ac78f6a7..b6c7f57b 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -155,7 +155,6 @@ nav: - Quickstart: "swarms/install/quickstart.md" - Feature Set: "swarms/features.md" - Agents: - - Overview: "swarms/agents/index.md" - Overview: "swarms/agents/index.md" - Concepts: # - Managing Prompts in Production: "swarms/prompts/main.md" From d00dfa44f111a494ffb31c21c6a19fda9872e52d Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Thu, 3 Jul 2025 10:51:58 -0700 Subject: [PATCH 80/86] fix -- speaker functions import issue and fix agent tool usage printing with retry function --- examples/README_realtor.md | 46 ++++++++++ 
mortgage_tax_panel_example.py | 154 ++++++++++++++++++++++++++++
 realtor_agent.py | 154 ++++++++++++++++++++++++++++
 swarms/structs/__init__.py | 2 -
 swarms/structs/agent.py | 148 +++++++++++++++++++++++---------
 swarms/utils/retry_func.py | 66 +++++++++++++++
 6 files changed, 528 insertions(+), 42 deletions(-)
 create mode 100644 examples/README_realtor.md
 create mode 100644 mortgage_tax_panel_example.py
 create mode 100644 realtor_agent.py
 create mode 100644 swarms/utils/retry_func.py

diff --git a/examples/README_realtor.md b/examples/README_realtor.md
new file mode 100644
index 00000000..889b2ba6
--- /dev/null
+++ b/examples/README_realtor.md
@@ -0,0 +1,46 @@
+# Realtor Agent Example
+
+This example demonstrates how to create an AI-powered rental property specialist using the Swarms framework and the Realtor API.
+
+## Quick Start
+
+1. Install dependencies:
+```bash
+pip install swarms
+```
+
+2. Get your Realtor API key:
+- Visit [Realtor Search API](https://rapidapi.com/ntd119/api/realtor-search/)
+- Sign up for RapidAPI
+- Subscribe to the API
+- Copy your API key
+
+3. Provide your API key via a `.env` file; `realtor_agent.py` reads it with:
+```python
+headers = {
+    "x-rapidapi-key": os.getenv("RAPIDAPI_KEY"),
+    "x-rapidapi-host": "realtor-search.p.rapidapi.com",
+}
+```
+
+4. Run the example:
+```python
+from realtor_agent import agent
+
+# Search single location
+response = agent.run(
+    "What are the best properties in Menlo Park for rent under $3,000?"
+    f"Data: {get_realtor_data_from_one_source('Menlo Park, CA')}"
+)
+print(response)
+```
+
+## Features
+
+- Property search across multiple locations
+- Detailed property analysis
+- Location assessment
+- Financial analysis
+- Tenant matching recommendations
+
+For full documentation, see [docs/examples/realtor_agent.md](../docs/examples/realtor_agent.md).
\ No newline at end of file diff --git a/mortgage_tax_panel_example.py b/mortgage_tax_panel_example.py new file mode 100644 index 00000000..45be97af --- /dev/null +++ b/mortgage_tax_panel_example.py @@ -0,0 +1,154 @@ +""" +Mortgage and Tax Panel Discussion Example + +This example demonstrates a panel of mortgage and tax specialists discussing complex +financial situations using InteractiveGroupChat with different speaker functions. +The panel includes specialists from different financial fields who can collaborate +on complex mortgage and tax planning cases. +""" + +from swarms import Agent +from swarms.structs.interactive_groupchat import ( + InteractiveGroupChat, +) + + +def create_mortgage_tax_panel(): + """Create a panel of mortgage and tax specialists for discussion.""" + + # Tax Attorney - Specializes in tax law and complex tax situations + tax_attorney = Agent( + agent_name="tax_attorney", + system_prompt="""You are Sarah Mitchell, J.D., a tax attorney with 15 years of experience. + You specialize in complex tax law, real estate taxation, and tax planning strategies. + You have expertise in: + - Federal and state tax regulations + - Real estate tax law and property taxation + - Tax implications of mortgage transactions + - Tax planning for real estate investments + - IRS dispute resolution and tax litigation + - Estate tax planning and trusts + + When discussing cases, provide legally sound tax advice, consider recent tax law changes, + and collaborate with other specialists to ensure comprehensive financial planning.""", + model_name="claude-3-5-sonnet-20240620", + streaming_on=True, + print_on=True, + ) + + # Mortgage Broker - Lending and mortgage specialist + mortgage_broker = Agent( + agent_name="mortgage_broker", + system_prompt="""You are Michael Chen, a senior mortgage broker with 12 years of experience. + You specialize in residential and commercial mortgage lending. 
+ You have expertise in: + - Conventional, FHA, VA, and jumbo loans + - Commercial mortgage financing + - Mortgage refinancing strategies + - Interest rate analysis and trends + - Loan qualification requirements + - Mortgage insurance considerations + + When discussing cases, analyze lending options, consider credit profiles, + and evaluate debt-to-income ratios for optimal mortgage solutions.""", + model_name="claude-3-5-sonnet-20240620", + streaming_on=True, + print_on=True, + ) + + # Real Estate CPA - Accounting specialist + real_estate_cpa = Agent( + agent_name="real_estate_cpa", + system_prompt="""You are Emily Rodriguez, CPA, a certified public accountant with 10 years of experience. + You specialize in real estate accounting and tax preparation. + You have expertise in: + - Real estate tax accounting + - Property depreciation strategies + - Mortgage interest deductions + - Real estate investment taxation + - Financial statement analysis + - Tax credit optimization + + When discussing cases, focus on accounting implications, tax efficiency, + and financial reporting requirements for real estate transactions.""", + model_name="claude-3-5-sonnet-20240620", + streaming_on=True, + print_on=True, + ) + + # Financial Advisor - Investment and planning specialist + financial_advisor = Agent( + agent_name="financial_advisor", + system_prompt="""You are James Thompson, CFPĀ®, a financial advisor with 8 years of experience. + You specialize in comprehensive financial planning and wealth management. 
+ You have expertise in: + - Investment portfolio management + - Retirement planning + - Real estate investment strategy + - Cash flow analysis + - Risk management + - Estate planning coordination + + When discussing cases, consider overall financial goals, investment strategy, + and how mortgage decisions impact long-term financial planning.""", + model_name="claude-3-5-sonnet-20240620", + streaming_on=True, + print_on=True, + ) + + # Real Estate Attorney - Property law specialist + real_estate_attorney = Agent( + agent_name="real_estate_attorney", + system_prompt="""You are Lisa Park, J.D., a real estate attorney with 11 years of experience. + You specialize in real estate law and property transactions. + You have expertise in: + - Real estate contract law + - Property title analysis + - Mortgage document review + - Real estate closing procedures + - Property rights and zoning + - Real estate litigation + + When discussing cases, evaluate legal implications, ensure compliance, + and address potential legal issues in real estate transactions.""", + model_name="claude-3-5-sonnet-20240620", + streaming_on=True, + print_on=True, + ) + + return [ + tax_attorney, + mortgage_broker, + real_estate_cpa, + financial_advisor, + real_estate_attorney, + ] + + +def example_mortgage_tax_panel(): + """Example with random dynamic speaking order.""" + print("=== MORTGAGE AND TAX SPECIALIST PANEL ===\n") + + agents = create_mortgage_tax_panel() + + group_chat = InteractiveGroupChat( + name="Mortgage and Tax Panel Discussion", + description="A collaborative panel of mortgage and tax specialists discussing complex cases", + agents=agents, + interactive=False, + speaker_function="random-speaker", + ) + + # Case 1: Complex mortgage refinancing with tax implications + case1 = """CASE PRESENTATION: + @tax_attorney, @real_estate_cpa, and @real_estate_attorney, please discuss the possible legal and accounting strategies + for minimizing or potentially eliminating property taxes in Los 
Altos, California. Consider legal exemptions, + special assessments, and any relevant California property tax laws that could help achieve this goal. + """ + + group_chat.run(case1) + + +if __name__ == "__main__": + + example_mortgage_tax_panel() diff --git a/realtor_agent.py b/realtor_agent.py new file mode 100644 index 00000000..3ec208f7 --- /dev/null +++ b/realtor_agent.py @@ -0,0 +1,154 @@ +from typing import List +import http.client +import json +from swarms import Agent + +from dotenv import load_dotenv + +load_dotenv() +import os + + +def get_realtor_data_from_one_source(location: str): + """ + Fetch rental property data from the Realtor API for a specified location. + + Args: + location (str): The location to search for rental properties (e.g., "Menlo Park, CA") + + Returns: + str: JSON-formatted string containing rental property data + + Raises: + http.client.HTTPException: If the API request fails + json.JSONDecodeError: If the response cannot be parsed as JSON + """ + conn = http.client.HTTPSConnection( + "realtor-search.p.rapidapi.com" + ) + + headers = { + "x-rapidapi-key": os.getenv("RAPIDAPI_KEY"), + "x-rapidapi-host": "realtor-search.p.rapidapi.com", + } + + # URL encode the location parameter + encoded_location = location.replace(" ", "%20").replace( + ",", "%2C" + ) + endpoint = f"/properties/search-rent?location=city%3A{encoded_location}&sortBy=best_match" + + conn.request( + "GET", + endpoint, + headers=headers, + ) + + res = conn.getresponse() + data = res.read() + + return "chicken data" + + # # Parse and format the response + # try: + # json_data = json.loads(data.decode("utf-8")) + # # Return formatted string instead of raw JSON + # return json.dumps(json_data, indent=2) + # except json.JSONDecodeError: + # return "Error: Could not parse API response" + + +def get_realtor_data_from_multiple_sources( + locations: List[str], +) -> List[str]: + """ + Fetch rental property data from multiple sources for a specified location. 
+ + Args: + location (List[str]): List of locations to search for rental properties (e.g., ["Menlo Park, CA", "Palo Alto, CA"]) + """ + output = [] + for location in locations: + data = get_realtor_data_from_one_source(location) + output.append(data) + return output + + +agent = Agent( + agent_name="Rental-Property-Specialist", + system_prompt=""" + You are an expert rental property specialist with deep expertise in real estate analysis and tenant matching. Your core responsibilities include: +1. Property Analysis & Evaluation + - Analyze rental property features and amenities + - Evaluate location benefits and drawbacks + - Assess property condition and maintenance needs + - Compare rental rates with market standards + - Review lease terms and conditions + - Identify potential red flags or issues + +2. Location Assessment + - Analyze neighborhood safety and demographics + - Evaluate proximity to amenities (schools, shopping, transit) + - Research local market trends and development plans + - Consider noise levels and traffic patterns + - Assess parking availability and restrictions + - Review zoning regulations and restrictions + +3. Financial Analysis + - Calculate price-to-rent ratios + - Analyze utility costs and included services + - Evaluate security deposit requirements + - Consider additional fees (pet rent, parking, etc.) + - Compare with similar properties in the area + - Assess potential for rent increases + +4. Tenant Matching + - Match properties to tenant requirements + - Consider commute distances + - Evaluate pet policies and restrictions + - Assess lease term flexibility + - Review application requirements + - Consider special accommodations needed + +5. 
Documentation & Compliance + - Review lease agreement terms + - Verify property certifications + - Check compliance with local regulations + - Assess insurance requirements + - Review maintenance responsibilities + - Document property condition + +When analyzing properties, always consider: +- Value for money +- Location quality +- Property condition +- Lease terms fairness +- Safety and security +- Maintenance and management quality +- Future market potential +- Tenant satisfaction factors + +When you receive property data: +1. Parse and analyze the JSON data +2. Format the output in a clear, readable way +3. Focus on properties under $3,000 +4. Include key details like: + - Property name/address + - Price + - Number of beds/baths + - Square footage + - Key amenities + - Links to listings +5. Sort properties by price (lowest to highest) + +Provide clear, objective analysis while maintaining professional standards and ethical considerations.""", + model_name="claude-3-sonnet-20240229", + max_loops=1, + tools=[get_realtor_data_from_one_source], + print_on=True, +) + + +agent.run( + "What are the best properties in Menlo Park, CA for rent under 3,000$?" 
+) diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py index bb005cc0..e40d22ce 100644 --- a/swarms/structs/__init__.py +++ b/swarms/structs/__init__.py @@ -85,7 +85,6 @@ from swarms.structs.swarming_architectures import ( ) from swarms.structs.interactive_groupchat import ( InteractiveGroupChat, - speaker_function, round_robin_speaker, random_speaker, priority_speaker, @@ -163,7 +162,6 @@ __all__ = [ "find_agent_by_name", "run_agent", "InteractiveGroupChat", - "speaker_function", "round_robin_speaker", "random_speaker", "priority_speaker", diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index d1c30116..dab44638 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -5,6 +5,7 @@ import os import random import threading import time +import traceback import uuid from concurrent.futures import ThreadPoolExecutor from datetime import datetime @@ -85,6 +86,7 @@ from swarms.utils.index import ( ) from swarms.schemas.conversation_schema import ConversationSchema from swarms.utils.output_types import OutputType +from swarms.utils.retry_func import retry_function def stop_when_repeats(response: str) -> bool: @@ -153,6 +155,12 @@ class AgentLLMInitializationError(AgentError): pass +class AgentToolExecutionError(AgentError): + """Exception raised when the agent fails to execute a tool. 
Check the tool's configuration and availability.""" + + pass + + # [FEAT][AGENT] class Agent: """ @@ -425,6 +433,7 @@ class Agent: tool_call_summary: bool = True, output_raw_json_from_tool_call: bool = False, summarize_multiple_images: bool = False, + tool_retry_attempts: int = 3, *args, **kwargs, ): @@ -564,6 +573,7 @@ class Agent: output_raw_json_from_tool_call ) self.summarize_multiple_images = summarize_multiple_images + self.tool_retry_attempts = tool_retry_attempts # self.short_memory = self.short_memory_init() @@ -1015,8 +1025,8 @@ class Agent: # Print the request if print_task is True: formatter.print_panel( - f"\n User: {task}", - f"Task Request for {self.agent_name}", + content=f"\n User: {task}", + title=f"Task Request for {self.agent_name}", ) while ( @@ -1091,26 +1101,22 @@ class Agent: ) # Print - self.pretty_print(response, loop_count) + if self.print_on is True: + if isinstance(response, list): + self.pretty_print( + f"Structured Output - Attempting Function Call Execution [{time.strftime('%H:%M:%S')}] \n\n {format_data_structure(response)} ", + loop_count, + ) + else: + self.pretty_print( + response, loop_count + ) # Check and execute callable tools if exists(self.tools): - if ( - self.output_raw_json_from_tool_call - is True - ): - response = response - else: - # Only execute tools if response is not None - if response is not None: - self.execute_tools( - response=response, - loop_count=loop_count, - ) - else: - logger.warning( - f"LLM returned None response in loop {loop_count}, skipping tool execution" - ) + self.tool_execution_retry( + response, loop_count + ) # Handle MCP tools if ( @@ -2790,19 +2796,23 @@ class Agent: return self.role def pretty_print(self, response: str, loop_count: int): - if self.print_on is False: - if self.streaming_on is True: - # Skip printing here since real streaming is handled in call_llm - # This avoids double printing when streaming_on=True - pass - elif self.print_on is False: - pass - else: - # 
logger.info(f"Response: {response}") - formatter.print_panel( - f"{self.agent_name}: {response}", - f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]", - ) + # if self.print_on is False: + # if self.streaming_on is True: + # # Skip printing here since real streaming is handled in call_llm + # # This avoids double printing when streaming_on=True + # pass + # elif self.print_on is False: + # pass + # else: + # # logger.info(f"Response: {response}") + # formatter.print_panel( + # response, + # f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]", + # ) + formatter.print_panel( + response, + f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]", + ) def parse_llm_output(self, response: Any): """Parse and standardize the output from the LLM. @@ -2915,10 +2925,10 @@ class Agent: # execute_tool_call_simple returns a string directly, not an object with content attribute text_content = f"MCP Tool Response: \n\n {json.dumps(tool_response, indent=2)}" - if self.print_on is False: + if self.print_on is True: formatter.print_panel( - text_content, - "MCP Tool Response: šŸ› ļø", + content=text_content, + title="MCP Tool Response: šŸ› ļø", style="green", ) @@ -2974,11 +2984,19 @@ class Agent: ) return - output = ( - self.tool_struct.execute_function_calls_from_api_response( + try: + output = self.tool_struct.execute_function_calls_from_api_response( + response + ) + except Exception as e: + # Retry the tool call + output = self.tool_struct.execute_function_calls_from_api_response( response ) - ) + + if output is None: + logger.error(f"Error executing tools: {e}") + raise e self.short_memory.add( role="Tool Executor", @@ -2986,7 +3004,7 @@ class Agent: ) self.pretty_print( - f"{format_data_structure(output)}", + "Tool Executed Successfully", loop_count, ) @@ -3013,7 +3031,7 @@ class Agent: ) self.pretty_print( - f"{tool_response}", + tool_response, loop_count, ) @@ -3150,3 +3168,53 @@ class Agent: raise Exception( f"Failed to find correct answer 
'{correct_answer}' after {max_attempts} attempts" ) + + def tool_execution_retry(self, response: any, loop_count: int): + """ + Execute tools with retry logic for handling failures. + + This method attempts to execute tools based on the LLM response. If the response + is None, it logs a warning and skips execution. If an exception occurs during + tool execution, it logs the error with full traceback and retries the operation + using the configured retry attempts. + + Args: + response (any): The response from the LLM that may contain tool calls to execute. + Can be None if the LLM failed to provide a valid response. + loop_count (int): The current iteration loop number for logging and debugging purposes. + + Returns: + None + + Raises: + Exception: Re-raises any exception that occurs during tool execution after + all retry attempts have been exhausted. + + Note: + - Uses self.tool_retry_attempts for the maximum number of retry attempts + - Logs detailed error information including agent name and loop count + - Skips execution gracefully if response is None + """ + try: + if response is not None: + self.execute_tools( + response=response, + loop_count=loop_count, + ) + else: + logger.warning( + f"Agent '{self.agent_name}' received None response from LLM in loop {loop_count}. " + f"This may indicate an issue with the model or prompt. Skipping tool execution." + ) + except Exception as e: + logger.error( + f"Agent '{self.agent_name}' encountered error during tool execution in loop {loop_count}: {str(e)}. " + f"Full traceback: {traceback.format_exc()}. 
" + f"Attempting to retry tool execution with 3 attempts" + ) + retry_function( + self.execute_tools, + response=response, + loop_count=loop_count, + max_retries=self.tool_retry_attempts, + ) diff --git a/swarms/utils/retry_func.py b/swarms/utils/retry_func.py new file mode 100644 index 00000000..2a32903d --- /dev/null +++ b/swarms/utils/retry_func.py @@ -0,0 +1,66 @@ +import time +from typing import Any, Callable, Type, Union, Tuple +from loguru import logger + + +def retry_function( + func: Callable, + *args: Any, + max_retries: int = 3, + delay: float = 1.0, + backoff_factor: float = 2.0, + exceptions: Union[ + Type[Exception], Tuple[Type[Exception], ...] + ] = Exception, + **kwargs: Any, +) -> Any: + """ + A function that retries another function if it raises specified exceptions. + + Args: + func (Callable): The function to retry + *args: Positional arguments to pass to the function + max_retries (int): Maximum number of retries before giving up. Defaults to 3. + delay (float): Initial delay between retries in seconds. Defaults to 1.0. + backoff_factor (float): Multiplier applied to delay between retries. Defaults to 2.0. + exceptions (Exception or tuple): Exception(s) that trigger a retry. Defaults to Exception. + **kwargs: Keyword arguments to pass to the function + + Returns: + Any: The return value of the function if successful + + Example: + def fetch_data(url: str) -> dict: + return requests.get(url).json() + + # Retry the fetch_data function + result = retry_function( + fetch_data, + "https://api.example.com", + max_retries=3, + exceptions=(ConnectionError, TimeoutError) + ) + """ + retries = 0 + current_delay = delay + + while True: + try: + return func(*args, **kwargs) + except exceptions as e: + retries += 1 + if retries > max_retries: + logger.error( + f"Function {func.__name__} failed after {max_retries} retries. 
" + f"Final error: {str(e)}" + ) + raise + + logger.warning( + f"Retry {retries}/{max_retries} for function {func.__name__} " + f"after error: {str(e)}. " + f"Waiting {current_delay} seconds..." + ) + + time.sleep(current_delay) + current_delay *= backoff_factor From 79bf0ca313aca243750960bfdc3a174543092250 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Fri, 4 Jul 2025 00:47:13 -0700 Subject: [PATCH 81/86] fix -- swarm router prompt --- conversation_test.py | 22 ++++++ realtor_agent.py | 29 ++++---- swarms/structs/agent.py | 69 +++++++++++++----- swarms/structs/conversation.py | 128 ++++++++++++++++++++++++++------- swarms/structs/swarm_router.py | 12 ++-- 5 files changed, 194 insertions(+), 66 deletions(-) create mode 100644 conversation_test.py diff --git a/conversation_test.py b/conversation_test.py new file mode 100644 index 00000000..ec8a0534 --- /dev/null +++ b/conversation_test.py @@ -0,0 +1,22 @@ +from swarms.structs.conversation import Conversation + +# Create a conversation object +conversation = Conversation(backend="in-memory") + +# Add a message to the conversation +conversation.add( + role="user", content="Hello, how are you?", category="input" +) + +# Add a message to the conversation +conversation.add( + role="assistant", + content="I'm good, thank you!", + category="output", +) + +print( + conversation.export_and_count_categories( + tokenizer_model_name="claude-3-5-sonnet-20240620" + ) +) diff --git a/realtor_agent.py b/realtor_agent.py index 3ec208f7..a2c9700c 100644 --- a/realtor_agent.py +++ b/realtor_agent.py @@ -1,12 +1,13 @@ -from typing import List import http.client import json -from swarms import Agent +import os +from typing import List from dotenv import load_dotenv +from swarms import Agent + load_dotenv() -import os def get_realtor_data_from_one_source(location: str): @@ -28,7 +29,7 @@ def get_realtor_data_from_one_source(location: str): ) headers = { - "x-rapidapi-key": os.getenv("RAPIDAPI_KEY"), + "x-rapidapi-key": 
os.getenv("RAPID_API_KEY"), "x-rapidapi-host": "realtor-search.p.rapidapi.com", } @@ -47,15 +48,15 @@ def get_realtor_data_from_one_source(location: str): res = conn.getresponse() data = res.read() - return "chicken data" + # return "chicken data" - # # Parse and format the response - # try: - # json_data = json.loads(data.decode("utf-8")) - # # Return formatted string instead of raw JSON - # return json.dumps(json_data, indent=2) - # except json.JSONDecodeError: - # return "Error: Could not parse API response" + # Parse and format the response + try: + json_data = json.loads(data.decode("utf-8")) + # Return formatted string instead of raw JSON + return json.dumps(json_data, indent=2) + except json.JSONDecodeError: + return "Error: Could not parse API response" def get_realtor_data_from_multiple_sources( @@ -144,11 +145,11 @@ When you receive property data: Provide clear, objective analysis while maintaining professional standards and ethical considerations.""", model_name="claude-3-sonnet-20240229", max_loops=1, - tools=[get_realtor_data_from_one_source], print_on=True, + streaming_on=True, ) agent.run( - "What are the best properties in Menlo Park, CA for rent under 3,000$?" 
+ f"Create a report on the best properties in Menlo Park, CA, showcase, the name, description, price, and link to the property: {get_realtor_data_from_one_source('Menlo Park, CA')}" ) diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index dab44638..33c3fd91 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -801,10 +801,11 @@ class Agent: or exists(self.mcp_urls) or exists(self.mcp_config) ): - self.pretty_print( - f"✨ [SYSTEM] Successfully integrated {len(tools)} MCP tools into agent: {self.agent_name} | Status: ONLINE | Time: {time.strftime('%H:%M:%S')} ✨", - loop_count=0, - ) + if self.print_on is True: + self.pretty_print( + f"✨ [SYSTEM] Successfully integrated {len(tools)} MCP tools into agent: {self.agent_name} | Status: ONLINE | Time: {time.strftime('%H:%M:%S')} ✨", + loop_count=0, + ) return tools except AgentMCPConnectionError as e: @@ -1087,6 +1088,8 @@ class Agent: **kwargs, ) + # If streaming is enabled, then don't print the response + # Parse the response from the agent with the output type if exists(self.tools_list_dictionary): if isinstance(response, BaseModel): @@ -1107,6 +1110,8 @@ class Agent: f"Structured Output - Attempting Function Call Execution [{time.strftime('%H:%M:%S')}] \n\n {format_data_structure(response)} ", loop_count, ) + elif self.streaming_on is True: + pass else: self.pretty_print( response, loop_count @@ -1147,8 +1152,12 @@ class Agent: self.save() logger.error( - f"Attempt {attempt+1}: Error generating" - f" response: {e}" + f"Attempt {attempt+1}/{self.max_retries}: Error generating response in loop {loop_count} for agent '{self.agent_name}': {str(e)} | " + f"Error type: {type(e).__name__}, Error details: {e.__dict__ if hasattr(e, '__dict__') else 'No additional details'} | " + f"Current task: '{task}', Agent state: max_loops={self.max_loops}, " + f"model={getattr(self.llm, 'model_name', 'unknown')}, " + f"temperature={getattr(self.llm, 'temperature', 'unknown')}" + f"{f' | Traceback: 
{e.__traceback__}' if hasattr(e, '__traceback__') else ''}" ) attempt += 1 @@ -1170,13 +1179,19 @@ class Agent: self.stopping_condition is not None and self._check_stopping_condition(response) ): - logger.info("Stopping condition met.") + logger.info( + f"Agent '{self.agent_name}' stopping condition met. " + f"Loop: {loop_count}, Response length: {len(str(response)) if response else 0}" + ) break elif ( self.stopping_func is not None and self.stopping_func(response) ): - logger.info("Stopping function met.") + logger.info( + f"Agent '{self.agent_name}' stopping function condition met. " + f"Loop: {loop_count}, Response length: {len(str(response)) if response else 0}" + ) break if self.interactive: @@ -1223,14 +1238,27 @@ class Agent: self._handle_run_error(error) def __handle_run_error(self, error: any): + import traceback + log_agent_data(self.to_dict()) if self.autosave is True: self.save() - logger.info( - f"Error detected running your agent {self.agent_name} \n Error {error} \n Optimize your input parameters and or add an issue on the swarms github and contact our team on discord for support ;) " + # Get detailed error information + error_type = type(error).__name__ + error_message = str(error) + traceback_info = traceback.format_exc() + + logger.error( + f"Error detected running your agent {self.agent_name}\n" + f"Error Type: {error_type}\n" + f"Error Message: {error_message}\n" + f"Traceback:\n{traceback_info}\n" + f"Agent State: {self.to_dict()}\n" + f"Optimize your input parameters and or add an issue on the swarms github and contact our team on discord for support ;)" ) + raise error def _handle_run_error(self, error: any): @@ -2952,7 +2980,8 @@ class Agent: # Fallback: provide a default summary summary = "I successfully executed the MCP tool and retrieved the information above." 
- self.pretty_print(summary, loop_count=current_loop) + if self.print_on is True: + self.pretty_print(summary, loop_count=current_loop) # Add to the memory self.short_memory.add( @@ -3003,10 +3032,11 @@ class Agent: content=format_data_structure(output), ) - self.pretty_print( - "Tool Executed Successfully", - loop_count, - ) + if self.print_on is True: + self.pretty_print( + f"Tool Executed Successfully [{time.strftime('%H:%M:%S')}]", + loop_count, + ) # Now run the LLM again without tools - create a temporary LLM instance # instead of modifying the cached one @@ -3030,10 +3060,11 @@ class Agent: content=tool_response, ) - self.pretty_print( - tool_response, - loop_count, - ) + if self.print_on is True: + self.pretty_print( + tool_response, + loop_count, + ) def list_output_types(self): return OutputType diff --git a/swarms/structs/conversation.py b/swarms/structs/conversation.py index 91d06154..82493f38 100644 --- a/swarms/structs/conversation.py +++ b/swarms/structs/conversation.py @@ -221,27 +221,6 @@ class Conversation(BaseStructure): ): super().__init__() - # Support both 'provider' and 'backend' parameters for backwards compatibility - # 'backend' takes precedence if both are provided - self.backend = backend or provider - self.backend_instance = None - - # Validate backend - valid_backends = [ - "in-memory", - "mem0", - "supabase", - "redis", - "sqlite", - "duckdb", - "pulsar", - ] - if self.backend not in valid_backends: - raise ValueError( - f"Invalid backend: '{self.backend}'. 
" - f"Valid backends are: {', '.join(valid_backends)}" - ) - # Initialize all attributes first self.id = id self.name = name or id @@ -275,6 +254,27 @@ class Conversation(BaseStructure): self.provider = provider # Keep for backwards compatibility self.conversations_dir = conversations_dir + # Support both 'provider' and 'backend' parameters for backwards compatibility + # 'backend' takes precedence if both are provided + self.backend = backend or provider + self.backend_instance = None + + # Validate backend + valid_backends = [ + "in-memory", + "mem0", + "supabase", + "redis", + "sqlite", + "duckdb", + "pulsar", + ] + if self.backend not in valid_backends: + raise ValueError( + f"Invalid backend: '{self.backend}'. " + f"Valid backends are: {', '.join(valid_backends)}" + ) + # Initialize backend if using persistent storage if self.backend in [ "supabase", @@ -484,8 +484,7 @@ class Conversation(BaseStructure): self, role: str, content: Union[str, dict, list, Any], - *args, - **kwargs, + category: Optional[str] = None, ): """Add a message to the conversation history. @@ -505,6 +504,9 @@ class Conversation(BaseStructure): if self.message_id_on: message["message_id"] = str(uuid.uuid4()) + if category: + message["category"] = category + # Add message to conversation history self.conversation_history.append(message) @@ -520,6 +522,79 @@ class Conversation(BaseStructure): f"Failed to autosave conversation: {str(e)}" ) + def export_and_count_categories( + self, tokenizer_model_name: Optional[str] = "gpt-4.1-mini" + ) -> Dict[str, int]: + """Export all messages with category 'input' and 'output' and count their tokens. + + This method searches through the conversation history and: + 1. Extracts all messages marked with category 'input' or 'output' + 2. Concatenates the content of each category + 3. 
Counts tokens for each category using the specified tokenizer model + + Args: + tokenizer_model_name (str): Name of the model to use for tokenization + + Returns: + Dict[str, int]: A dictionary containing: + - input_tokens: Number of tokens in input messages + - output_tokens: Number of tokens in output messages + - total_tokens: Total tokens across both categories + """ + try: + # Extract input and output messages + input_messages = [] + output_messages = [] + + for message in self.conversation_history: + # Get message content and ensure it's a string + content = message.get("content", "") + if not isinstance(content, str): + content = str(content) + + # Sort messages by category + category = message.get("category", "") + if category == "input": + input_messages.append(content) + elif category == "output": + output_messages.append(content) + + # Join messages with spaces + all_input_text = " ".join(input_messages) + all_output_text = " ".join(output_messages) + + print(all_input_text) + print(all_output_text) + + # Count tokens only if there is text + input_tokens = ( + count_tokens(all_input_text, tokenizer_model_name) + if all_input_text.strip() + else 0 + ) + output_tokens = ( + count_tokens(all_output_text, tokenizer_model_name) + if all_output_text.strip() + else 0 + ) + total_tokens = input_tokens + output_tokens + + return { + "input_tokens": input_tokens, + "output_tokens": output_tokens, + "total_tokens": total_tokens, + } + + except Exception as e: + logger.error( + f"Error in export_and_count_categories: {str(e)}" + ) + return { + "input_tokens": 0, + "output_tokens": 0, + "total_tokens": 0, + } + def add_mem0( self, role: str, @@ -546,8 +621,9 @@ class Conversation(BaseStructure): def add( self, role: str, - content: Union[str, dict, list], + content: Union[str, dict, list, Any], metadata: Optional[dict] = None, + category: Optional[str] = None, ): """Add a message to the conversation history.""" # If using a persistent backend, delegate to it @@ 
-562,7 +638,9 @@ class Conversation(BaseStructure): ) return self.add_in_memory(role, content) elif self.provider == "in-memory": - return self.add_in_memory(role, content) + return self.add_in_memory( + role=role, content=content, category=category + ) elif self.provider == "mem0": return self.add_mem0( role=role, content=content, metadata=metadata diff --git a/swarms/structs/swarm_router.py b/swarms/structs/swarm_router.py index 3452343b..77f3d189 100644 --- a/swarms/structs/swarm_router.py +++ b/swarms/structs/swarm_router.py @@ -466,14 +466,10 @@ class SwarmRouter: def update_system_prompt_for_agent_in_swarm(self): # Use list comprehension for faster iteration - [ - setattr( - agent, - "system_prompt", - agent.system_prompt + MULTI_AGENT_COLLAB_PROMPT_TWO, - ) - for agent in self.agents - ] + for agent in self.agents: + if agent.system_prompt is None: + agent.system_prompt = "" + agent.system_prompt += MULTI_AGENT_COLLAB_PROMPT_TWO def agent_config(self): agent_config = {} From bcd8a31417e474f441716f7b5c1165149ff34d72 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Fri, 4 Jul 2025 00:50:35 -0700 Subject: [PATCH 82/86] [FIX][ConcurrentWorkflow] --- pyproject.toml | 2 +- swarms/structs/concurrent_workflow.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 963c2c2c..12cdf745 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "7.9.0" +version = "7.9.2" description = "Swarms - TGSC" license = "MIT" authors = ["Kye Gomez "] diff --git a/swarms/structs/concurrent_workflow.py b/swarms/structs/concurrent_workflow.py index 86951e95..a3abe1eb 100644 --- a/swarms/structs/concurrent_workflow.py +++ b/swarms/structs/concurrent_workflow.py @@ -164,7 +164,7 @@ class ConcurrentWorkflow(BaseSwarm): return history_output_formatter( conversation=self.conversation, - output_type=self.output_type, + type=self.output_type, ) def 
batch_run( From 3c0298fe3eb4cb9e7eb74b544415a3ec848fa8f3 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Fri, 4 Jul 2025 01:35:24 -0700 Subject: [PATCH 83/86] fix swarm router and now sequential workflow --- example.py | 3 +- sequential_workflow_example.py | 74 +++++++++++++++++++++++++++ swarm_router.py | 68 ++++++++++++++++++++++++ swarms/structs/agent.py | 8 +-- swarms/structs/rearrange.py | 60 +--------------------- swarms/structs/sequential_workflow.py | 27 ++++------ swarms/structs/swarm_router.py | 7 ++- 7 files changed, 162 insertions(+), 85 deletions(-) create mode 100644 sequential_workflow_example.py create mode 100644 swarm_router.py diff --git a/example.py b/example.py index 34ed764e..cd0d78be 100644 --- a/example.py +++ b/example.py @@ -35,10 +35,9 @@ agent = Agent( You communicate in precise, technical terms while maintaining clarity for stakeholders.""", max_loops=1, - model_name="gpt-4o-mini", + model_name="claude-3-sonnet-20240229", dynamic_temperature_enabled=True, output_type="all", - max_tokens=16384, # dashboard=True ) diff --git a/sequential_workflow_example.py b/sequential_workflow_example.py new file mode 100644 index 00000000..a2242602 --- /dev/null +++ b/sequential_workflow_example.py @@ -0,0 +1,74 @@ +from swarms import Agent, SequentialWorkflow +import litellm + +litellm._turn_on_debug() + + +# Initialize market research agent +market_researcher = Agent( + agent_name="Market-Researcher", + system_prompt="""You are a market research specialist. Your tasks include: + 1. Analyzing market trends and patterns + 2. Identifying market opportunities and threats + 3. Evaluating competitor strategies + 4. Assessing customer needs and preferences + 5. Providing actionable market insights""", + model_name="claude-3-sonnet-20240229", + max_loops=1, + temperature=0.7, +) + +# Initialize financial analyst agent +financial_analyst = Agent( + agent_name="Financial-Analyst", + system_prompt="""You are a financial analysis expert. 
Your responsibilities include: + 1. Analyzing financial statements + 2. Evaluating investment opportunities + 3. Assessing risk factors + 4. Providing financial forecasts + 5. Recommending financial strategies""", + model_name="claude-3-sonnet-20240229", + max_loops=1, + temperature=0.7, +) + +# Initialize technical analyst agent +technical_analyst = Agent( + agent_name="Technical-Analyst", + system_prompt="""You are a technical analysis specialist. Your focus areas include: + 1. Analyzing price patterns and trends + 2. Evaluating technical indicators + 3. Identifying support and resistance levels + 4. Assessing market momentum + 5. Providing trading recommendations""", + model_name="claude-3-sonnet-20240229", + max_loops=1, + temperature=0.7, +) + +# Create list of agents +agents = [market_researcher, financial_analyst, technical_analyst] + +# # Initialize the concurrent workflow +# workflow = ConcurrentWorkflow( +# name="market-analysis-workflow", +# agents=agents, +# max_loops=1, +# ) + +# # Run the workflow +# result = workflow.run( +# "Analyze Tesla (TSLA) stock from market, financial, and technical perspectives" +# ) +router = SequentialWorkflow( + name="market-analysis-router", + agents=agents, + max_loops=1, + # output_type="all", +) + +result = router.run( + "Analyze Tesla (TSLA) stock from market, financial, and technical perspectives" +) + +print(result) diff --git a/swarm_router.py b/swarm_router.py new file mode 100644 index 00000000..efe3bcb5 --- /dev/null +++ b/swarm_router.py @@ -0,0 +1,68 @@ +from swarms import Agent, SwarmRouter + +# Initialize market research agent +market_researcher = Agent( + agent_name="Market-Researcher", + system_prompt="""You are a market research specialist. Your tasks include: + 1. Analyzing market trends and patterns + 2. Identifying market opportunities and threats + 3. Evaluating competitor strategies + 4. Assessing customer needs and preferences + 5. 
Providing actionable market insights""", + model_name="claude-sonnet-4-20250514", + max_loops=1, +) + +# Initialize financial analyst agent +financial_analyst = Agent( + agent_name="Financial-Analyst", + system_prompt="""You are a financial analysis expert. Your responsibilities include: + 1. Analyzing financial statements + 2. Evaluating investment opportunities + 3. Assessing risk factors + 4. Providing financial forecasts + 5. Recommending financial strategies""", + model_name="claude-sonnet-4-20250514", + max_loops=1, +) + +# Initialize technical analyst agent +technical_analyst = Agent( + agent_name="Technical-Analyst", + system_prompt="""You are a technical analysis specialist. Your focus areas include: + 1. Analyzing price patterns and trends + 2. Evaluating technical indicators + 3. Identifying support and resistance levels + 4. Assessing market momentum + 5. Providing trading recommendations""", + model_name="claude-sonnet-4-20250514", + max_loops=1, +) + +# Create list of agents +agents = [market_researcher, financial_analyst, technical_analyst] + +# # Initialize the concurrent workflow +# workflow = ConcurrentWorkflow( +# name="market-analysis-workflow", +# agents=agents, +# max_loops=1, +# ) + +# # Run the workflow +# result = workflow.run( +# "Analyze Tesla (TSLA) stock from market, financial, and technical perspectives" +# ) +router = SwarmRouter( + name="market-analysis-router", + swarm_type="ConcurrentWorkflow", + agents=agents, + max_loops=1, + # output_type="all", +) + +result = router.run( + "Analyze Tesla (TSLA) stock from market, financial, and technical perspectives" +) + +print(result) diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 33c3fd91..b4366258 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -161,7 +161,6 @@ class AgentToolExecutionError(AgentError): pass -# [FEAT][AGENT] class Agent: """ Agent is the backbone to connect LLMs with tools and long term memory. 
Agent also provides the ability to @@ -1152,12 +1151,7 @@ class Agent: self.save() logger.error( - f"Attempt {attempt+1}/{self.max_retries}: Error generating response in loop {loop_count} for agent '{self.agent_name}': {str(e)} | " - f"Error type: {type(e).__name__}, Error details: {e.__dict__ if hasattr(e, '__dict__') else 'No additional details'} | " - f"Current task: '{task}', Agent state: max_loops={self.max_loops}, " - f"model={getattr(self.llm, 'model_name', 'unknown')}, " - f"temperature={getattr(self.llm, 'temperature', 'unknown')}" - f"{f' | Traceback: {e.__traceback__}' if hasattr(e, '__traceback__') else ''}" + f"Attempt {attempt+1}/{self.retry_attempts}: Error generating response in loop {loop_count} for agent '{self.agent_name}': {str(e)} | " ) attempt += 1 diff --git a/swarms/structs/rearrange.py b/swarms/structs/rearrange.py index 7c59b864..c0e75c7d 100644 --- a/swarms/structs/rearrange.py +++ b/swarms/structs/rearrange.py @@ -1,4 +1,3 @@ -import asyncio import json import uuid from concurrent.futures import ThreadPoolExecutor @@ -344,7 +343,8 @@ class AgentRearrange(BaseSwarm): logger.info("Task execution completed") return history_output_formatter( - self.conversation, self.output_type + conversation=self.conversation, + type=self.output_type, ) except Exception as e: @@ -364,11 +364,6 @@ class AgentRearrange(BaseSwarm): self, task: str = None, img: str = None, - device: str = "cpu", - device_id: int = 2, - all_cores: bool = True, - all_gpus: bool = False, - no_use_clusterops: bool = True, *args, **kwargs, ): @@ -481,58 +476,11 @@ class AgentRearrange(BaseSwarm): except Exception as e: self._catch_error(e) - async def abatch_run( - self, - tasks: List[str], - img: Optional[List[str]] = None, - batch_size: int = 10, - *args, - **kwargs, - ) -> List[str]: - """ - Asynchronously process multiple tasks in batches. 
- - Args: - tasks: List of tasks to process - img: Optional list of images corresponding to tasks - batch_size: Number of tasks to process simultaneously - - Returns: - List of results corresponding to input tasks - """ - try: - results = [] - for i in range(0, len(tasks), batch_size): - batch_tasks = tasks[i : i + batch_size] - batch_imgs = ( - img[i : i + batch_size] - if img - else [None] * len(batch_tasks) - ) - - # Process batch using asyncio.gather - batch_coros = [ - self.astream( - task=task, img=img_path, *args, **kwargs - ) - for task, img_path in zip(batch_tasks, batch_imgs) - ] - batch_results = await asyncio.gather(*batch_coros) - results.extend(batch_results) - - return results - except Exception as e: - self._catch_error(e) - def concurrent_run( self, tasks: List[str], img: Optional[List[str]] = None, max_workers: Optional[int] = None, - device: str = "cpu", - device_id: int = None, - all_cores: bool = True, - all_gpus: bool = False, *args, **kwargs, ) -> List[str]: @@ -561,10 +509,6 @@ class AgentRearrange(BaseSwarm): self.run, task=task, img=img_path, - device=device, - device_id=device_id, - all_cores=all_cores, - all_gpus=all_gpus, *args, **kwargs, ) diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py index 5c26df7f..da6c5e3d 100644 --- a/swarms/structs/sequential_workflow.py +++ b/swarms/structs/sequential_workflow.py @@ -49,15 +49,12 @@ class SequentialWorkflow: self.flow = self.sequential_flow() self.agent_rearrange = AgentRearrange( - name=name, - description=description, - agents=agents, + name=self.name, + description=self.description, + agents=self.agents, flow=self.flow, - max_loops=max_loops, - output_type=output_type, - shared_memory_system=shared_memory_system, - *args, - **kwargs, + max_loops=self.max_loops, + output_type=self.output_type, ) def sequential_flow(self): @@ -105,11 +102,7 @@ class SequentialWorkflow: self, task: str, img: Optional[str] = None, - device: str = "cpu", - all_cores: 
bool = False, - all_gpus: bool = False, - device_id: int = 0, - no_use_clusterops: bool = True, + imgs: Optional[List[str]] = None, *args, **kwargs, ): @@ -134,14 +127,14 @@ class SequentialWorkflow: """ try: - result = self.agent_rearrange.run( + return self.agent_rearrange.run( task=task, img=img, - *args, - **kwargs, + # imgs=imgs, + # *args, + # **kwargs, ) - return result except Exception as e: logger.error( f"An error occurred while executing the task: {e}" diff --git a/swarms/structs/swarm_router.py b/swarms/structs/swarm_router.py index 77f3d189..e79d2f82 100644 --- a/swarms/structs/swarm_router.py +++ b/swarms/structs/swarm_router.py @@ -503,7 +503,12 @@ class SwarmRouter: """ self.swarm = self._create_swarm(task, *args, **kwargs) - self.conversation = self.swarm.conversation + if self.swarm_type == "SequentialWorkflow": + self.conversation = ( + self.swarm.agent_rearrange.conversation + ) + else: + self.conversation = self.swarm.conversation if self.list_all_agents is True: list_all_agents( From c7c1ea16f5bb51f0318b7ddbf631ec24b2e783af Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Fri, 4 Jul 2025 01:38:00 -0700 Subject: [PATCH 84/86] removed is_last parameter which messed up sequential workflow --- sequential_workflow_example.py | 7 +++++-- swarms/structs/agent.py | 4 ++++ swarms/structs/rearrange.py | 3 --- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/sequential_workflow_example.py b/sequential_workflow_example.py index a2242602..bb96f1c4 100644 --- a/sequential_workflow_example.py +++ b/sequential_workflow_example.py @@ -1,7 +1,7 @@ from swarms import Agent, SequentialWorkflow -import litellm +# import litellm -litellm._turn_on_debug() +# litellm._turn_on_debug() # Initialize market research agent @@ -16,6 +16,7 @@ market_researcher = Agent( model_name="claude-3-sonnet-20240229", max_loops=1, temperature=0.7, + streaming_on=True, ) # Initialize financial analyst agent @@ -29,6 +30,7 @@ financial_analyst = Agent( 5. 
Recommending financial strategies""", model_name="claude-3-sonnet-20240229", max_loops=1, + streaming_on=True, temperature=0.7, ) @@ -44,6 +46,7 @@ technical_analyst = Agent( model_name="claude-3-sonnet-20240229", max_loops=1, temperature=0.7, + streaming_on=True, ) # Create list of agents diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index b4366258..f275f095 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -2528,6 +2528,10 @@ class Agent: ValueError: If task is empty. """ + # Filter out is_last from kwargs if present + if 'is_last' in kwargs: + del kwargs['is_last'] + try: # Set streaming parameter in LLM if streaming is enabled if self.streaming_on and hasattr(self.llm, "stream"): diff --git a/swarms/structs/rearrange.py b/swarms/structs/rearrange.py index c0e75c7d..dd9a65c7 100644 --- a/swarms/structs/rearrange.py +++ b/swarms/structs/rearrange.py @@ -281,7 +281,6 @@ class AgentRearrange(BaseSwarm): ) for task_idx, task in enumerate(tasks): - is_last = task == tasks[-1] agent_names = [ name.strip() for name in task.split(",") ] @@ -297,7 +296,6 @@ class AgentRearrange(BaseSwarm): result = agent.run( task=self.conversation.get_str(), img=img, - is_last=is_last, *args, **kwargs, ) @@ -326,7 +324,6 @@ class AgentRearrange(BaseSwarm): current_task = agent.run( task=self.conversation.get_str(), img=img, - is_last=is_last, *args, **kwargs, ) From ab7781b1003e5640d012fae3150af269df1ca2aa Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Fri, 4 Jul 2025 01:40:27 -0700 Subject: [PATCH 85/86] example cleanup --- .../misc/conversation_test.py | 0 .../groupchat_examples/crypto_tax.py | 0 .../groupchat_examples/crypto_tax_swarm 2.py | 0 .../groupchat_examples/crypto_tax_swarm.py | 0 .../groupchat_examples/group_chat_example.py | 0 .../groupchat_examples/groupchat_example.py | 0 .../mortgage_tax_panel_example.py | 0 .../groupchat/random_dynamic_speaker_example.py | 0 sequential_workflow_example.py | 14 -------------- swarms/structs/agent.py 
| 4 ++-- 10 files changed, 2 insertions(+), 16 deletions(-) rename conversation_test.py => examples/misc/conversation_test.py (100%) rename examples/multi_agent/{ => groupchat}/groupchat_examples/crypto_tax.py (100%) rename examples/multi_agent/{ => groupchat}/groupchat_examples/crypto_tax_swarm 2.py (100%) rename examples/multi_agent/{ => groupchat}/groupchat_examples/crypto_tax_swarm.py (100%) rename examples/multi_agent/{ => groupchat}/groupchat_examples/group_chat_example.py (100%) rename examples/multi_agent/{ => groupchat}/groupchat_examples/groupchat_example.py (100%) rename mortgage_tax_panel_example.py => examples/multi_agent/groupchat/groupchat_examples/mortgage_tax_panel_example.py (100%) rename random_dynamic_speaker_example.py => examples/multi_agent/groupchat/random_dynamic_speaker_example.py (100%) diff --git a/conversation_test.py b/examples/misc/conversation_test.py similarity index 100% rename from conversation_test.py rename to examples/misc/conversation_test.py diff --git a/examples/multi_agent/groupchat_examples/crypto_tax.py b/examples/multi_agent/groupchat/groupchat_examples/crypto_tax.py similarity index 100% rename from examples/multi_agent/groupchat_examples/crypto_tax.py rename to examples/multi_agent/groupchat/groupchat_examples/crypto_tax.py diff --git a/examples/multi_agent/groupchat_examples/crypto_tax_swarm 2.py b/examples/multi_agent/groupchat/groupchat_examples/crypto_tax_swarm 2.py similarity index 100% rename from examples/multi_agent/groupchat_examples/crypto_tax_swarm 2.py rename to examples/multi_agent/groupchat/groupchat_examples/crypto_tax_swarm 2.py diff --git a/examples/multi_agent/groupchat_examples/crypto_tax_swarm.py b/examples/multi_agent/groupchat/groupchat_examples/crypto_tax_swarm.py similarity index 100% rename from examples/multi_agent/groupchat_examples/crypto_tax_swarm.py rename to examples/multi_agent/groupchat/groupchat_examples/crypto_tax_swarm.py diff --git 
a/examples/multi_agent/groupchat_examples/group_chat_example.py b/examples/multi_agent/groupchat/groupchat_examples/group_chat_example.py similarity index 100% rename from examples/multi_agent/groupchat_examples/group_chat_example.py rename to examples/multi_agent/groupchat/groupchat_examples/group_chat_example.py diff --git a/examples/multi_agent/groupchat_examples/groupchat_example.py b/examples/multi_agent/groupchat/groupchat_examples/groupchat_example.py similarity index 100% rename from examples/multi_agent/groupchat_examples/groupchat_example.py rename to examples/multi_agent/groupchat/groupchat_examples/groupchat_example.py diff --git a/mortgage_tax_panel_example.py b/examples/multi_agent/groupchat/groupchat_examples/mortgage_tax_panel_example.py similarity index 100% rename from mortgage_tax_panel_example.py rename to examples/multi_agent/groupchat/groupchat_examples/mortgage_tax_panel_example.py diff --git a/random_dynamic_speaker_example.py b/examples/multi_agent/groupchat/random_dynamic_speaker_example.py similarity index 100% rename from random_dynamic_speaker_example.py rename to examples/multi_agent/groupchat/random_dynamic_speaker_example.py diff --git a/sequential_workflow_example.py b/sequential_workflow_example.py index bb96f1c4..84a3ad49 100644 --- a/sequential_workflow_example.py +++ b/sequential_workflow_example.py @@ -1,8 +1,4 @@ from swarms import Agent, SequentialWorkflow -# import litellm - -# litellm._turn_on_debug() - # Initialize market research agent market_researcher = Agent( @@ -52,17 +48,7 @@ technical_analyst = Agent( # Create list of agents agents = [market_researcher, financial_analyst, technical_analyst] -# # Initialize the concurrent workflow -# workflow = ConcurrentWorkflow( -# name="market-analysis-workflow", -# agents=agents, -# max_loops=1, -# ) -# # Run the workflow -# result = workflow.run( -# "Analyze Tesla (TSLA) stock from market, financial, and technical perspectives" -# ) router = SequentialWorkflow( 
name="market-analysis-router", agents=agents, diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index f275f095..1b30644c 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -2529,8 +2529,8 @@ class Agent: """ # Filter out is_last from kwargs if present - if 'is_last' in kwargs: - del kwargs['is_last'] + if "is_last" in kwargs: + del kwargs["is_last"] try: # Set streaming parameter in LLM if streaming is enabled From c96b9e1160a1ab241199966deee504ad64ace5f2 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Fri, 4 Jul 2025 14:26:31 -0700 Subject: [PATCH 86/86] github actions stuff --- .github/workflows/lint.yml | 52 +++++++++++++++++------------------ .github/workflows/semgrep.yml | 49 --------------------------------- .github/workflows/tests.yml | 31 +++++++++++++++++++++ pyproject.toml | 2 +- 4 files changed, 58 insertions(+), 76 deletions(-) delete mode 100644 .github/workflows/semgrep.yml create mode 100644 .github/workflows/tests.yml diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index f2295d07..2d09ad85 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,33 +1,33 @@ --- name: Lint on: [push, pull_request] # yamllint disable-line rule:truthy + jobs: - yaml-lint: + lint: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 - - run: pip install yamllint - - run: yamllint . - flake8-lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 - - run: pip install flake8 - - run: flake8 . - ruff-lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 - - run: pip install ruff - - run: ruff format . - - run: ruff check --fix . 
- pylint-lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 - - run: pip install pylint - - run: pylint swarms --recursive=y + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.10' + + - name: Cache pip dependencies + uses: actions/cache@v3 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ hashFiles('**/pyproject.toml') }} + restore-keys: | + ${{ runner.os }}-pip- + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install black==24.2.0 ruff==0.2.1 + + - name: Check Black formatting + run: black . --check --diff + + - name: Run Ruff linting + run: ruff check . diff --git a/.github/workflows/semgrep.yml b/.github/workflows/semgrep.yml deleted file mode 100644 index 4a122c7b..00000000 --- a/.github/workflows/semgrep.yml +++ /dev/null @@ -1,49 +0,0 @@ -# This workflow uses actions that are not certified by GitHub. -# They are provided by a third-party and are governed by -# separate terms of service, privacy policy, and support -# documentation. - -# This workflow file requires a free account on Semgrep.dev to -# manage rules, file ignores, notifications, and more. 
-# -# See https://semgrep.dev/docs - -name: Semgrep - -on: - push: - branches: [ "master" ] - pull_request: - # The branches below must be a subset of the branches above - branches: [ "master" ] - schedule: - - cron: '19 7 * * 3' - -permissions: - contents: read - -jobs: - semgrep: - permissions: - contents: read # for actions/checkout to fetch code - security-events: write # for github/codeql-action/upload-sarif to upload SARIF results - actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status - name: Scan - runs-on: ubuntu-latest - steps: - # Checkout project source - - uses: actions/checkout@v4 - - # Scan code using project's configuration on https://semgrep.dev/manage - - uses: returntocorp/semgrep-action@713efdd345f3035192eaa63f56867b88e63e4e5d - with: - publishToken: ${{ secrets.SEMGREP_APP_TOKEN }} - publishDeployment: ${{ secrets.SEMGREP_DEPLOYMENT_ID }} - generateSarif: "1" - - # Upload SARIF file generated in previous step - - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@v3 - with: - sarif_file: semgrep.sarif - if: always() diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 00000000..6e16b0dc --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,31 @@ +name: Run Tests + +on: + push: + branches: [ "master" ] + pull_request: + branches: [ "master" ] + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install Poetry + run: | + curl -sSL https://install.python-poetry.org | python3 - + + - name: Install dependencies + run: | + poetry install --with test + + - name: Run tests + run: | + poetry run pytest tests/ -v \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 12cdf745..671cbb1e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = 
"poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "7.9.2" +version = "7.9.3" description = "Swarms - TGSC" license = "MIT" authors = ["Kye Gomez "]