diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py
index 7b99e637..e6383155 100644
--- a/swarms/structs/__init__.py
+++ b/swarms/structs/__init__.py
@@ -100,6 +100,7 @@ from swarms.structs.swarming_architectures import (
     staircase_swarm,
     star_swarm,
 )
+from swarms.structs.aop import AOP
 
 __all__ = [
     "Agent",
@@ -184,4 +185,5 @@ __all__ = [
     "check_end",
     "AgentLoader",
     "BatchedGridWorkflow",
+    "AOP",
 ]
diff --git a/swarms/structs/aop.py b/swarms/structs/aop.py
index fbb21f19..d04b956e 100644
--- a/swarms/structs/aop.py
+++ b/swarms/structs/aop.py
@@ -983,21 +983,23 @@ class AOP:
             port: Port to bind the server to
         """
         logger.info(
-            f"Starting MCP server '{self.server_name}' on {self.host}:{self.port}"
+            f"Starting MCP server '{self.server_name}' on {self.host}:{self.port}\n"
+            f"Transport: {self.transport}\n"
+            f"Log level: {self.log_level}\n"
+            f"Verbose mode: {self.verbose}\n"
+            f"Traceback enabled: {self.traceback_enabled}\n"
+            f"Available tools: {self.list_agents()}"
         )
-        logger.info(f"Transport: {self.transport}")
-        logger.info(f"Log level: {self.log_level}")
-        logger.info(f"Verbose mode: {self.verbose}")
-        logger.info(f"Traceback enabled: {self.traceback_enabled}")
-        logger.info(f"Available tools: {self.list_agents()}")
 
         if self.verbose:
-            logger.debug("Server configuration:")
-            logger.debug(f"  - Server name: {self.server_name}")
-            logger.debug(f"  - Host: {self.host}")
-            logger.debug(f"  - Port: {self.port}")
-            logger.debug(f"  - Transport: {self.transport}")
-            logger.debug(f"  - Total agents: {len(self.agents)}")
+            logger.debug(
+                "Server configuration:\n"
+                f"  - Server name: {self.server_name}\n"
+                f"  - Host: {self.host}\n"
+                f"  - Port: {self.port}\n"
+                f"  - Transport: {self.transport}\n"
+                f"  - Total agents: {len(self.agents)}"
+            )
             for tool_name, config in self.tool_configs.items():
                 logger.debug(
                     f"  - Tool '{tool_name}': timeout={config.timeout}s, verbose={config.verbose}, traceback={config.traceback_enabled}"
@@ -1005,12 +1007,12 @@ class AOP:
 
         self.mcp_server.run(transport=self.transport)
 
-        # Note: FastMCP doesn't have a direct start method in the current implementation
-        # This would need to be implemented based on the specific MCP server setup
-        print(
+        logger.info(
             f"MCP Server '{self.server_name}' is ready with {len(self.agents)} tools"
         )
-        print(f"Tools available: {', '.join(self.list_agents())}")
+        logger.info(
+            f"Tools available: {', '.join(self.list_agents())}"
+        )
 
     def run(self) -> None:
         """
diff --git a/swarms/tools/base_tool.py b/swarms/tools/base_tool.py
index af08f11e..24d71b40 100644
--- a/swarms/tools/base_tool.py
+++ b/swarms/tools/base_tool.py
@@ -231,9 +231,10 @@ class BaseTool(BaseModel):
     def base_model_to_dict(
         self,
         pydantic_type: type[BaseModel],
+        output_str: bool = False,
         *args: Any,
         **kwargs: Any,
-    ) -> dict[str, Any]:
+    ) -> Union[dict[str, Any], str]:
         """
         Convert a Pydantic BaseModel to OpenAI function calling schema dictionary.
 
@@ -247,7 +248,7 @@ class BaseTool(BaseModel):
             **kwargs: Additional keyword arguments
 
         Returns:
-            dict[str, Any]: OpenAI function calling schema dictionary
+            Union[dict[str, Any], str]: OpenAI function calling schema dictionary or JSON string
 
         Raises:
             ToolValidationError: If pydantic_type validation fails
@@ -278,9 +279,13 @@ class BaseTool(BaseModel):
 
             # Get the base function schema
            base_result = base_model_to_openai_function(
-                pydantic_type, *args, **kwargs
+                pydantic_type, output_str=output_str, *args, **kwargs
             )
 
+            # If output_str is True, return the string directly
+            if output_str and isinstance(base_result, str):
+                return base_result
+
             # Extract the function definition from the functions array
             if (
                 "functions" in base_result
@@ -314,8 +319,8 @@ class BaseTool(BaseModel):
             ) from e
 
     def multi_base_models_to_dict(
-        self, base_models: List[BaseModel]
-    ) -> dict[str, Any]:
+        self, base_models: List[BaseModel], output_str: bool = False
+    ) -> Union[List[dict[str, Any]], str]:
         """
         Convert multiple Pydantic BaseModels to OpenAI function calling schema.
 
@@ -323,12 +328,11 @@ class BaseTool(BaseModel):
         a unified OpenAI function calling schema format.
 
         Args:
-            return_str (bool): Whether to return string format
-            *args: Additional positional arguments
-            **kwargs: Additional keyword arguments
+            base_models (List[BaseModel]): List of Pydantic models to convert
+            output_str (bool): Whether to return string format. Defaults to False.
 
         Returns:
-            dict[str, Any]: Combined OpenAI function calling schema
+            List[dict[str, Any]] or str: List of OpenAI function calling schemas, or a JSON string when output_str is True
 
         Raises:
             ToolValidationError: If base_models validation fails
@@ -344,10 +348,18 @@ class BaseTool(BaseModel):
             )
 
         try:
-            return [
+            results = [
                 self.base_model_to_dict(model)
                 for model in base_models
             ]
+
+            # Collect plain dicts and serialize once if a JSON string was requested
+            if output_str:
+                import json
+
+                return json.dumps(results, indent=2)
+
+            return results
         except Exception as e:
             self._log_if_verbose(
                 "error", f"Failed to convert multiple models: {e}"
diff --git a/swarms/tools/pydantic_to_json.py b/swarms/tools/pydantic_to_json.py
index ae8b6a44..0efb060b 100644
--- a/swarms/tools/pydantic_to_json.py
+++ b/swarms/tools/pydantic_to_json.py
@@ -39,12 +39,14 @@ def check_pydantic_name(pydantic_type: type[BaseModel]) -> str:
 
 def base_model_to_openai_function(
     pydantic_type: type[BaseModel],
+    output_str: bool = False,
 ) -> dict[str, Any]:
     """
     Convert a Pydantic model to a dictionary representation of functions.
 
     Args:
         pydantic_type (type[BaseModel]): The Pydantic model type to convert.
+        output_str (bool): Whether to return string output format. Defaults to False.
 
     Returns:
         dict[str, Any]: A dictionary representation of the functions.
@@ -85,7 +87,7 @@ def base_model_to_openai_function(
     _remove_a_key(parameters, "title")
     _remove_a_key(parameters, "additionalProperties")
 
-    return {
+    result = {
         "function_call": {
             "name": name,
         },
@@ -98,6 +100,14 @@ def base_model_to_openai_function(
         ],
     }
 
+    # Handle output_str parameter
+    if output_str:
+        import json
+
+        return json.dumps(result, indent=2)
+
+    return result
+
 
 def multi_base_model_to_openai_function(
     pydantic_types: List[BaseModel] = None,
@@ -114,13 +124,21 @@ def multi_base_model_to_openai_function(
     """
 
    functions: list[dict[str, Any]] = [
-        base_model_to_openai_function(pydantic_type, output_str)[
-            "functions"
-        ][0]
+        base_model_to_openai_function(
+            pydantic_type, output_str=False
+        )["functions"][0]
         for pydantic_type in pydantic_types
     ]
 
-    return {
+    result = {
         "function_call": "auto",
         "functions": functions,
     }
+
+    # Handle output_str parameter
+    if output_str:
+        import json
+
+        return json.dumps(result, indent=2)
+
+    return result
diff --git a/tests/utils/test_output_str_fix.py b/tests/utils/test_output_str_fix.py
new file mode 100644
index 00000000..27882567
--- /dev/null
+++ b/tests/utils/test_output_str_fix.py
@@ -0,0 +1,158 @@
+from pydantic import BaseModel
+from swarms.tools.pydantic_to_json import (
+    base_model_to_openai_function,
+    multi_base_model_to_openai_function,
+)
+from swarms.tools.base_tool import BaseTool
+
+
+# Sample Pydantic model (not named Test* so pytest does not collect it as a test class)
+class SampleModel(BaseModel):
+    """A sample model for validation."""
+
+    name: str
+    age: int
+    email: str = "test@example.com"
+
+
+def test_base_model_to_openai_function():
+    """Test that base_model_to_openai_function accepts output_str parameter."""
+    print(
+        "Testing base_model_to_openai_function with output_str=False..."
+    )
+    result_dict = base_model_to_openai_function(
+        SampleModel, output_str=False
+    )
+    assert isinstance(result_dict, dict)
+    print(f"✓ Dict result type: {type(result_dict)}")
+    print(f"✓ Dict result keys: {list(result_dict.keys())}")
+
+    print(
+        "\nTesting base_model_to_openai_function with output_str=True..."
+    )
+    result_str = base_model_to_openai_function(
+        SampleModel, output_str=True
+    )
+    assert isinstance(result_str, str)
+    print(f"✓ String result type: {type(result_str)}")
+    print(f"✓ String result preview: {result_str[:100]}...")
+
+
+def test_multi_base_model_to_openai_function():
+    """Test that multi_base_model_to_openai_function handles output_str correctly."""
+    print(
+        "\nTesting multi_base_model_to_openai_function with output_str=False..."
+    )
+    result_dict = multi_base_model_to_openai_function(
+        [SampleModel], output_str=False
+    )
+    assert isinstance(result_dict, dict)
+    print(f"✓ Dict result type: {type(result_dict)}")
+    print(f"✓ Dict result keys: {list(result_dict.keys())}")
+
+    print(
+        "\nTesting multi_base_model_to_openai_function with output_str=True..."
+    )
+    result_str = multi_base_model_to_openai_function(
+        [SampleModel], output_str=True
+    )
+    assert isinstance(result_str, str)
+    print(f"✓ String result type: {type(result_str)}")
+    print(f"✓ String result preview: {result_str[:100]}...")
+
+
+def test_base_tool_methods():
+    """Test that BaseTool methods handle output_str parameter correctly."""
+    print(
+        "\nTesting BaseTool.base_model_to_dict with output_str=False..."
+    )
+    tool = BaseTool()
+    result_dict = tool.base_model_to_dict(SampleModel, output_str=False)
+    assert isinstance(result_dict, dict)
+    print(f"✓ Dict result type: {type(result_dict)}")
+    print(f"✓ Dict result keys: {list(result_dict.keys())}")
+
+    print(
+        "\nTesting BaseTool.base_model_to_dict with output_str=True..."
+    )
+    result_str = tool.base_model_to_dict(SampleModel, output_str=True)
+    assert isinstance(result_str, str)
+    print(f"✓ String result type: {type(result_str)}")
+    print(f"✓ String result preview: {result_str[:100]}...")
+
+    print(
+        "\nTesting BaseTool.multi_base_models_to_dict with output_str=False..."
+    )
+    result_dict = tool.multi_base_models_to_dict(
+        [SampleModel], output_str=False
+    )
+    assert isinstance(result_dict, list)
+    print(f"✓ List result type: {type(result_dict)}")
+    print(f"✓ List result length: {len(result_dict)}")
+
+    print(
+        "\nTesting BaseTool.multi_base_models_to_dict with output_str=True..."
+    )
+    result_str = tool.multi_base_models_to_dict(
+        [SampleModel], output_str=True
+    )
+    assert isinstance(result_str, str)
+    print(f"✓ String result type: {type(result_str)}")
+    print(f"✓ String result preview: {result_str[:100]}...")
+
+
+def test_agent_integration():
+    """Test that the Agent class can use the fixed methods without errors."""
+    print("\nTesting Agent integration...")
+    try:
+        from swarms import Agent
+
+        # Create a simple agent with a tool schema
+        agent = Agent(
+            model_name="gpt-4o-mini",
+            tool_schema=SampleModel,
+            max_loops=1,
+            verbose=True,
+        )
+
+        # This should not raise an error anymore
+        agent.handle_tool_schema_ops()
+        print(
+            "✓ Agent.handle_tool_schema_ops() completed successfully"
+        )
+
+    except Exception as e:
+        print(f"✗ Agent integration failed: {e}")
+        return False
+
+    return True
+
+
+if __name__ == "__main__":
+    print("=" * 60)
+    print("Testing output_str parameter fix")
+    print("=" * 60)
+
+    try:
+        test_base_model_to_openai_function()
+        test_multi_base_model_to_openai_function()
+        test_base_tool_methods()
+
+        if test_agent_integration():
+            print("\n" + "=" * 60)
+            print(
+                "✅ All tests passed! The output_str parameter fix is working correctly."
+            )
+            print("=" * 60)
+        else:
+            print("\n" + "=" * 60)
+            print(
+                "❌ Some tests failed. Please check the implementation."
+            )
+            print("=" * 60)
+
+    except Exception as e:
+        print(f"\n❌ Test failed with error: {e}")
+        import traceback
+
+        traceback.print_exc()
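For reviewers, here is a minimal usage sketch of the `output_str` flag as it behaves after this patch. It is illustrative only: `Location` is a made-up example model (not part of the codebase), and the exact schema contents depend on the installed pydantic version.

    from pydantic import BaseModel

    from swarms.tools.base_tool import BaseTool
    from swarms.tools.pydantic_to_json import base_model_to_openai_function


    class Location(BaseModel):
        """Hypothetical example model: resolve a city name to coordinates."""

        city: str
        country: str = "US"


    # Default behaviour is unchanged: a schema dict with "function_call"
    # and "functions" keys comes back.
    schema = base_model_to_openai_function(Location)
    assert isinstance(schema, dict) and "functions" in schema

    # With output_str=True the same schema is returned as an indented
    # JSON string, ready to embed in a prompt.
    schema_json = base_model_to_openai_function(Location, output_str=True)
    assert isinstance(schema_json, str)

    # The BaseTool wrappers forward the flag the same way; the multi-model
    # variant collects plain dicts and serializes the list in one pass.
    tool = BaseTool()
    print(tool.base_model_to_dict(Location, output_str=True))
    print(tool.multi_base_models_to_dict([Location], output_str=True))

Because `multi_base_models_to_dict` serializes the collected schemas once, its string form is a single JSON array of schema objects rather than an array of escaped JSON strings.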