[EXPORT AOP] [BUG][#1108]

pull/1110/head
Kye Gomez 1 week ago
parent 3de4c78bfe
commit ea30c40d74

@@ -100,6 +100,7 @@ from swarms.structs.swarming_architectures import (
     staircase_swarm,
     star_swarm,
 )
+from swarms.structs.aop import AOP

 __all__ = [
     "Agent",
@@ -184,4 +185,5 @@ __all__ = [
     "check_end",
     "AgentLoader",
     "BatchedGridWorkflow",
+    "AOP",
 ]

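With the hunk above, AOP is re-exported from the package root (the context lines point at the top-level swarms __init__). A minimal import sketch, assuming the change merges as shown:

    # Both imports refer to the same class once this commit lands (illustrative sketch)
    from swarms.structs.aop import AOP   # internal module path, unchanged
    from swarms import AOP               # new top-level export declared in __all__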
@@ -983,21 +983,23 @@ class AOP:
             port: Port to bind the server to
         """
         logger.info(
-            f"Starting MCP server '{self.server_name}' on {self.host}:{self.port}"
+            f"Starting MCP server '{self.server_name}' on {self.host}:{self.port}\n"
+            f"Transport: {self.transport}\n"
+            f"Log level: {self.log_level}\n"
+            f"Verbose mode: {self.verbose}\n"
+            f"Traceback enabled: {self.traceback_enabled}\n"
+            f"Available tools: {self.list_agents()}"
         )
-        logger.info(f"Transport: {self.transport}")
-        logger.info(f"Log level: {self.log_level}")
-        logger.info(f"Verbose mode: {self.verbose}")
-        logger.info(f"Traceback enabled: {self.traceback_enabled}")
-        logger.info(f"Available tools: {self.list_agents()}")

         if self.verbose:
-            logger.debug("Server configuration:")
-            logger.debug(f"  - Server name: {self.server_name}")
-            logger.debug(f"  - Host: {self.host}")
-            logger.debug(f"  - Port: {self.port}")
-            logger.debug(f"  - Transport: {self.transport}")
-            logger.debug(f"  - Total agents: {len(self.agents)}")
+            logger.debug(
+                "Server configuration:\n"
+                f"  - Server name: {self.server_name}\n"
+                f"  - Host: {self.host}\n"
+                f"  - Port: {self.port}\n"
+                f"  - Transport: {self.transport}\n"
+                f"  - Total agents: {len(self.agents)}"
+            )
             for tool_name, config in self.tool_configs.items():
                 logger.debug(
                     f"  - Tool '{tool_name}': timeout={config.timeout}s, verbose={config.verbose}, traceback={config.traceback_enabled}"
@@ -1005,12 +1007,12 @@ class AOP:
             self.mcp_server.run(transport=self.transport)

-            # Note: FastMCP doesn't have a direct start method in the current implementation
-            # This would need to be implemented based on the specific MCP server setup
-            print(
+            logger.info(
                 f"MCP Server '{self.server_name}' is ready with {len(self.agents)} tools"
             )
-            print(f"Tools available: {', '.join(self.list_agents())}")
+            logger.info(
+                f"Tools available: {', '.join(self.list_agents())}"
+            )

     def run(self) -> None:
         """

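The logging change above folds several logger.info / logger.debug calls into single multi-line messages, so the startup banner is emitted as one log record and print calls are replaced with the logger. A generic sketch of the pattern outside the AOP class, assuming a loguru-style logger (which is what the diff's logger calls suggest):

    from loguru import logger  # assumption: swarms' logger is loguru-compatible

    host, port, transport = "0.0.0.0", 8000, "streamable-http"  # illustrative values
    logger.info(
        f"Starting MCP server on {host}:{port}\n"
        f"Transport: {transport}"
    )  # one record instead of several separately interleavable ones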
@@ -231,9 +231,10 @@ class BaseTool(BaseModel):
     def base_model_to_dict(
         self,
         pydantic_type: type[BaseModel],
+        output_str: bool = False,
         *args: Any,
         **kwargs: Any,
-    ) -> dict[str, Any]:
+    ) -> Union[dict[str, Any], str]:
         """
         Convert a Pydantic BaseModel to OpenAI function calling schema dictionary.
@@ -247,7 +248,7 @@ class BaseTool(BaseModel):
             **kwargs: Additional keyword arguments

         Returns:
-            dict[str, Any]: OpenAI function calling schema dictionary
+            Union[dict[str, Any], str]: OpenAI function calling schema dictionary or JSON string

         Raises:
             ToolValidationError: If pydantic_type validation fails
@@ -278,9 +279,13 @@ class BaseTool(BaseModel):
             # Get the base function schema
             base_result = base_model_to_openai_function(
-                pydantic_type, *args, **kwargs
+                pydantic_type, output_str=output_str, *args, **kwargs
             )

+            # If output_str is True, return the string directly
+            if output_str and isinstance(base_result, str):
+                return base_result
+
             # Extract the function definition from the functions array
             if (
                 "functions" in base_result
@@ -314,8 +319,8 @@ class BaseTool(BaseModel):
             ) from e

     def multi_base_models_to_dict(
-        self, base_models: List[BaseModel]
-    ) -> dict[str, Any]:
+        self, base_models: List[BaseModel], output_str: bool = False
+    ) -> Union[dict[str, Any], str]:
         """
         Convert multiple Pydantic BaseModels to OpenAI function calling schema.
@@ -323,12 +328,11 @@ class BaseTool(BaseModel):
         a unified OpenAI function calling schema format.

         Args:
-            return_str (bool): Whether to return string format
-            *args: Additional positional arguments
-            **kwargs: Additional keyword arguments
+            base_models (List[BaseModel]): List of Pydantic models to convert
+            output_str (bool): Whether to return string format. Defaults to False.

         Returns:
-            dict[str, Any]: Combined OpenAI function calling schema
+            dict[str, Any] or str: Combined OpenAI function calling schema or JSON string

         Raises:
             ToolValidationError: If base_models validation fails
@@ -344,10 +348,18 @@ class BaseTool(BaseModel):
             )

         try:
-            return [
-                self.base_model_to_dict(model)
+            results = [
+                self.base_model_to_dict(model, output_str=output_str)
                 for model in base_models
             ]
+
+            # If output_str is True, return the string directly
+            if output_str:
+                import json
+
+                return json.dumps(results, indent=2)
+
+            return results
         except Exception as e:
             self._log_if_verbose(
                 "error", f"Failed to convert multiple models: {e}"

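Taken together, the BaseTool hunks thread the new output_str flag down to base_model_to_openai_function and widen the return type to Union[dict, str]. A short usage sketch under the assumption that the hunks apply as shown; Item is a hypothetical model used only for illustration:

    from pydantic import BaseModel
    from swarms.tools.base_tool import BaseTool

    class Item(BaseModel):  # hypothetical example model
        name: str
        price: float

    tool = BaseTool()
    schema_dict = tool.base_model_to_dict(Item)                       # dict, same as before
    schema_json = tool.base_model_to_dict(Item, output_str=True)      # JSON string
    batch = tool.multi_base_models_to_dict([Item], output_str=True)   # JSON-serialized list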
@@ -39,12 +39,14 @@ def check_pydantic_name(pydantic_type: type[BaseModel]) -> str:
 def base_model_to_openai_function(
     pydantic_type: type[BaseModel],
+    output_str: bool = False,
 ) -> dict[str, Any]:
     """
     Convert a Pydantic model to a dictionary representation of functions.

     Args:
         pydantic_type (type[BaseModel]): The Pydantic model type to convert.
+        output_str (bool): Whether to return string output format. Defaults to False.

     Returns:
         dict[str, Any]: A dictionary representation of the functions.
@@ -85,7 +87,7 @@ def base_model_to_openai_function(
     _remove_a_key(parameters, "title")
     _remove_a_key(parameters, "additionalProperties")

-    return {
+    result = {
         "function_call": {
             "name": name,
         },
@@ -98,6 +100,14 @@ def base_model_to_openai_function(
         ],
     }

+    # Handle output_str parameter
+    if output_str:
+        import json
+
+        return json.dumps(result, indent=2)
+
+    return result
+

 def multi_base_model_to_openai_function(
     pydantic_types: List[BaseModel] = None,
@@ -114,13 +124,21 @@ def multi_base_model_to_openai_function(
     """
     functions: list[dict[str, Any]] = [
-        base_model_to_openai_function(pydantic_type, output_str)[
-            "functions"
-        ][0]
+        base_model_to_openai_function(
+            pydantic_type, output_str=False
+        )["functions"][0]
         for pydantic_type in pydantic_types
     ]

-    return {
+    result = {
         "function_call": "auto",
         "functions": functions,
     }
+
+    # Handle output_str parameter
+    if output_str:
+        import json
+
+        return json.dumps(result, indent=2)
+
+    return result

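Note the design choice in multi_base_model_to_openai_function: each model is converted with output_str=False so the intermediate results stay dicts, and serialization happens once on the combined schema, so output_str=True yields a single JSON document rather than a list of JSON strings. A brief sketch of the expected shapes, assuming the hunks above (TestModel is the example model defined in the new test file below):

    combined = multi_base_model_to_openai_function([TestModel], output_str=False)        # dict with a "functions" list
    combined_json = multi_base_model_to_openai_function([TestModel], output_str=True)    # one JSON string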
@@ -0,0 +1,150 @@
from pydantic import BaseModel

from swarms.tools.pydantic_to_json import (
    base_model_to_openai_function,
    multi_base_model_to_openai_function,
)
from swarms.tools.base_tool import BaseTool


# Test Pydantic model
class TestModel(BaseModel):
    """A test model for validation."""

    name: str
    age: int
    email: str = "test@example.com"


def test_base_model_to_openai_function():
    """Test that base_model_to_openai_function accepts output_str parameter."""
    print(
        "Testing base_model_to_openai_function with output_str=False..."
    )
    result_dict = base_model_to_openai_function(
        TestModel, output_str=False
    )
    print(f"✓ Dict result type: {type(result_dict)}")
    print(f"✓ Dict result keys: {list(result_dict.keys())}")

    print(
        "\nTesting base_model_to_openai_function with output_str=True..."
    )
    result_str = base_model_to_openai_function(
        TestModel, output_str=True
    )
    print(f"✓ String result type: {type(result_str)}")
    print(f"✓ String result preview: {result_str[:100]}...")


def test_multi_base_model_to_openai_function():
    """Test that multi_base_model_to_openai_function handles output_str correctly."""
    print(
        "\nTesting multi_base_model_to_openai_function with output_str=False..."
    )
    result_dict = multi_base_model_to_openai_function(
        [TestModel], output_str=False
    )
    print(f"✓ Dict result type: {type(result_dict)}")
    print(f"✓ Dict result keys: {list(result_dict.keys())}")

    print(
        "\nTesting multi_base_model_to_openai_function with output_str=True..."
    )
    result_str = multi_base_model_to_openai_function(
        [TestModel], output_str=True
    )
    print(f"✓ String result type: {type(result_str)}")
    print(f"✓ String result preview: {result_str[:100]}...")


def test_base_tool_methods():
    """Test that BaseTool methods handle output_str parameter correctly."""
    print(
        "\nTesting BaseTool.base_model_to_dict with output_str=False..."
    )
    tool = BaseTool()
    result_dict = tool.base_model_to_dict(TestModel, output_str=False)
    print(f"✓ Dict result type: {type(result_dict)}")
    print(f"✓ Dict result keys: {list(result_dict.keys())}")

    print(
        "\nTesting BaseTool.base_model_to_dict with output_str=True..."
    )
    result_str = tool.base_model_to_dict(TestModel, output_str=True)
    print(f"✓ String result type: {type(result_str)}")
    print(f"✓ String result preview: {result_str[:100]}...")

    print(
        "\nTesting BaseTool.multi_base_models_to_dict with output_str=False..."
    )
    result_dict = tool.multi_base_models_to_dict(
        [TestModel], output_str=False
    )
    print(f"✓ Dict result type: {type(result_dict)}")
    print(f"✓ Dict result length: {len(result_dict)}")

    print(
        "\nTesting BaseTool.multi_base_models_to_dict with output_str=True..."
    )
    result_str = tool.multi_base_models_to_dict(
        [TestModel], output_str=True
    )
    print(f"✓ String result type: {type(result_str)}")
    print(f"✓ String result preview: {result_str[:100]}...")


def test_agent_integration():
    """Test that the Agent class can use the fixed methods without errors."""
    print("\nTesting Agent integration...")
    try:
        from swarms import Agent

        # Create a simple agent with a tool schema
        agent = Agent(
            model_name="gpt-4o-mini",
            tool_schema=TestModel,
            max_loops=1,
            verbose=True,
        )

        # This should not raise an error anymore
        agent.handle_tool_schema_ops()
        print(
            "✓ Agent.handle_tool_schema_ops() completed successfully"
        )
    except Exception as e:
        print(f"✗ Agent integration failed: {e}")
        return False

    return True


if __name__ == "__main__":
    print("=" * 60)
    print("Testing output_str parameter fix")
    print("=" * 60)

    try:
        test_base_model_to_openai_function()
        test_multi_base_model_to_openai_function()
        test_base_tool_methods()

        if test_agent_integration():
            print("\n" + "=" * 60)
            print(
                "✅ All tests passed! The output_str parameter fix is working correctly."
            )
            print("=" * 60)
        else:
            print("\n" + "=" * 60)
            print(
                "❌ Some tests failed. Please check the implementation."
            )
            print("=" * 60)
    except Exception as e:
        print(f"\n❌ Test failed with error: {e}")
        import traceback

        traceback.print_exc()