cleanup and format

pull/1119/head
Kye Gomez 4 days ago
parent f2e805ee04
commit da87691b90

@@ -12,7 +12,7 @@ def simulate_agent_discovery():
     """Simulate how an agent would use the discovery tool."""

     # Create a sample agent that will use the discovery tool
-    coordinator_agent = Agent(
+    Agent(
         agent_name="ProjectCoordinator",
         agent_description="Coordinates projects and assigns tasks to other agents",
         system_prompt="You are a project coordinator who helps organize work and delegate tasks to the most appropriate team members. You can discover information about other agents to make better decisions.",
@@ -118,34 +118,6 @@ def simulate_agent_discovery():
     # Show what the MCP tool response would look like
     print("📡 Sample MCP tool response structure:")
-    sample_response = {
-        "success": True,
-        "agents": [
-            {
-                "tool_name": "data_specialist",
-                "agent_name": "DataSpecialist",
-                "description": "Handles all data-related tasks and analysis",
-                "short_system_prompt": "You are a data specialist with expertise in data processing, analysis, and visualization...",
-                "tags": [
-                    "data",
-                    "analysis",
-                    "python",
-                    "sql",
-                    "statistics",
-                ],
-                "capabilities": [
-                    "data_processing",
-                    "statistical_analysis",
-                    "visualization",
-                ],
-                "role": "specialist",
-                "model_name": "gpt-4o-mini",
-                "max_loops": 1,
-                "temperature": 0.5,
-                "max_tokens": 4096,
-            }
-        ],
-    }
     print(" discover_agents() -> {")
     print(" 'success': True,")

@@ -15,7 +15,7 @@ async def demonstrate_new_agent_tools():
     """Demonstrate the new agent information tools."""

     # Create AOP cluster connection
-    aop_cluster = AOPCluster(
+    AOPCluster(
        urls=["http://localhost:5932/mcp"],
        transport="streamable-http",
    )
@@ -77,7 +77,7 @@ async def demonstrate_new_agent_tools():
     if isinstance(result, list) and len(result) > 0:
         data = result[0]
         if data.get("success"):
-            agent_info = data.get("agent_info", {})
+            data.get("agent_info", {})
             discovery_info = data.get("discovery_info", {})
             print(
                 f" Agent: {discovery_info.get('agent_name', 'Unknown')}"

@@ -1377,7 +1377,7 @@ class AOPBenchmarkSuite:
             # Execute with first available agent
             agent_name = available_agents[0]
             try:
-                response = aop._execute_agent_with_timeout(
+                aop._execute_agent_with_timeout(
                     agent_name, task, timeout=30
                 )
                 execution_time = time.time() - execution_start
@@ -1485,7 +1485,7 @@ class AOPBenchmarkSuite:
                 "data": [response2],
                 "analysis_type": "classification",
             }
-            response3 = aop._execute_agent_with_timeout(
+            aop._execute_agent_with_timeout(
                 available_agents[2], task3, timeout=30
             )
@@ -1664,7 +1664,7 @@ class AOPBenchmarkSuite:
             initial_memory = (
                 psutil.Process().memory_info().rss / 1024 / 1024
             )
-            initial_cpu = psutil.cpu_percent()
+            psutil.cpu_percent()

             # Execute some tasks
             available_agents = aop.list_agents()
@@ -1813,7 +1813,7 @@ class AOPBenchmarkSuite:
                 tool_start = time.time()
                 try:
                     # Execute tool test
-                    response = aop._execute_agent_with_timeout(
+                    aop._execute_agent_with_timeout(
                         available_agents[0], test, timeout=15
                     )
                     tool_time = time.time() - tool_start
@@ -2501,7 +2501,7 @@ class AOPBenchmarkSuite:
         # 3. Tool Quality vs Cost by Model
         ax3 = axes[1, 0]
-        scatter = ax3.scatter(
+        ax3.scatter(
             df["cost_usd"],
             df["response_quality_score"],
             s=100,

@@ -8,7 +8,6 @@ Bug: https://github.com/kyegomez/swarms/issues/1115
 """
 import pytest
-from pydantic import BaseModel
 from swarms.structs.agent import Agent
 from swarms.structs.auto_swarm_builder import (
