From da87691b90b2cd915ecf41ed80234c4a09f747bc Mon Sep 17 00:00:00 2001
From: Kye Gomez
Date: Sun, 5 Oct 2025 12:53:29 -0700
Subject: [PATCH] cleanup and format

---
 .../discovery/example_agent_communication.py | 30 +------------------
 .../aop_examples/example_new_agent_tools.py  |  4 +--
 tests/aop/aop_benchmark.py                   | 10 +++----
 tests/structs/test_auto_swarm_builder_fix.py |  1 -
 4 files changed, 8 insertions(+), 37 deletions(-)

diff --git a/examples/aop_examples/discovery/example_agent_communication.py b/examples/aop_examples/discovery/example_agent_communication.py
index 5fb6a0dc..c4fb28ec 100644
--- a/examples/aop_examples/discovery/example_agent_communication.py
+++ b/examples/aop_examples/discovery/example_agent_communication.py
@@ -12,7 +12,7 @@ def simulate_agent_discovery():
     """Simulate how an agent would use the discovery tool."""
 
     # Create a sample agent that will use the discovery tool
-    coordinator_agent = Agent(
+    Agent(
         agent_name="ProjectCoordinator",
         agent_description="Coordinates projects and assigns tasks to other agents",
         system_prompt="You are a project coordinator who helps organize work and delegate tasks to the most appropriate team members. You can discover information about other agents to make better decisions.",
@@ -118,34 +118,6 @@ def simulate_agent_discovery():
 
     # Show what the MCP tool response would look like
     print("📡 Sample MCP tool response structure:")
-    sample_response = {
-        "success": True,
-        "agents": [
-            {
-                "tool_name": "data_specialist",
-                "agent_name": "DataSpecialist",
-                "description": "Handles all data-related tasks and analysis",
-                "short_system_prompt": "You are a data specialist with expertise in data processing, analysis, and visualization...",
-                "tags": [
-                    "data",
-                    "analysis",
-                    "python",
-                    "sql",
-                    "statistics",
-                ],
-                "capabilities": [
-                    "data_processing",
-                    "statistical_analysis",
-                    "visualization",
-                ],
-                "role": "specialist",
-                "model_name": "gpt-4o-mini",
-                "max_loops": 1,
-                "temperature": 0.5,
-                "max_tokens": 4096,
-            }
-        ],
-    }
 
     print(" discover_agents() -> {")
     print(" 'success': True,")
diff --git a/examples/aop_examples/example_new_agent_tools.py b/examples/aop_examples/example_new_agent_tools.py
index 4e460943..4806fa8e 100644
--- a/examples/aop_examples/example_new_agent_tools.py
+++ b/examples/aop_examples/example_new_agent_tools.py
@@ -15,7 +15,7 @@ async def demonstrate_new_agent_tools():
     """Demonstrate the new agent information tools."""
 
     # Create AOP cluster connection
-    aop_cluster = AOPCluster(
+    AOPCluster(
         urls=["http://localhost:5932/mcp"],
         transport="streamable-http",
     )
@@ -77,7 +77,7 @@ async def demonstrate_new_agent_tools():
         if isinstance(result, list) and len(result) > 0:
             data = result[0]
             if data.get("success"):
-                agent_info = data.get("agent_info", {})
+                data.get("agent_info", {})
                 discovery_info = data.get("discovery_info", {})
                 print(
                     f" Agent: {discovery_info.get('agent_name', 'Unknown')}"
                 )
diff --git a/tests/aop/aop_benchmark.py b/tests/aop/aop_benchmark.py
index c64dfbb0..c727ba7c 100644
--- a/tests/aop/aop_benchmark.py
+++ b/tests/aop/aop_benchmark.py
@@ -1377,7 +1377,7 @@ class AOPBenchmarkSuite:
             # Execute with first available agent
             agent_name = available_agents[0]
             try:
-                response = aop._execute_agent_with_timeout(
+                aop._execute_agent_with_timeout(
                     agent_name, task, timeout=30
                 )
                 execution_time = time.time() - execution_start
@@ -1485,7 +1485,7 @@ class AOPBenchmarkSuite:
                         "data": [response2],
                         "analysis_type": "classification",
                     }
-                    response3 = aop._execute_agent_with_timeout(
+                    aop._execute_agent_with_timeout(
                         available_agents[2], task3, timeout=30
                     )
@@ -1664,7 +1664,7 @@ class AOPBenchmarkSuite:
             initial_memory = (
                 psutil.Process().memory_info().rss / 1024 / 1024
             )
-            initial_cpu = psutil.cpu_percent()
+            psutil.cpu_percent()
 
             # Execute some tasks
             available_agents = aop.list_agents()
@@ -1813,7 +1813,7 @@ class AOPBenchmarkSuite:
                 tool_start = time.time()
                 try:
                     # Execute tool test
-                    response = aop._execute_agent_with_timeout(
+                    aop._execute_agent_with_timeout(
                         available_agents[0], test, timeout=15
                     )
                     tool_time = time.time() - tool_start
@@ -2501,7 +2501,7 @@ class AOPBenchmarkSuite:
 
         # 3. Tool Quality vs Cost by Model
         ax3 = axes[1, 0]
-        scatter = ax3.scatter(
+        ax3.scatter(
            df["cost_usd"],
            df["response_quality_score"],
            s=100,
diff --git a/tests/structs/test_auto_swarm_builder_fix.py b/tests/structs/test_auto_swarm_builder_fix.py
index 4167117d..420c1892 100644
--- a/tests/structs/test_auto_swarm_builder_fix.py
+++ b/tests/structs/test_auto_swarm_builder_fix.py
@@ -8,7 +8,6 @@ Bug: https://github.com/kyegomez/swarms/issues/1115
 """
 
 import pytest
-from pydantic import BaseModel
 
 from swarms.structs.agent import Agent
 from swarms.structs.auto_swarm_builder import (