Code quality fixes

pull/1133/head
ChethanUK 2 months ago
parent 28aa8ef4f0
commit 80f38251d4

@@ -11,15 +11,6 @@ from swarms.structs.aop import AOP
 def simulate_agent_discovery():
     """Simulate how an agent would use the discovery tool."""
-    # Create a sample agent that will use the discovery tool
-    Agent(
-        agent_name="ProjectCoordinator",
-        agent_description="Coordinates projects and assigns tasks to other agents",
-        system_prompt="You are a project coordinator who helps organize work and delegate tasks to the most appropriate team members. You can discover information about other agents to make better decisions.",
-        model_name="gpt-4o-mini",
-        temperature=0.4,
-    )
     # Create the AOP cluster
     aop = AOP(
         server_name="Project Team",

@@ -7,19 +7,12 @@ This example shows how to use the new MCP tools for getting agent information.
 import json
 import asyncio
 from swarms.structs.aop import AOPCluster
 from swarms.tools.mcp_client_tools import execute_tool_call_simple
 async def demonstrate_new_agent_tools():
     """Demonstrate the new agent information tools."""
-    # Create AOP cluster connection
-    AOPCluster(
-        urls=["http://localhost:5932/mcp"],
-        transport="streamable-http",
-    )
     print("🔧 New AOP Agent Information Tools Demo")
     print("=" * 50)
     print()
@@ -77,7 +70,6 @@ async def demonstrate_new_agent_tools():
         if isinstance(result, list) and len(result) > 0:
             data = result[0]
             if data.get("success"):
-                data.get("agent_info", {})
                 discovery_info = data.get("discovery_info", {})
                 print(
                     f" Agent: {discovery_info.get('agent_name', 'Unknown')}"

@@ -303,7 +303,6 @@ class LegalSwarm:
         """
         try:
             from reportlab.lib.pagesizes import letter
             from reportlab.pdfgen import canvas
             from reportlab.lib.styles import (
                 getSampleStyleSheet,
                 ParagraphStyle,

@@ -337,7 +337,7 @@ def step_4_advanced_patterns():
     print("\n📊 Workflow structure:")
     try:
         advanced_workflow.visualize_simple()
-    except:
+    except Exception:
         print(" (Text visualization not available)")
     # Execute advanced workflow
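
A bare "except:" catches BaseException, so it also swallows KeyboardInterrupt and SystemExit; "except Exception:" keeps the fallback behavior while letting Ctrl-C and interpreter shutdown propagate. A minimal sketch of the corrected pattern (run_visualization is a hypothetical stand-in for the failing visualize_simple() call):

def run_visualization():
    # Stand-in for a visualization backend that is missing.
    raise RuntimeError("text visualization backend not available")

try:
    run_visualization()
except Exception:  # unlike a bare "except:", KeyboardInterrupt still propagates
    print("(Text visualization not available)")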

@@ -109,7 +109,7 @@ def test_basic_import() -> bool:
     print("\n🧪 Testing basic GraphWorkflow import...")
     try:
-        from swarms.structs.graph_workflow import GraphWorkflow
+        from swarms.structs.graph_workflow import GraphWorkflow  # noqa: F401
         print("✅ GraphWorkflow imported successfully")
         return True
@@ -123,7 +123,7 @@ def test_agent_import() -> bool:
     print("\n🧪 Testing Agent import...")
     try:
-        from swarms import Agent
+        from swarms import Agent  # noqa: F401
         print("✅ Agent imported successfully")
         return True
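
Flake8 and Ruff flag an imported-but-unused name as F401, which is exactly what an import smoke test produces on purpose: the import itself is the assertion. The noqa marker documents that intent. The same pattern in isolation (the json module is illustrative):

def test_json_import() -> bool:
    """Smoke test: succeeding at the import is the whole test."""
    try:
        import json  # noqa: F401  # unused on purpose; importing is the test
        return True
    except ImportError:
        return False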

@@ -213,7 +213,7 @@ def test_file_save_load():
     try:
         os.remove("test_workflow.json")
         print("\n🧹 Cleaned up test file")
-    except:
+    except Exception:
         pass

@@ -1202,7 +1202,7 @@ class AgentMapSimulation:
                 self.fig.canvas.manager.window.wm_attributes(
                     "-topmost", 0
                 )
-            except:
+            except Exception:
                 pass  # Not all backends support this
             plt.show(block=False)
@@ -1535,7 +1535,7 @@ class AgentMapSimulation:
         if with_visualization and self.fig is not None:
             try:
                 self.update_visualization()
-            except:
+            except Exception:
                 pass  # Ignore visualization errors
         # Print status every 10 seconds

@@ -250,7 +250,7 @@ def main():
     if simulation.fig is not None:
         try:
             simulation.update_visualization()
-        except:
+        except Exception:
             pass  # Ignore visualization errors
     # Check if we have enough conversations to make it interesting

@@ -25,7 +25,6 @@ warnings.filterwarnings("ignore")
 # Third-party model imports
 try:
     import timm
     from segment_anything import (
         SamAutomaticMaskGenerator,
         sam_model_registry,

@@ -1,4 +1,5 @@
 from swarms.utils import load_agents_from_markdown
+from swarms.structs.sequential_workflow import SequentialWorkflow
 agents = load_agents_from_markdown(
     [
@@ -9,7 +10,6 @@ agents = load_agents_from_markdown(
 )
 # Example 3: Use agents in a workflow
-from swarms.structs.sequential_workflow import SequentialWorkflow
 workflow = SequentialWorkflow(agents=agents, max_loops=1)

@@ -22,7 +22,7 @@ def test_swarms_import() -> Dict[str, Any]:
         )
         # Test basic functionality
-        from swarms import Agent
+        from swarms import Agent  # noqa: F401
         print(" Agent class imported successfully")

@@ -34,7 +34,7 @@ from contextlib import contextmanager
 # Try to import transformers, but don't fail if not available
 try:
     import transformers
-    from transformers import AutoModel, AutoTokenizer
+    from transformers import AutoModel
     TRANSFORMERS_AVAILABLE = True
 except ImportError:
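
Trimming the probe import to the one name the module actually uses keeps the availability flag honest: TRANSFORMERS_AVAILABLE should mean "everything this file needs is importable", no more. The guarded optional-dependency pattern in miniature (load_backbone is a hypothetical caller):

try:
    from transformers import AutoModel  # only the name the module really uses
    TRANSFORMERS_AVAILABLE = True
except ImportError:
    TRANSFORMERS_AVAILABLE = False

def load_backbone(name: str):
    # Hypothetical helper: fail loudly at call time, not at import time.
    if not TRANSFORMERS_AVAILABLE:
        raise RuntimeError("transformers is not installed")
    return AutoModel.from_pretrained(name)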

@@ -16,7 +16,6 @@ from typing import (
     Type,
     TypeVar,
     Union,
-    get_args,
 )
 from pydantic import BaseModel, Field

@@ -117,7 +117,6 @@ def process_audio_with_model(
     from litellm import (
         completion,
         supports_audio_input,
-        supports_audio_output,
     )
     if not supports_audio_input(model):

@@ -1,11 +1,10 @@
 from unittest.mock import MagicMock
 import unittest
 from swarms.structs.agent import Agent
 from swarms.tools.tool_parse_exec import parse_and_execute_json
 # Mock parse_and_execute_json for testing
-parse_and_execute_json = MagicMock()
-parse_and_execute_json.return_value = {
+mock_parse_and_execute_json = MagicMock()
+mock_parse_and_execute_json.return_value = {
     "tool_name": "calculator",
     "args": {"numbers": [2, 2]},
     "output": "4",

@@ -92,7 +92,7 @@ class SwarmsIssueReporter:
             import swarms
             return swarms.__version__
-        except:
+        except Exception:
             return "Unknown"
     def _get_system_info(self) -> SwarmSystemInfo:
@@ -185,7 +185,7 @@ class SwarmsIssueReporter:
             for dist in pkg_resources.working_set:
                 deps.append(f"- {dist.key} {dist.version}")
             return "\n".join(deps)
-        except:
+        except Exception:
             return "Unable to fetch dependency information"
     # First, add this method to your SwarmsIssueReporter class

@ -1,13 +1,47 @@
#!/usr/bin/env python3
"""
AOP Framework Benchmarking Suite
This comprehensive benchmarking suite tests the scaling laws of the AOP (Agent Orchestration Platform)
framework by measuring latency, throughput, memory usage, and other performance metrics across different
agent counts and configurations.
Features:
- Scaling law analysis (1 to 100+ agents)
- Latency and throughput measurements
- Memory usage profiling
- Concurrent execution testing
- Error rate analysis
- Performance visualization with charts
- Statistical analysis and reporting
- Real agent testing with actual LLM calls
Usage:
1. Set your OpenAI API key: export OPENAI_API_KEY="your-key-here"
2. Install required dependencies: pip install swarms
3. Run the benchmark: python aop_benchmark.py
4. Check results in the generated charts and reports
Configuration:
- Edit BENCHMARK_CONFIG at the top of the file to customize settings
- Adjust model_name, max_agents, and other parameters as needed
- This benchmark ONLY uses real agents with actual LLM calls
Author: AI Assistant
Date: 2024
"""
import gc
import json
import os
import psutil
import random
import statistics
import time
import uuid
import warnings
import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import asdict, dataclass
from dataclasses import dataclass, asdict
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple
@@ -15,22 +49,14 @@ import matplotlib.pyplot as plt
 import numpy as np
 import openpyxl
 import pandas as pd
-import psutil
 import seaborn as sns
 from dotenv import load_dotenv
 from loguru import logger
 from openpyxl.styles import Font
 from openpyxl.utils.dataframe import dataframe_to_rows
-from swarms.structs.agent import Agent
-from swarms.structs.aop import AOP
-from swarms.utils.litellm_wrapper import LiteLLM
-# Suppress warnings for cleaner output
-warnings.filterwarnings("ignore")
-# Load environment variables
-load_dotenv()
 # Configuration
 BENCHMARK_CONFIG = {
     "models": [
@@ -60,6 +86,21 @@ BENCHMARK_CONFIG = {
     "detailed_logging": True,  # Enable detailed logging
 }
+# Suppress warnings for cleaner output
+warnings.filterwarnings("ignore")
+# Load environment variables
+load_dotenv()
+# Import swarms Agent directly to avoid uvloop dependency
+try:
+    from swarms.structs.agent import Agent
+    from swarms.utils.litellm_wrapper import LiteLLM
+    SWARMS_AVAILABLE = True
+except ImportError:
+    SWARMS_AVAILABLE = False
 @dataclass
 class BenchmarkResult:
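
For reference against the module docstring's configuration note, overriding this dict is the intended knob. A hedged sketch (only keys that appear in this diff or the docstring are real; the values are illustrative):

BENCHMARK_CONFIG.update(
    {
        "models": ["gpt-4o-mini"],  # restrict the sweep to one model
        "max_agents": 10,           # named in the module docstring
        "context_length": 4096,     # printed by main(); value illustrative
        "detailed_logging": False,  # quieter runs
    }
)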
@@ -375,6 +416,12 @@ class AOPBenchmarkSuite:
                 "SWARMS_API_KEY or OPENAI_API_KEY environment variable is required for real agent testing"
             )
+        # Check if swarms is available
+        if not SWARMS_AVAILABLE:
+            raise ImportError(
+                "Swarms not available - install swarms: pip install swarms"
+            )
         # Create LiteLLM instance for the specific model
         llm = LiteLLM(
             model_name=model_name,
@@ -877,7 +924,7 @@ class AOPBenchmarkSuite:
                 try:
                     if len(str(cell.value)) > max_length:
                         max_length = len(str(cell.value))
-                except:
+                except Exception:
                     pass
             adjusted_width = min(max_length + 2, 50)
             ws.column_dimensions[column_letter].width = adjusted_width
@@ -1616,7 +1663,6 @@ class AOPBenchmarkSuite:
         initial_memory = (
             psutil.Process().memory_info().rss / 1024 / 1024
         )
-        psutil.cpu_percent()
         # Execute some tasks
         available_agents = aop.list_agents()
@@ -2439,13 +2485,13 @@ class AOPBenchmarkSuite:
         ax2.grid(True, alpha=0.3)
         # Add value labels on bars
-        for i, (bar, time) in enumerate(
+        for i, (bar, exec_time) in enumerate(
             zip(bars2, df["avg_tool_execution_time"])
         ):
             ax2.text(
                 bar.get_x() + bar.get_width() / 2,
                 bar.get_height() + 0.01,
-                f"{time:.2f}s",
+                f"{exec_time:.2f}s",
                 ha="center",
                 va="bottom",
                 fontsize=8,
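
The rename fixes more than a lint warning: the loop variable had shadowed the time module inside this method, so any later time.time() or time.sleep() call in the same scope would blow up on a float. A minimal reproduction of the bug the rename removes (function name illustrative):

import time

def label_bars(durations):
    for time in durations:  # rebinds "time" to a float, shadowing the module
        print(f"{time:.2f}s")
    time.sleep(0.1)  # AttributeError: 'float' object has no attribute 'sleep'

# label_bars([0.5, 1.25])  # uncomment to reproduce the failure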
@@ -2905,6 +2951,33 @@ def main():
     print(f" Context Length: {BENCHMARK_CONFIG['context_length']}")
     print()
+    # Check for required environment variables
+    api_key = os.getenv("SWARMS_API_KEY") or os.getenv(
+        "OPENAI_API_KEY"
+    )
+    if not api_key:
+        print(
+            "❌ Error: SWARMS_API_KEY or OPENAI_API_KEY not found in environment variables"
+        )
+        print(
+            " This benchmark requires real LLM calls for accurate performance testing"
+        )
+        print(
+            " Set your API key: export SWARMS_API_KEY='your-key-here' or export OPENAI_API_KEY='your-key-here'"
+        )
+        return 1
+    # Check for required imports
+    if not SWARMS_AVAILABLE:
+        print("❌ Error: swarms not available")
+        print(
+            " Install required dependencies: pip install swarms openpyxl"
+        )
+        print(
+            " This benchmark requires swarms framework and Excel support"
+        )
+        return 1
     # Initialize benchmark suite
     benchmark = AOPBenchmarkSuite(
         output_dir="aop_benchmark_results",

File diff suppressed because it is too large.

@@ -2,9 +2,6 @@ import os
 from dotenv import load_dotenv
-# Load environment variables
-load_dotenv()
 from swarms import (
     Agent,
     ConcurrentWorkflow,
@@ -14,6 +11,9 @@ from swarms import (
 from swarms.utils.formatter import Formatter
+# Load environment variables
+load_dotenv()
 class MarkdownTestSwarm:
     """A test swarm that demonstrates markdown output capabilities"""
