diff --git a/docs/clusterops/reference.md b/docs/clusterops/reference.md
deleted file mode 100644
index eca83bbf..00000000
--- a/docs/clusterops/reference.md
+++ /dev/null
@@ -1,334 +0,0 @@
-# ClusterOps API Reference
-
-ClusterOps is a Python library for managing and executing tasks across CPU and GPU resources in a distributed computing environment. It provides functions for resource discovery, task execution, and performance monitoring.
-
-## Installation
-
-```bash
-pip3 install clusterops
-```
-
-## Table of Contents
-1. [CPU Operations](#cpu-operations)
-2. [GPU Operations](#gpu-operations)
-3. [Utility Functions](#utility-functions)
-4. [Resource Monitoring](#resource-monitoring)
-
-## CPU Operations
-
-### `list_available_cpus() -> List[int]`
-
-Lists all available CPU cores.
-
-#### Returns
-| Type | Description |
-|------|-------------|
-| `List[int]` | A list of available CPU core indices. |
-
-#### Raises
-| Exception | Description |
-|-----------|-------------|
-| `RuntimeError` | If no CPUs are found. |
-
-#### Example
-```python
-from clusterops import list_available_cpus
-
-available_cpus = list_available_cpus()
-print(f"Available CPU cores: {available_cpus}")
-```
-
-### `execute_on_cpu(cpu_id: int, func: Callable, *args: Any, **kwargs: Any) -> Any`
-
-Executes a callable on a specific CPU.
-
-#### Parameters
-| Name | Type | Description |
-|------|------|-------------|
-| `cpu_id` | `int` | The CPU core to run the function on. |
-| `func` | `Callable` | The function to be executed. |
-| `*args` | `Any` | Arguments for the callable. |
-| `**kwargs` | `Any` | Keyword arguments for the callable. |
-
-#### Returns
-| Type | Description |
-|------|-------------|
-| `Any` | The result of the function execution. |
-
-#### Raises
-| Exception | Description |
-|-----------|-------------|
-| `ValueError` | If the CPU core specified is invalid. |
-| `RuntimeError` | If there is an error executing the function on the CPU. |
-
-#### Example
-```python
-from clusterops import execute_on_cpu
-
-def sample_task(n: int) -> int:
- return n * n
-
-result = execute_on_cpu(0, sample_task, 10)
-print(f"Result of sample task on CPU 0: {result}")
-```
-
-### `execute_with_cpu_cores(core_count: int, func: Callable, *args: Any, **kwargs: Any) -> Any`
-
-Executes a callable using a specified number of CPU cores.
-
-#### Parameters
-| Name | Type | Description |
-|------|------|-------------|
-| `core_count` | `int` | The number of CPU cores to run the function on. |
-| `func` | `Callable` | The function to be executed. |
-| `*args` | `Any` | Arguments for the callable. |
-| `**kwargs` | `Any` | Keyword arguments for the callable. |
-
-#### Returns
-| Type | Description |
-|------|-------------|
-| `Any` | The result of the function execution. |
-
-#### Raises
-| Exception | Description |
-|-----------|-------------|
-| `ValueError` | If the number of CPU cores specified is invalid or exceeds available cores. |
-| `RuntimeError` | If there is an error executing the function on the specified CPU cores. |
-
-#### Example
-```python
-from clusterops import execute_with_cpu_cores
-
-def parallel_task(n: int) -> int:
- return sum(range(n))
-
-result = execute_with_cpu_cores(4, parallel_task, 1000000)
-print(f"Result of parallel task using 4 CPU cores: {result}")
-```
-
-## GPU Operations
-
-### `list_available_gpus() -> List[str]`
-
-Lists all available GPUs.
-
-#### Returns
-| Type | Description |
-|------|-------------|
-| `List[str]` | A list of available GPU names. |
-
-#### Raises
-| Exception | Description |
-|-----------|-------------|
-| `RuntimeError` | If no GPUs are found. |
-
-#### Example
-```python
-from clusterops import list_available_gpus
-
-available_gpus = list_available_gpus()
-print(f"Available GPUs: {available_gpus}")
-```
-
-### `select_best_gpu() -> Optional[int]`
-
-Selects the GPU with the most free memory.
-
-#### Returns
-| Type | Description |
-|------|-------------|
-| `Optional[int]` | The GPU ID of the best available GPU, or None if no GPUs are available. |
-
-#### Example
-```python
-from clusterops import select_best_gpu
-
-best_gpu = select_best_gpu()
-if best_gpu is not None:
- print(f"Best GPU for execution: GPU {best_gpu}")
-else:
- print("No GPUs available")
-```
-
-### `execute_on_gpu(gpu_id: int, func: Callable, *args: Any, **kwargs: Any) -> Any`
-
-Executes a callable on a specific GPU using Ray.
-
-#### Parameters
-| Name | Type | Description |
-|------|------|-------------|
-| `gpu_id` | `int` | The GPU to run the function on. |
-| `func` | `Callable` | The function to be executed. |
-| `*args` | `Any` | Arguments for the callable. |
-| `**kwargs` | `Any` | Keyword arguments for the callable. |
-
-#### Returns
-| Type | Description |
-|------|-------------|
-| `Any` | The result of the function execution. |
-
-#### Raises
-| Exception | Description |
-|-----------|-------------|
-| `ValueError` | If the GPU index is invalid. |
-| `RuntimeError` | If there is an error executing the function on the GPU. |
-
-#### Example
-```python
-from clusterops import execute_on_gpu
-
-def gpu_task(n: int) -> int:
- return n ** 2
-
-result = execute_on_gpu(0, gpu_task, 10)
-print(f"Result of GPU task on GPU 0: {result}")
-```
-
-### `execute_on_multiple_gpus(gpu_ids: List[int], func: Callable, all_gpus: bool = False, timeout: float = None, *args: Any, **kwargs: Any) -> List[Any]`
-
-Executes a callable across multiple GPUs using Ray.
-
-#### Parameters
-| Name | Type | Description |
-|------|------|-------------|
-| `gpu_ids` | `List[int]` | The list of GPU IDs to run the function on. |
-| `func` | `Callable` | The function to be executed. |
-| `all_gpus` | `bool` | Whether to use all available GPUs (default: False). |
-| `timeout` | `float` | Timeout for the execution in seconds (default: None). |
-| `*args` | `Any` | Arguments for the callable. |
-| `**kwargs` | `Any` | Keyword arguments for the callable. |
-
-#### Returns
-| Type | Description |
-|------|-------------|
-| `List[Any]` | A list of results from the execution on each GPU. |
-
-#### Raises
-| Exception | Description |
-|-----------|-------------|
-| `ValueError` | If any GPU index is invalid. |
-| `RuntimeError` | If there is an error executing the function on the GPUs. |
-
-#### Example
-```python
-from clusterops import execute_on_multiple_gpus
-
-def multi_gpu_task(n: int) -> int:
- return n ** 3
-
-results = execute_on_multiple_gpus([0, 1], multi_gpu_task, 5)
-print(f"Results of multi-GPU task: {results}")
-```
-
-### `distributed_execute_on_gpus(gpu_ids: List[int], func: Callable, *args: Any, **kwargs: Any) -> List[Any]`
-
-Executes a callable across multiple GPUs and nodes using Ray's distributed task scheduling.
-
-#### Parameters
-| Name | Type | Description |
-|------|------|-------------|
-| `gpu_ids` | `List[int]` | The list of GPU IDs across nodes to run the function on. |
-| `func` | `Callable` | The function to be executed. |
-| `*args` | `Any` | Arguments for the callable. |
-| `**kwargs` | `Any` | Keyword arguments for the callable. |
-
-#### Returns
-| Type | Description |
-|------|-------------|
-| `List[Any]` | A list of results from the execution on each GPU. |
-
-#### Example
-```python
-from clusterops import distributed_execute_on_gpus
-
-def distributed_task(n: int) -> int:
- return n ** 4
-
-results = distributed_execute_on_gpus([0, 1, 2, 3], distributed_task, 3)
-print(f"Results of distributed GPU task: {results}")
-```
-
-## Utility Functions
-
-### `retry_with_backoff(func: Callable, retries: int = RETRY_COUNT, delay: float = RETRY_DELAY, *args: Any, **kwargs: Any) -> Any`
-
-Retries a callable function with exponential backoff in case of failure.
-
-#### Parameters
-| Name | Type | Description |
-|------|------|-------------|
-| `func` | `Callable` | The function to execute with retries. |
-| `retries` | `int` | Number of retries (default: RETRY_COUNT from env). |
-| `delay` | `float` | Delay between retries in seconds (default: RETRY_DELAY from env). |
-| `*args` | `Any` | Arguments for the callable. |
-| `**kwargs` | `Any` | Keyword arguments for the callable. |
-
-#### Returns
-| Type | Description |
-|------|-------------|
-| `Any` | The result of the function execution. |
-
-#### Raises
-| Exception | Description |
-|-----------|-------------|
-| `Exception` | After all retries fail. |
-
-#### Example
-```python
-from clusterops import retry_with_backoff
-
-def unstable_task():
- # Simulating an unstable task that might fail
- import random
- if random.random() < 0.5:
- raise Exception("Task failed")
- return "Task succeeded"
-
-result = retry_with_backoff(unstable_task, retries=5, delay=1)
-print(f"Result of unstable task: {result}")
-```
-
-## Resource Monitoring
-
-### `monitor_resources()`
-
-Continuously monitors CPU and GPU resources and logs alerts when thresholds are crossed.
-
-#### Example
-```python
-from clusterops import monitor_resources
-
-# Start monitoring resources
-monitor_resources()
-```
-
-### `profile_execution(func: Callable, *args: Any, **kwargs: Any) -> Any`
-
-Profiles the execution of a task, collecting metrics like execution time and CPU/GPU usage.
-
-#### Parameters
-| Name | Type | Description |
-|------|------|-------------|
-| `func` | `Callable` | The function to profile. |
-| `*args` | `Any` | Arguments for the callable. |
-| `**kwargs` | `Any` | Keyword arguments for the callable. |
-
-#### Returns
-| Type | Description |
-|------|-------------|
-| `Any` | The result of the function execution along with the collected metrics. |
-
-#### Example
-```python
-from clusterops import profile_execution
-
-def cpu_intensive_task():
- return sum(i*i for i in range(10000000))
-
-result = profile_execution(cpu_intensive_task)
-print(f"Result of profiled task: {result}")
-```
-
-This reference covers ClusterOps' main functions, with their parameters, return values, raised exceptions, and usage examples for managing tasks across CPU and GPU resources in a distributed environment.
\ No newline at end of file
diff --git a/docs/governance/main.md b/docs/governance/main.md
new file mode 100644
index 00000000..7e10f552
--- /dev/null
+++ b/docs/governance/main.md
@@ -0,0 +1,77 @@
+# Links & Resources
+
+Welcome to the Swarms ecosystem. Use the links below to explore our products, community, documentation, and social platforms.
+
+---
+
+## Quick Summary
+
+| Category | Link |
+|--------------|----------------------------------------------------------------------|
+| API Docs | [docs.swarms.world](https://docs.swarms.world/en/latest/swarms_cloud/swarms_api/) |
+| GitHub | [kyegomez/swarms](https://github.com/kyegomez/swarms) |
+| GitHub (Rust)| [The-Swarm-Corporation/swarms-rs](https://github.com/The-Swarm-Corporation/swarms-rs) |
+| Chat UI | [swarms.world/platform/chat](https://swarms.world/platform/chat) |
+| Marketplace | [swarms.world](https://swarms.world) |
+| Startup App | [Apply Here](https://www.swarms.xyz/programs/startups) |
+| Discord | [Join Now](https://discord.gg/jM3Z6M9uMq) |
+| Telegram | [Group Chat](https://t.me/swarmsgroupchat) |
+| Twitter/X | [@swarms_corp](https://x.com/swarms_corp) |
+| Blog | [medium.com/@kyeg](https://medium.com/@kyeg) |
+
+---
+
+> Swarms is building the agentic internet. Join the movement and build the future with us.
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 14382b49..b83e7f63 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -370,6 +370,11 @@ nav:
- Overview: "swarms_rs/overview.md"
- Agents: "swarms_rs/agents.md"
+
+ - Governance:
+ - Resources: "governance/main.md"
+ - Tokenomics: "web3/token.md"
+
# - Prompts API:
# - Add Prompts: "swarms_platform/prompts/add_prompt.md"
diff --git a/docs/web3/token.md b/docs/web3/token.md
new file mode 100644
index 00000000..2117bbaa
--- /dev/null
+++ b/docs/web3/token.md
@@ -0,0 +1,143 @@
+
+# $swarms Tokenomics
+
+**Empowering the Agentic Revolution**
+Token Contract Address: `74SBV4zDXxTRgv1pEMoECskKBkZHc2yGPnc7GYVepump`
+
+> You can buy $swarms on most marketplaces:
+> **Pump.fun**, **Kraken**, **Bitget**, **Binance**, **OKX**, and more.
+
+---
+
+## Overview
+
+- **Token Name:** Swarms Coin
+- **Ticker:** `$swarms`
+- **Blockchain:** Solana
+- **Utility:** Powering the agentic economy.
+
+---
+
+## Initial Token Distribution
+
+| Allocation | Percentage |
+|-----------------|------------|
+| **Team**        | 3%         |
+| **Public Sale** | 97%        |
+
+> At launch, only **3%** was reserved for the team, among the **smallest allocations in DAO history**.
+
+---
+
+## A Message from the Team
+
+!!! quote
+    When we launched $swarms, we prioritized community ownership by allocating just 3% to the team.
+    Our intent was radical decentralization. But that decision has created unintended consequences.
+
+### Challenges We Faced
+
+- **Market manipulation** by whales and exchanges
+- **Unsustainable funding** for innovation and ops
+- **Malicious actors** undermining decentralization
+
+---
+
+## Our Proposed Solution
+
+We are initiating a **DAO governance proposal** to:
+
+### Key Reforms
+
+- **Increase team allocation to 10%**
+  Secure operational longevity and attract top contributors.
+
+- **Launch an ecosystem grants program**
+  Incentivize developers building agentic tools and infra.
+
+- **Combat token manipulation**
+  Deploy anti-whale policies and explore token lockups.
+
+- **Strengthen community dev initiatives**
+  Support contributor bounties, governance tooling, and hackathons.
+
+> This proposal isn't about centralizing power; it's about protecting and empowering the **Swarms ecosystem**.
+
+---
+
+## Contribute to Swarms DAO
+
+To expand our ecosystem, grow the core team, and bring agentic AI to the world, we invite all community members to **invest directly in Swarms DAO**.
+
+Send **$swarms** or **SOL** to our official treasury address:
+
+```plaintext
+DAO Treasury Wallet:
+7MaX4muAn8ZQREJxnupm8sgokwFHujgrGfH9Qn81BuEV
+```
+
+!!! success "Every contribution matters"
+    Whether it's 1 $swarms or 1000 SOL, you're helping fund a decentralized future.
+
+> You may use most wallets and platforms supporting Solana to send tokens.
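+
+If you want to sanity-check the treasury address before sending funds, here is a minimal sketch assuming the community `solana-py` and `solders` packages (an illustrative choice; any Solana RPC client works):
+
+```python
+from solana.rpc.api import Client
+from solders.pubkey import Pubkey
+
+# Public mainnet RPC endpoint; swap in a dedicated provider for heavy use.
+client = Client("https://api.mainnet-beta.solana.com")
+
+treasury = Pubkey.from_string("7MaX4muAn8ZQREJxnupm8sgokwFHujgrGfH9Qn81BuEV")
+
+# Balances are reported in lamports; 1 SOL = 1_000_000_000 lamports.
+lamports = client.get_balance(treasury).value
+print(f"DAO treasury balance: {lamports / 1_000_000_000:.4f} SOL")
+```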
+
+---
+
+## Why Invest?
+
+Your contributions fund:
+
+- Expansion of the **Swarms core team**
+- Development of **open-source AI agent tooling**
+- Community **grants** and contributor **bounties**
+- Anti-manipulation strategies & decentralized governance tools
+
+---
+
+## How to Get Involved
+
+[DAO Governance Portal](https://dao.swarms.world)
+[Investor Information](https://investors.swarms.world)
+
+### You can:
+- Vote on governance proposals
+
+- Submit development or funding proposals
+
+- Share $swarms with your network
+
+- Build with our upcoming agent SDKs
+
+- Contribute to the mission of agentic decentralization
+
+---
+
+## Quick Summary
+
+| Key Metric | Value |
+|----------------------------|------------------|
+| **Token Symbol** | `$swarms` |
+| **Blockchain** | Solana |
+| **Initial Team Allocation**| 3% (Proposed 10%)|
+| **Public Distribution** | 97% |
+| **DAO Wallet** | `7MaX4muAn8ZQREJxnupm8sgokwFHujgrGfH9Qn81BuEV` |
+| **DAO Governance** | [dao.swarms.world](https://dao.swarms.world) |
+
+---
+
+## Useful Links
+
+- [DAO Governance Portal][dao]
+
+- [Investor Information][investors]
+
+- [Official Site][site]
+
+- [Join Swarms on Discord][discord]
+
+[dao]: https://dao.swarms.world/
+[investors]: https://investors.swarms.world/
+[site]: https://swarms.world/
+[discord]: https://discord.gg/swarms
+
diff --git a/pyproject.toml b/pyproject.toml
index 58fc54eb..528ab04b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,8 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
-version = "7.7.1"
+version = "7.7.2"
description = "Swarms - TGSC"
license = "MIT"
authors = ["Kye Gomez "]
diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py
index af50cca5..022f2e23 100644
--- a/swarms/structs/agent.py
+++ b/swarms/structs/agent.py
@@ -1074,7 +1074,8 @@ class Agent:
def _handle_run_error(self, error: any):
process_thread = threading.Thread(
target=self.__handle_run_error,
-            args=(error, ),
+            args=(error,),
daemon=True,
)
process_thread.start()
diff --git a/swarms/tools/mcp_client.py b/swarms/tools/mcp_client.py
index c424b925..e6032921 100644
--- a/swarms/tools/mcp_client.py
+++ b/swarms/tools/mcp_client.py
@@ -74,15 +75,20 @@ async def _execute_mcp_tool(
-async def execute_mcp_tool(
+def execute_mcp_tool(
url: str,
tool_name: str = None,
method: Literal["stdio", "sse"] = "sse",
parameters: Dict[Any, Any] = None,
output_type: Literal["str", "dict"] = "str",
) -> Dict[Any, Any]:
- return await _execute_mcp_tool(
- url=url,
- method=method,
- parameters=parameters,
- output_type=output_type,
+    return asyncio.run(
+ _execute_mcp_tool(
+ url=url,
+ tool_name=tool_name,
+ method=method,
+ parameters=parameters,
+ output_type=output_type,
+ )
)
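+
+
+# Note: `execute_mcp_tool` is now a synchronous wrapper that drives the async
+# helper to completion with `asyncio.run`, so callers no longer need their own
+# event loop. A minimal usage sketch (the URL and tool name below are
+# hypothetical placeholders, not part of this module):
+#
+#     result = execute_mcp_tool(
+#         url="http://localhost:8000/sse",
+#         tool_name="add",
+#         method="sse",
+#         parameters={"a": 2, "b": 3},
+#         output_type="dict",
+#     )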
diff --git a/tests/agent_exec_benchmark.py b/tests/agent_exec_benchmark.py
new file mode 100644
index 00000000..11872304
--- /dev/null
+++ b/tests/agent_exec_benchmark.py
@@ -0,0 +1,284 @@
+import asyncio
+import concurrent.futures
+import json
+import os
+import psutil
+import datetime
+from pathlib import Path
+from typing import List, Dict, Any, Optional
+from swarms.structs.agent import Agent
+from loguru import logger
+
+
+class AgentBenchmark:
+ def __init__(
+ self,
+ num_iterations: int = 5,
+ output_dir: str = "benchmark_results",
+ ):
+ self.num_iterations = num_iterations
+ self.output_dir = Path(output_dir)
+ self.output_dir.mkdir(exist_ok=True)
+
+        # Process pool for CPU-bound work (created here; not used by the current async path)
+ self.process_pool = concurrent.futures.ProcessPoolExecutor(
+ max_workers=min(os.cpu_count(), 4)
+ )
+
+        # Thread pool for I/O-bound work (created here; not used by the current async path)
+ self.thread_pool = concurrent.futures.ThreadPoolExecutor(
+ max_workers=min(os.cpu_count() * 2, 8)
+ )
+
+ self.default_queries = [
+ "Conduct an analysis of the best real undervalued ETFs",
+ "What are the top performing tech stocks this quarter?",
+ "Analyze current market trends in renewable energy sector",
+ "Compare Bitcoin and Ethereum investment potential",
+ "Evaluate the risk factors in emerging markets",
+ ]
+
+ self.agent = self._initialize_agent()
+ self.process = psutil.Process()
+
+ # Cache for storing repeated query results
+ self._query_cache = {}
+
+ def _initialize_agent(self) -> Agent:
+ return Agent(
+ agent_name="Financial-Analysis-Agent",
+ agent_description="Personal finance advisor agent",
+ # system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+ max_loops=1,
+ model_name="gpt-4o-mini",
+ dynamic_temperature_enabled=True,
+ interactive=False,
+ )
+
+ def _get_system_metrics(self) -> Dict[str, float]:
+ # Optimized system metrics collection
+ return {
+ "cpu_percent": self.process.cpu_percent(),
+ "memory_mb": self.process.memory_info().rss / 1024 / 1024,
+ }
+
+ def _calculate_statistics(
+ self, values: List[float]
+ ) -> Dict[str, float]:
+ if not values:
+ return {}
+
+ sorted_values = sorted(values)
+ n = len(sorted_values)
+ mean_val = sum(values) / n
+
+ stats = {
+ "mean": mean_val,
+ "median": sorted_values[n // 2],
+ "min": sorted_values[0],
+ "max": sorted_values[-1],
+ }
+
+ # Only calculate stdev if we have enough values
+ if n > 1:
+ stats["std_dev"] = (
+ sum((x - mean_val) ** 2 for x in values) / n
+ ) ** 0.5
+
+ return {k: round(v, 3) for k, v in stats.items()}
+
+ async def process_iteration(
+ self, query: str, iteration: int
+ ) -> Dict[str, Any]:
+ """Process a single iteration of a query"""
+ try:
+            # Cache is keyed by (query, iteration), so it only short-circuits exact repeats
+ cache_key = f"{query}_{iteration}"
+ if cache_key in self._query_cache:
+ return self._query_cache[cache_key]
+
+ iteration_start = datetime.datetime.now()
+ pre_metrics = self._get_system_metrics()
+
+            # Run the agent (synchronous call; blocks the event loop while it executes)
+ try:
+ self.agent.run(query)
+ success = True
+ except Exception as e:
+                logger.error(f"Agent run failed: {e}")
+ success = False
+
+ execution_time = (
+ datetime.datetime.now() - iteration_start
+ ).total_seconds()
+ post_metrics = self._get_system_metrics()
+
+ result = {
+ "execution_time": execution_time,
+ "success": success,
+ "pre_metrics": pre_metrics,
+ "post_metrics": post_metrics,
+ "iteration_data": {
+ "iteration": iteration + 1,
+ "execution_time": round(execution_time, 3),
+ "success": success,
+ "system_metrics": {
+ "pre": pre_metrics,
+ "post": post_metrics,
+ },
+ },
+ }
+
+ # Cache the result
+ self._query_cache[cache_key] = result
+ return result
+
+ except Exception as e:
+ logger.error(f"Error in iteration {iteration}: {e}")
+ raise
+
+ async def run_benchmark(
+ self, queries: Optional[List[str]] = None
+ ) -> Dict[str, Any]:
+ """Run the benchmark asynchronously"""
+ queries = queries or self.default_queries
+ benchmark_data = {
+ "metadata": {
+ "timestamp": datetime.datetime.now().isoformat(),
+ "num_iterations": self.num_iterations,
+ "agent_config": {
+ "model_name": self.agent.model_name,
+ "max_loops": self.agent.max_loops,
+ },
+ },
+ "results": {},
+ }
+
+ async def process_query(query: str):
+ query_results = {
+ "execution_times": [],
+ "system_metrics": [],
+ "iterations": [],
+ }
+
+ # Process iterations concurrently
+ tasks = [
+ self.process_iteration(query, i)
+ for i in range(self.num_iterations)
+ ]
+ iteration_results = await asyncio.gather(*tasks)
+
+ for result in iteration_results:
+ query_results["execution_times"].append(
+ result["execution_time"]
+ )
+ query_results["system_metrics"].append(
+ result["post_metrics"]
+ )
+ query_results["iterations"].append(
+ result["iteration_data"]
+ )
+
+ # Calculate statistics
+ query_results["statistics"] = {
+ "execution_time": self._calculate_statistics(
+ query_results["execution_times"]
+ ),
+ "memory_usage": self._calculate_statistics(
+ [
+ m["memory_mb"]
+ for m in query_results["system_metrics"]
+ ]
+ ),
+ "cpu_usage": self._calculate_statistics(
+ [
+ m["cpu_percent"]
+ for m in query_results["system_metrics"]
+ ]
+ ),
+ }
+
+ return query, query_results
+
+ # Execute all queries concurrently
+ query_tasks = [process_query(query) for query in queries]
+ query_results = await asyncio.gather(*query_tasks)
+
+ for query, results in query_results:
+ benchmark_data["results"][query] = results
+
+ return benchmark_data
+
+ def save_results(self, benchmark_data: Dict[str, Any]) -> str:
+ """Save benchmark results efficiently"""
+ timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+ filename = (
+ self.output_dir / f"benchmark_results_{timestamp}.json"
+ )
+
+ # Write results in a single operation
+ with open(filename, "w") as f:
+ json.dump(benchmark_data, f, indent=2)
+
+ logger.info(f"Benchmark results saved to: {filename}")
+ return str(filename)
+
+ def print_summary(self, results: Dict[str, Any]):
+ """Print a summary of the benchmark results"""
+ print("\n=== Benchmark Summary ===")
+ for query, data in results["results"].items():
+ print(f"\nQuery: {query[:50]}...")
+ stats = data["statistics"]["execution_time"]
+ print(f"Average time: {stats['mean']:.2f}s")
+ print(
+ f"Memory usage (avg): {data['statistics']['memory_usage']['mean']:.1f}MB"
+ )
+ print(
+ f"CPU usage (avg): {data['statistics']['cpu_usage']['mean']:.1f}%"
+ )
+
+ async def run_with_timeout(
+ self, timeout: int = 300
+ ) -> Dict[str, Any]:
+ """Run benchmark with timeout"""
+ try:
+ return await asyncio.wait_for(
+ self.run_benchmark(), timeout
+ )
+ except asyncio.TimeoutError:
+ logger.error(
+ f"Benchmark timed out after {timeout} seconds"
+ )
+ raise
+
+ def cleanup(self):
+ """Cleanup resources"""
+ self.process_pool.shutdown()
+ self.thread_pool.shutdown()
+ self._query_cache.clear()
+
+
+async def main():
+    benchmark = None
+    try:
+ # Create and run benchmark
+ benchmark = AgentBenchmark(num_iterations=1)
+
+ # Run benchmark with timeout
+ results = await benchmark.run_with_timeout(timeout=300)
+
+ # Save results
+ benchmark.save_results(results)
+
+ # Print summary
+ benchmark.print_summary(results)
+
+ except Exception as e:
+ logger.error(f"Benchmark failed: {e}")
+ finally:
+        # Cleanup resources (skip if construction failed before benchmark was bound)
+        if benchmark is not None:
+            benchmark.cleanup()
+
+
+if __name__ == "__main__":
+ # Run the async main function
+ asyncio.run(main())