pull/819/merge
Pavan Kumar 3 months ago committed by GitHub
commit 550f8901c1

.gitignore vendored

@@ -404,4 +404,3 @@ flycheck_*.el
# network security
/network-security.data

@@ -0,0 +1,284 @@
import asyncio
import concurrent.futures
import json
import os
import psutil
import datetime
from pathlib import Path
from typing import List, Dict, Any, Optional
from swarms.structs.agent import Agent
from loguru import logger
class AgentBenchmark:
def __init__(
self,
num_iterations: int = 5,
output_dir: str = "benchmark_results",
):
self.num_iterations = num_iterations
self.output_dir = Path(output_dir)
self.output_dir.mkdir(exist_ok=True)
# Use process pool for CPU-bound tasks
self.process_pool = concurrent.futures.ProcessPoolExecutor(
            max_workers=min(os.cpu_count() or 1, 4)
)
# Use thread pool for I/O-bound tasks
self.thread_pool = concurrent.futures.ThreadPoolExecutor(
            max_workers=min((os.cpu_count() or 1) * 2, 8)
)
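        # NOTE: these executors are reserved for offloading CPU-/I/O-bound work;
        # the benchmark below runs the agent inline, so they are only shut down in cleanup()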
self.default_queries = [
"Conduct an analysis of the best real undervalued ETFs",
"What are the top performing tech stocks this quarter?",
"Analyze current market trends in renewable energy sector",
"Compare Bitcoin and Ethereum investment potential",
"Evaluate the risk factors in emerging markets",
]
self.agent = self._initialize_agent()
self.process = psutil.Process()
# Cache for storing repeated query results
self._query_cache = {}
def _initialize_agent(self) -> Agent:
return Agent(
agent_name="Financial-Analysis-Agent",
agent_description="Personal finance advisor agent",
# system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
max_loops=1,
model_name="gpt-4o-mini",
dynamic_temperature_enabled=True,
interactive=False,
)
def _get_system_metrics(self) -> Dict[str, float]:
# Optimized system metrics collection
return {
"cpu_percent": self.process.cpu_percent(),
"memory_mb": self.process.memory_info().rss / 1024 / 1024,
}
def _calculate_statistics(
self, values: List[float]
) -> Dict[str, float]:
if not values:
return {}
sorted_values = sorted(values)
n = len(sorted_values)
mean_val = sum(values) / n
stats = {
"mean": mean_val,
"median": sorted_values[n // 2],
"min": sorted_values[0],
"max": sorted_values[-1],
}
        # Only calculate the (population) standard deviation if we have more than one value
if n > 1:
stats["std_dev"] = (
sum((x - mean_val) ** 2 for x in values) / n
) ** 0.5
return {k: round(v, 3) for k, v in stats.items()}
async def process_iteration(
self, query: str, iteration: int
) -> Dict[str, Any]:
"""Process a single iteration of a query"""
try:
            # Check the cache (keyed by query and iteration, so hits only occur if the same iteration is re-run)
cache_key = f"{query}_{iteration}"
if cache_key in self._query_cache:
return self._query_cache[cache_key]
iteration_start = datetime.datetime.now()
pre_metrics = self._get_system_metrics()
# Run the agent
try:
self.agent.run(query)
success = True
except Exception as e:
                logger.warning(f"Agent run failed on iteration {iteration + 1}: {e}")
success = False
execution_time = (
datetime.datetime.now() - iteration_start
).total_seconds()
post_metrics = self._get_system_metrics()
result = {
"execution_time": execution_time,
"success": success,
"pre_metrics": pre_metrics,
"post_metrics": post_metrics,
"iteration_data": {
"iteration": iteration + 1,
"execution_time": round(execution_time, 3),
"success": success,
"system_metrics": {
"pre": pre_metrics,
"post": post_metrics,
},
},
}
# Cache the result
self._query_cache[cache_key] = result
return result
except Exception as e:
logger.error(f"Error in iteration {iteration}: {e}")
raise
async def run_benchmark(
self, queries: Optional[List[str]] = None
) -> Dict[str, Any]:
"""Run the benchmark asynchronously"""
queries = queries or self.default_queries
benchmark_data = {
"metadata": {
"timestamp": datetime.datetime.now().isoformat(),
"num_iterations": self.num_iterations,
"agent_config": {
"model_name": self.agent.model_name,
"max_loops": self.agent.max_loops,
},
},
"results": {},
}
async def process_query(query: str):
query_results = {
"execution_times": [],
"system_metrics": [],
"iterations": [],
}
            # Launch all iterations via asyncio.gather (note: process_iteration contains no awaits,
            # so the agent calls still execute sequentially within the event loop)
tasks = [
self.process_iteration(query, i)
for i in range(self.num_iterations)
]
iteration_results = await asyncio.gather(*tasks)
for result in iteration_results:
query_results["execution_times"].append(
result["execution_time"]
)
query_results["system_metrics"].append(
result["post_metrics"]
)
query_results["iterations"].append(
result["iteration_data"]
)
# Calculate statistics
query_results["statistics"] = {
"execution_time": self._calculate_statistics(
query_results["execution_times"]
),
"memory_usage": self._calculate_statistics(
[
m["memory_mb"]
for m in query_results["system_metrics"]
]
),
"cpu_usage": self._calculate_statistics(
[
m["cpu_percent"]
for m in query_results["system_metrics"]
]
),
}
return query, query_results
# Execute all queries concurrently
query_tasks = [process_query(query) for query in queries]
query_results = await asyncio.gather(*query_tasks)
for query, results in query_results:
benchmark_data["results"][query] = results
return benchmark_data
def save_results(self, benchmark_data: Dict[str, Any]) -> str:
"""Save benchmark results efficiently"""
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
filename = (
self.output_dir / f"benchmark_results_{timestamp}.json"
)
# Write results in a single operation
with open(filename, "w") as f:
json.dump(benchmark_data, f, indent=2)
logger.info(f"Benchmark results saved to: {filename}")
return str(filename)
def print_summary(self, results: Dict[str, Any]):
"""Print a summary of the benchmark results"""
print("\n=== Benchmark Summary ===")
for query, data in results["results"].items():
print(f"\nQuery: {query[:50]}...")
stats = data["statistics"]["execution_time"]
print(f"Average time: {stats['mean']:.2f}s")
print(
f"Memory usage (avg): {data['statistics']['memory_usage']['mean']:.1f}MB"
)
print(
f"CPU usage (avg): {data['statistics']['cpu_usage']['mean']:.1f}%"
)
async def run_with_timeout(
self, timeout: int = 300
) -> Dict[str, Any]:
"""Run benchmark with timeout"""
try:
return await asyncio.wait_for(
self.run_benchmark(), timeout
)
except asyncio.TimeoutError:
logger.error(
f"Benchmark timed out after {timeout} seconds"
)
raise
def cleanup(self):
"""Cleanup resources"""
self.process_pool.shutdown()
self.thread_pool.shutdown()
self._query_cache.clear()
async def main():
    benchmark = None
    try:
        # Create and run benchmark
        benchmark = AgentBenchmark(num_iterations=1)
        # Run benchmark with timeout
        results = await benchmark.run_with_timeout(timeout=300)
        # Save results
        benchmark.save_results(results)
        # Print summary
        benchmark.print_summary(results)
    except Exception as e:
        logger.error(f"Benchmark failed: {e}")
    finally:
        # Cleanup resources (guarded in case construction failed)
        if benchmark is not None:
            benchmark.cleanup()
if __name__ == "__main__":
# Run the async main function
asyncio.run(main())

@@ -0,0 +1,512 @@
{
"metadata": {
"timestamp": "2025-04-18T13:25:17.781535",
"num_iterations": 3,
"agent_config": {
"model_name": "gpt-4o-mini",
"max_loops": 2
}
},
"results": {
"Conduct an analysis of the best real undervalued ETFs": {
"execution_times": [
3.394164800643921,
0.2887423038482666,
0.15843510627746582
],
"system_metrics": [
{
"cpu_percent": 27.1,
"memory_percent": 84.4,
"process_memory_mb": 175.5
},
{
"cpu_percent": 30.4,
"memory_percent": 84.8,
"process_memory_mb": 175.984375
},
{
"cpu_percent": 24.9,
"memory_percent": 84.8,
"process_memory_mb": 176.125
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 3.394,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 35.3,
"memory_percent": 84.2,
"process_memory_mb": 185.453125
},
"post": {
"cpu_percent": 27.1,
"memory_percent": 84.4,
"process_memory_mb": 175.5
}
}
},
{
"iteration": 2,
"execution_time": 0.289,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_percent": 84.4,
"process_memory_mb": 175.53125
},
"post": {
"cpu_percent": 30.4,
"memory_percent": 84.8,
"process_memory_mb": 175.984375
}
}
},
{
"iteration": 3,
"execution_time": 0.158,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_percent": 84.8,
"process_memory_mb": 175.984375
},
"post": {
"cpu_percent": 24.9,
"memory_percent": 84.8,
"process_memory_mb": 176.125
}
}
}
],
"statistics": {
"execution_time": {
"mean": 1.28,
"median": 0.289,
"min": 0.158,
"max": 3.394,
"std_dev": 1.832
},
"memory_usage": {
"mean": 175.87,
"median": 175.984,
"min": 175.5,
"max": 176.125,
"std_dev": 0.328
},
"cpu_usage": {
"mean": 27.467,
"median": 27.1,
"min": 24.9,
"max": 30.4,
"std_dev": 2.768
}
}
},
"What are the top performing tech stocks this quarter?": {
"execution_times": [
0.6481029987335205,
0.22909188270568848,
0.24907231330871582
],
"system_metrics": [
{
"cpu_percent": 21.2,
"memory_percent": 84.7,
"process_memory_mb": 176.25
},
{
"cpu_percent": 0.0,
"memory_percent": 83.8,
"process_memory_mb": 176.40625
},
{
"cpu_percent": 0.0,
"memory_percent": 83.9,
"process_memory_mb": 176.765625
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 0.648,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 50.0,
"memory_percent": 84.8,
"process_memory_mb": 176.125
},
"post": {
"cpu_percent": 21.2,
"memory_percent": 84.7,
"process_memory_mb": 176.25
}
}
},
{
"iteration": 2,
"execution_time": 0.229,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_percent": 84.7,
"process_memory_mb": 176.25
},
"post": {
"cpu_percent": 0.0,
"memory_percent": 83.8,
"process_memory_mb": 176.40625
}
}
},
{
"iteration": 3,
"execution_time": 0.249,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_percent": 83.8,
"process_memory_mb": 176.40625
},
"post": {
"cpu_percent": 0.0,
"memory_percent": 83.9,
"process_memory_mb": 176.765625
}
}
}
],
"statistics": {
"execution_time": {
"mean": 0.375,
"median": 0.249,
"min": 0.229,
"max": 0.648,
"std_dev": 0.236
},
"memory_usage": {
"mean": 176.474,
"median": 176.406,
"min": 176.25,
"max": 176.766,
"std_dev": 0.264
},
"cpu_usage": {
"mean": 7.067,
"median": 0.0,
"min": 0.0,
"max": 21.2,
"std_dev": 12.24
}
}
},
"Analyze current market trends in renewable energy sector": {
"execution_times": [
2.0344760417938232,
0.48967909812927246,
0.08599114418029785
],
"system_metrics": [
{
"cpu_percent": 22.9,
"memory_percent": 83.7,
"process_memory_mb": 168.40625
},
{
"cpu_percent": 21.9,
"memory_percent": 83.6,
"process_memory_mb": 168.3125
},
{
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 166.328125
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 2.034,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_percent": 83.9,
"process_memory_mb": 176.78125
},
"post": {
"cpu_percent": 22.9,
"memory_percent": 83.7,
"process_memory_mb": 168.40625
}
}
},
{
"iteration": 2,
"execution_time": 0.49,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 33.3,
"memory_percent": 83.7,
"process_memory_mb": 168.421875
},
"post": {
"cpu_percent": 21.9,
"memory_percent": 83.6,
"process_memory_mb": 168.3125
}
}
},
{
"iteration": 3,
"execution_time": 0.086,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 168.3125
},
"post": {
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 166.328125
}
}
}
],
"statistics": {
"execution_time": {
"mean": 0.87,
"median": 0.49,
"min": 0.086,
"max": 2.034,
"std_dev": 1.028
},
"memory_usage": {
"mean": 167.682,
"median": 168.312,
"min": 166.328,
"max": 168.406,
"std_dev": 1.174
},
"cpu_usage": {
"mean": 14.933,
"median": 21.9,
"min": 0.0,
"max": 22.9,
"std_dev": 12.942
}
}
},
"Compare Bitcoin and Ethereum investment potential": {
"execution_times": [
0.08068418502807617,
0.08303999900817871,
0.08367633819580078
],
"system_metrics": [
{
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 159.078125
},
{
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 159.21875
},
{
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 143.015625
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 0.081,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 166.4375
},
"post": {
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 159.078125
}
}
},
{
"iteration": 2,
"execution_time": 0.083,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 159.078125
},
"post": {
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 159.21875
}
}
},
{
"iteration": 3,
"execution_time": 0.084,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 159.21875
},
"post": {
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 143.015625
}
}
}
],
"statistics": {
"execution_time": {
"mean": 0.082,
"median": 0.083,
"min": 0.081,
"max": 0.084,
"std_dev": 0.002
},
"memory_usage": {
"mean": 153.771,
"median": 159.078,
"min": 143.016,
"max": 159.219,
"std_dev": 9.315
},
"cpu_usage": {
"mean": 0.0,
"median": 0.0,
"min": 0.0,
"max": 0.0,
"std_dev": 0.0
}
}
},
"Evaluate the risk factors in emerging markets": {
"execution_times": [
0.08391690254211426,
0.08319473266601562,
0.08199191093444824
],
"system_metrics": [
{
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 143.28125
},
{
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 130.984375
},
{
"cpu_percent": 24.1,
"memory_percent": 83.5,
"process_memory_mb": 77.046875
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 0.084,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 143.015625
},
"post": {
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 143.28125
}
}
},
{
"iteration": 2,
"execution_time": 0.083,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 143.28125
},
"post": {
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 130.984375
}
}
},
{
"iteration": 3,
"execution_time": 0.082,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_percent": 83.6,
"process_memory_mb": 130.984375
},
"post": {
"cpu_percent": 24.1,
"memory_percent": 83.5,
"process_memory_mb": 77.046875
}
}
}
],
"statistics": {
"execution_time": {
"mean": 0.083,
"median": 0.083,
"min": 0.082,
"max": 0.084,
"std_dev": 0.001
},
"memory_usage": {
"mean": 117.104,
"median": 130.984,
"min": 77.047,
"max": 143.281,
"std_dev": 35.231
},
"cpu_usage": {
"mean": 8.033,
"median": 0.0,
"min": 0.0,
"max": 24.1,
"std_dev": 13.914
}
}
}
}
}

@@ -0,0 +1,267 @@
{
"metadata": {
"timestamp": "2025-04-18T13:25:33.415938",
"num_iterations": 1,
"agent_config": {
"model_name": "gpt-4o-mini",
"max_loops": 1
}
},
"results": {
"Conduct an analysis of the best real undervalued ETFs": {
"execution_times": [
14.138006687164307
],
"system_metrics": [
{
"cpu_percent": 20.7,
"memory_percent": 84.4,
"process_memory_mb": 65.859375
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 14.138,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 37.6,
"memory_percent": 84.2,
"process_memory_mb": 239.515625
},
"post": {
"cpu_percent": 20.7,
"memory_percent": 84.4,
"process_memory_mb": 65.859375
}
}
}
],
"statistics": {
"execution_time": {
"mean": 14.138,
"median": 14.138,
"min": 14.138,
"max": 14.138
},
"memory_usage": {
"mean": 65.859,
"median": 65.859,
"min": 65.859,
"max": 65.859
},
"cpu_usage": {
"mean": 20.7,
"median": 20.7,
"min": 20.7,
"max": 20.7
}
}
},
"What are the top performing tech stocks this quarter?": {
"execution_times": [
17.769688844680786
],
"system_metrics": [
{
"cpu_percent": 18.1,
"memory_percent": 83.0,
"process_memory_mb": 56.75
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 17.77,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 100.0,
"memory_percent": 84.4,
"process_memory_mb": 66.234375
},
"post": {
"cpu_percent": 18.1,
"memory_percent": 83.0,
"process_memory_mb": 56.75
}
}
}
],
"statistics": {
"execution_time": {
"mean": 17.77,
"median": 17.77,
"min": 17.77,
"max": 17.77
},
"memory_usage": {
"mean": 56.75,
"median": 56.75,
"min": 56.75,
"max": 56.75
},
"cpu_usage": {
"mean": 18.1,
"median": 18.1,
"min": 18.1,
"max": 18.1
}
}
},
"Analyze current market trends in renewable energy sector": {
"execution_times": [
14.471845149993896
],
"system_metrics": [
{
"cpu_percent": 15.5,
"memory_percent": 82.3,
"process_memory_mb": 56.671875
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 14.472,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 100.0,
"memory_percent": 83.0,
"process_memory_mb": 57.015625
},
"post": {
"cpu_percent": 15.5,
"memory_percent": 82.3,
"process_memory_mb": 56.671875
}
}
}
],
"statistics": {
"execution_time": {
"mean": 14.472,
"median": 14.472,
"min": 14.472,
"max": 14.472
},
"memory_usage": {
"mean": 56.672,
"median": 56.672,
"min": 56.672,
"max": 56.672
},
"cpu_usage": {
"mean": 15.5,
"median": 15.5,
"min": 15.5,
"max": 15.5
}
}
},
"Compare Bitcoin and Ethereum investment potential": {
"execution_times": [
15.340633869171143
],
"system_metrics": [
{
"cpu_percent": 16.5,
"memory_percent": 81.8,
"process_memory_mb": 54.5625
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 15.341,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_percent": 82.4,
"process_memory_mb": 56.953125
},
"post": {
"cpu_percent": 16.5,
"memory_percent": 81.8,
"process_memory_mb": 54.5625
}
}
}
],
"statistics": {
"execution_time": {
"mean": 15.341,
"median": 15.341,
"min": 15.341,
"max": 15.341
},
"memory_usage": {
"mean": 54.562,
"median": 54.562,
"min": 54.562,
"max": 54.562
},
"cpu_usage": {
"mean": 16.5,
"median": 16.5,
"min": 16.5,
"max": 16.5
}
}
},
"Evaluate the risk factors in emerging markets": {
"execution_times": [
19.98606514930725
],
"system_metrics": [
{
"cpu_percent": 18.5,
"memory_percent": 82.2,
"process_memory_mb": 56.15625
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 19.986,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_percent": 81.8,
"process_memory_mb": 55.046875
},
"post": {
"cpu_percent": 18.5,
"memory_percent": 82.2,
"process_memory_mb": 56.15625
}
}
}
],
"statistics": {
"execution_time": {
"mean": 19.986,
"median": 19.986,
"min": 19.986,
"max": 19.986
},
"memory_usage": {
"mean": 56.156,
"median": 56.156,
"min": 56.156,
"max": 56.156
},
"cpu_usage": {
"mean": 18.5,
"median": 18.5,
"min": 18.5,
"max": 18.5
}
}
}
}
}

@@ -0,0 +1,467 @@
{
"metadata": {
"timestamp": "2025-04-18T13:30:16.543562",
"num_iterations": 3,
"agent_config": {
"model_name": "gpt-4o-mini",
"max_loops": 1
}
},
"results": {
"Conduct an analysis of the best real undervalued ETFs": {
"execution_times": [
14.789254,
13.413338,
13.084335
],
"system_metrics": [
{
"cpu_percent": 5.1,
"memory_mb": 67.765625
},
{
"cpu_percent": 3.7,
"memory_mb": 199.875
},
{
"cpu_percent": 16.2,
"memory_mb": 203.453125
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 14.789,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_mb": 243.046875
},
"post": {
"cpu_percent": 5.1,
"memory_mb": 67.765625
}
}
},
{
"iteration": 2,
"execution_time": 13.413,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 202.0,
"memory_mb": 243.109375
},
"post": {
"cpu_percent": 3.7,
"memory_mb": 199.875
}
}
},
{
"iteration": 3,
"execution_time": 13.084,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 107.1,
"memory_mb": 243.21875
},
"post": {
"cpu_percent": 16.2,
"memory_mb": 203.453125
}
}
}
],
"statistics": {
"execution_time": {
"mean": 13.762,
"median": 13.413,
"min": 13.084,
"max": 14.789,
"std_dev": 0.738
},
"memory_usage": {
"mean": 157.031,
"median": 199.875,
"min": 67.766,
"max": 203.453,
"std_dev": 63.137
},
"cpu_usage": {
"mean": 8.333,
"median": 5.1,
"min": 3.7,
"max": 16.2,
"std_dev": 5.592
}
}
},
"What are the top performing tech stocks this quarter?": {
"execution_times": [
14.40021,
7.619928,
9.870042
],
"system_metrics": [
{
"cpu_percent": 2.5,
"memory_mb": 69.734375
},
{
"cpu_percent": 0.5,
"memory_mb": 204.46875
},
{
"cpu_percent": 0.8,
"memory_mb": 204.640625
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 14.4,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 109.3,
"memory_mb": 243.1875
},
"post": {
"cpu_percent": 2.5,
"memory_mb": 69.734375
}
}
},
{
"iteration": 2,
"execution_time": 7.62,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 194.6,
"memory_mb": 243.328125
},
"post": {
"cpu_percent": 0.5,
"memory_mb": 204.46875
}
}
},
{
"iteration": 3,
"execution_time": 9.87,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 187.1,
"memory_mb": 243.734375
},
"post": {
"cpu_percent": 0.8,
"memory_mb": 204.640625
}
}
}
],
"statistics": {
"execution_time": {
"mean": 10.63,
"median": 9.87,
"min": 7.62,
"max": 14.4,
"std_dev": 2.82
},
"memory_usage": {
"mean": 159.615,
"median": 204.469,
"min": 69.734,
"max": 204.641,
"std_dev": 63.555
},
"cpu_usage": {
"mean": 1.267,
"median": 0.8,
"min": 0.5,
"max": 2.5,
"std_dev": 0.881
}
}
},
"Analyze current market trends in renewable energy sector": {
"execution_times": [
3.193721,
11.01429,
13.543417
],
"system_metrics": [
{
"cpu_percent": 5.7,
"memory_mb": 223.109375
},
{
"cpu_percent": 1.4,
"memory_mb": 203.46875
},
{
"cpu_percent": 9.9,
"memory_mb": 199.1875
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 3.194,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 223.0,
"memory_mb": 243.8125
},
"post": {
"cpu_percent": 5.7,
"memory_mb": 223.109375
}
}
},
{
"iteration": 2,
"execution_time": 11.014,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 270.1,
"memory_mb": 243.953125
},
"post": {
"cpu_percent": 1.4,
"memory_mb": 203.46875
}
}
},
{
"iteration": 3,
"execution_time": 13.543,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 340.8,
"memory_mb": 244.0625
},
"post": {
"cpu_percent": 9.9,
"memory_mb": 199.1875
}
}
}
],
"statistics": {
"execution_time": {
"mean": 9.25,
"median": 11.014,
"min": 3.194,
"max": 13.543,
"std_dev": 4.405
},
"memory_usage": {
"mean": 208.589,
"median": 203.469,
"min": 199.188,
"max": 223.109,
"std_dev": 10.415
},
"cpu_usage": {
"mean": 5.667,
"median": 5.7,
"min": 1.4,
"max": 9.9,
"std_dev": 3.47
}
}
},
"Compare Bitcoin and Ethereum investment potential": {
"execution_times": [
3.424122,
12.162575,
9.831582
],
"system_metrics": [
{
"cpu_percent": 1.9,
"memory_mb": 217.640625
},
{
"cpu_percent": 2.9,
"memory_mb": 203.171875
},
{
"cpu_percent": 31.2,
"memory_mb": 204.765625
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 3.424,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 364.2,
"memory_mb": 245.671875
},
"post": {
"cpu_percent": 1.9,
"memory_mb": 217.640625
}
}
},
{
"iteration": 2,
"execution_time": 12.163,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 659.7,
"memory_mb": 245.734375
},
"post": {
"cpu_percent": 2.9,
"memory_mb": 203.171875
}
}
},
{
"iteration": 3,
"execution_time": 9.832,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 612.4,
"memory_mb": 245.953125
},
"post": {
"cpu_percent": 31.2,
"memory_mb": 204.765625
}
}
}
],
"statistics": {
"execution_time": {
"mean": 8.473,
"median": 9.832,
"min": 3.424,
"max": 12.163,
"std_dev": 3.695
},
"memory_usage": {
"mean": 208.526,
"median": 204.766,
"min": 203.172,
"max": 217.641,
"std_dev": 6.478
},
"cpu_usage": {
"mean": 12.0,
"median": 2.9,
"min": 1.9,
"max": 31.2,
"std_dev": 13.583
}
}
},
"Evaluate the risk factors in emerging markets": {
"execution_times": [
2.948636,
12.942413,
11.361344
],
"system_metrics": [
{
"cpu_percent": 402.2,
"memory_mb": 246.078125
},
{
"cpu_percent": 2.2,
"memory_mb": 203.34375
},
{
"cpu_percent": 4.5,
"memory_mb": 203.59375
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 2.949,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 1494.6,
"memory_mb": 246.140625
},
"post": {
"cpu_percent": 402.2,
"memory_mb": 246.078125
}
}
},
{
"iteration": 2,
"execution_time": 12.942,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 529.1,
"memory_mb": 246.265625
},
"post": {
"cpu_percent": 2.2,
"memory_mb": 203.34375
}
}
},
{
"iteration": 3,
"execution_time": 11.361,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 578.8,
"memory_mb": 246.65625
},
"post": {
"cpu_percent": 4.5,
"memory_mb": 203.59375
}
}
}
],
"statistics": {
"execution_time": {
"mean": 9.084,
"median": 11.361,
"min": 2.949,
"max": 12.942,
"std_dev": 4.386
},
"memory_usage": {
"mean": 217.672,
"median": 203.594,
"min": 203.344,
"max": 246.078,
"std_dev": 20.087
},
"cpu_usage": {
"mean": 136.3,
"median": 4.5,
"min": 2.2,
"max": 402.2,
"std_dev": 188.022
}
}
}
}
}

@@ -0,0 +1,467 @@
{
"metadata": {
"timestamp": "2025-04-18T13:30:51.812685",
"num_iterations": 3,
"agent_config": {
"model_name": "gpt-4o-mini",
"max_loops": 1
}
},
"results": {
"Conduct an analysis of the best real undervalued ETFs": {
"execution_times": [
8.791961,
15.974623,
10.00903
],
"system_metrics": [
{
"cpu_percent": 3.9,
"memory_mb": 73.96875
},
{
"cpu_percent": 4.6,
"memory_mb": 71.171875
},
{
"cpu_percent": 21.5,
"memory_mb": 76.015625
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 8.792,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_mb": 133.765625
},
"post": {
"cpu_percent": 3.9,
"memory_mb": 73.96875
}
}
},
{
"iteration": 2,
"execution_time": 15.975,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 96.7,
"memory_mb": 133.8125
},
"post": {
"cpu_percent": 4.6,
"memory_mb": 71.171875
}
}
},
{
"iteration": 3,
"execution_time": 10.009,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 76.2,
"memory_mb": 137.15625
},
"post": {
"cpu_percent": 21.5,
"memory_mb": 76.015625
}
}
}
],
"statistics": {
"execution_time": {
"mean": 11.592,
"median": 10.009,
"min": 8.792,
"max": 15.975,
"std_dev": 3.139
},
"memory_usage": {
"mean": 73.719,
"median": 73.969,
"min": 71.172,
"max": 76.016,
"std_dev": 1.985
},
"cpu_usage": {
"mean": 10.0,
"median": 4.6,
"min": 3.9,
"max": 21.5,
"std_dev": 8.137
}
}
},
"What are the top performing tech stocks this quarter?": {
"execution_times": [
10.980763,
11.800057,
18.108609
],
"system_metrics": [
{
"cpu_percent": 2.8,
"memory_mb": 76.203125
},
{
"cpu_percent": 2.4,
"memory_mb": 76.65625
},
{
"cpu_percent": 0.4,
"memory_mb": 69.515625
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 10.981,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 40.8,
"memory_mb": 137.40625
},
"post": {
"cpu_percent": 2.8,
"memory_mb": 76.203125
}
}
},
{
"iteration": 2,
"execution_time": 11.8,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 87.4,
"memory_mb": 137.4375
},
"post": {
"cpu_percent": 2.4,
"memory_mb": 76.65625
}
}
},
{
"iteration": 3,
"execution_time": 18.109,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 180.9,
"memory_mb": 137.640625
},
"post": {
"cpu_percent": 0.4,
"memory_mb": 69.515625
}
}
}
],
"statistics": {
"execution_time": {
"mean": 13.63,
"median": 11.8,
"min": 10.981,
"max": 18.109,
"std_dev": 3.185
},
"memory_usage": {
"mean": 74.125,
"median": 76.203,
"min": 69.516,
"max": 76.656,
"std_dev": 3.265
},
"cpu_usage": {
"mean": 1.867,
"median": 2.4,
"min": 0.4,
"max": 2.8,
"std_dev": 1.05
}
}
},
"Analyze current market trends in renewable energy sector": {
"execution_times": [
15.015125,
9.916293,
6.958686
],
"system_metrics": [
{
"cpu_percent": 1.3,
"memory_mb": 69.953125
},
{
"cpu_percent": 14.6,
"memory_mb": 74.765625
},
{
"cpu_percent": 5.0,
"memory_mb": 72.90625
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 15.015,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 169.6,
"memory_mb": 137.9375
},
"post": {
"cpu_percent": 1.3,
"memory_mb": 69.953125
}
}
},
{
"iteration": 2,
"execution_time": 9.916,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 212.3,
"memory_mb": 138.171875
},
"post": {
"cpu_percent": 14.6,
"memory_mb": 74.765625
}
}
},
{
"iteration": 3,
"execution_time": 6.959,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 149.0,
"memory_mb": 138.4375
},
"post": {
"cpu_percent": 5.0,
"memory_mb": 72.90625
}
}
}
],
"statistics": {
"execution_time": {
"mean": 10.63,
"median": 9.916,
"min": 6.959,
"max": 15.015,
"std_dev": 3.328
},
"memory_usage": {
"mean": 72.542,
"median": 72.906,
"min": 69.953,
"max": 74.766,
"std_dev": 1.982
},
"cpu_usage": {
"mean": 6.967,
"median": 5.0,
"min": 1.3,
"max": 14.6,
"std_dev": 5.605
}
}
},
"Compare Bitcoin and Ethereum investment potential": {
"execution_times": [
15.44115,
13.797926,
8.355462
],
"system_metrics": [
{
"cpu_percent": 2.4,
"memory_mb": 70.59375
},
{
"cpu_percent": 1.0,
"memory_mb": 69.875
},
{
"cpu_percent": 1.1,
"memory_mb": 73.5
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 15.441,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 218.0,
"memory_mb": 138.515625
},
"post": {
"cpu_percent": 2.4,
"memory_mb": 70.59375
}
}
},
{
"iteration": 2,
"execution_time": 13.798,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 298.8,
"memory_mb": 138.59375
},
"post": {
"cpu_percent": 1.0,
"memory_mb": 69.875
}
}
},
{
"iteration": 3,
"execution_time": 8.355,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 226.1,
"memory_mb": 139.984375
},
"post": {
"cpu_percent": 1.1,
"memory_mb": 73.5
}
}
}
],
"statistics": {
"execution_time": {
"mean": 12.532,
"median": 13.798,
"min": 8.355,
"max": 15.441,
"std_dev": 3.028
},
"memory_usage": {
"mean": 71.323,
"median": 70.594,
"min": 69.875,
"max": 73.5,
"std_dev": 1.567
},
"cpu_usage": {
"mean": 1.5,
"median": 1.1,
"min": 1.0,
"max": 2.4,
"std_dev": 0.638
}
}
},
"Evaluate the risk factors in emerging markets": {
"execution_times": [
6.380516,
9.943111,
9.821866
],
"system_metrics": [
{
"cpu_percent": 126.4,
"memory_mb": 118.28125
},
{
"cpu_percent": 18.0,
"memory_mb": 75.28125
},
{
"cpu_percent": 1.8,
"memory_mb": 74.1875
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 6.381,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 83.4,
"memory_mb": 140.046875
},
"post": {
"cpu_percent": 126.4,
"memory_mb": 118.28125
}
}
},
{
"iteration": 2,
"execution_time": 9.943,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 197.8,
"memory_mb": 140.109375
},
"post": {
"cpu_percent": 18.0,
"memory_mb": 75.28125
}
}
},
{
"iteration": 3,
"execution_time": 9.822,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 181.7,
"memory_mb": 140.171875
},
"post": {
"cpu_percent": 1.8,
"memory_mb": 74.1875
}
}
}
],
"statistics": {
"execution_time": {
"mean": 8.715,
"median": 9.822,
"min": 6.381,
"max": 9.943,
"std_dev": 1.652
},
"memory_usage": {
"mean": 89.25,
"median": 75.281,
"min": 74.188,
"max": 118.281,
"std_dev": 20.533
},
"cpu_usage": {
"mean": 48.733,
"median": 18.0,
"min": 1.8,
"max": 126.4,
"std_dev": 55.315
}
}
}
}
}

@@ -0,0 +1,252 @@
{
"metadata": {
"timestamp": "2025-04-18T13:31:55.055663",
"num_iterations": 1,
"agent_config": {
"model_name": "gpt-4o-mini",
"max_loops": 1
}
},
"results": {
"Conduct an analysis of the best real undervalued ETFs": {
"execution_times": [
11.214983
],
"system_metrics": [
{
"cpu_percent": 1.2,
"memory_mb": 187.140625
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 11.215,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_mb": 192.546875
},
"post": {
"cpu_percent": 1.2,
"memory_mb": 187.140625
}
}
}
],
"statistics": {
"execution_time": {
"mean": 11.215,
"median": 11.215,
"min": 11.215,
"max": 11.215
},
"memory_usage": {
"mean": 187.141,
"median": 187.141,
"min": 187.141,
"max": 187.141
},
"cpu_usage": {
"mean": 1.2,
"median": 1.2,
"min": 1.2,
"max": 1.2
}
}
},
"What are the top performing tech stocks this quarter?": {
"execution_times": [
13.182044
],
"system_metrics": [
{
"cpu_percent": 0.3,
"memory_mb": 57.671875
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 13.182,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 96.3,
"memory_mb": 187.140625
},
"post": {
"cpu_percent": 0.3,
"memory_mb": 57.671875
}
}
}
],
"statistics": {
"execution_time": {
"mean": 13.182,
"median": 13.182,
"min": 13.182,
"max": 13.182
},
"memory_usage": {
"mean": 57.672,
"median": 57.672,
"min": 57.672,
"max": 57.672
},
"cpu_usage": {
"mean": 0.3,
"median": 0.3,
"min": 0.3,
"max": 0.3
}
}
},
"Analyze current market trends in renewable energy sector": {
"execution_times": [
11.858239
],
"system_metrics": [
{
"cpu_percent": 0.3,
"memory_mb": 56.53125
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 11.858,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 98.1,
"memory_mb": 57.8125
},
"post": {
"cpu_percent": 0.3,
"memory_mb": 56.53125
}
}
}
],
"statistics": {
"execution_time": {
"mean": 11.858,
"median": 11.858,
"min": 11.858,
"max": 11.858
},
"memory_usage": {
"mean": 56.531,
"median": 56.531,
"min": 56.531,
"max": 56.531
},
"cpu_usage": {
"mean": 0.3,
"median": 0.3,
"min": 0.3,
"max": 0.3
}
}
},
"Compare Bitcoin and Ethereum investment potential": {
"execution_times": [
25.299971
],
"system_metrics": [
{
"cpu_percent": 0.1,
"memory_mb": 55.734375
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 25.3,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 89.8,
"memory_mb": 56.671875
},
"post": {
"cpu_percent": 0.1,
"memory_mb": 55.734375
}
}
}
],
"statistics": {
"execution_time": {
"mean": 25.3,
"median": 25.3,
"min": 25.3,
"max": 25.3
},
"memory_usage": {
"mean": 55.734,
"median": 55.734,
"min": 55.734,
"max": 55.734
},
"cpu_usage": {
"mean": 0.1,
"median": 0.1,
"min": 0.1,
"max": 0.1
}
}
},
"Evaluate the risk factors in emerging markets": {
"execution_times": [
11.951775
],
"system_metrics": [
{
"cpu_percent": 0.3,
"memory_mb": 55.5625
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 11.952,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 56.3,
"memory_mb": 55.890625
},
"post": {
"cpu_percent": 0.3,
"memory_mb": 55.5625
}
}
}
],
"statistics": {
"execution_time": {
"mean": 11.952,
"median": 11.952,
"min": 11.952,
"max": 11.952
},
"memory_usage": {
"mean": 55.562,
"median": 55.562,
"min": 55.562,
"max": 55.562
},
"cpu_usage": {
"mean": 0.3,
"median": 0.3,
"min": 0.3,
"max": 0.3
}
}
}
}
}

@@ -0,0 +1,252 @@
{
"metadata": {
"timestamp": "2025-04-18T13:34:14.487430",
"num_iterations": 1,
"agent_config": {
"model_name": "gpt-4o-mini",
"max_loops": 1
}
},
"results": {
"Conduct an analysis of the best real undervalued ETFs": {
"execution_times": [
9.132072
],
"system_metrics": [
{
"cpu_percent": 1.8,
"memory_mb": 66.5
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 9.132,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 0.0,
"memory_mb": 229.375
},
"post": {
"cpu_percent": 1.8,
"memory_mb": 66.5
}
}
}
],
"statistics": {
"execution_time": {
"mean": 9.132,
"median": 9.132,
"min": 9.132,
"max": 9.132
},
"memory_usage": {
"mean": 66.5,
"median": 66.5,
"min": 66.5,
"max": 66.5
},
"cpu_usage": {
"mean": 1.8,
"median": 1.8,
"min": 1.8,
"max": 1.8
}
}
},
"What are the top performing tech stocks this quarter?": {
"execution_times": [
8.669393
],
"system_metrics": [
{
"cpu_percent": 0.3,
"memory_mb": 73.859375
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 8.669,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 86.7,
"memory_mb": 66.609375
},
"post": {
"cpu_percent": 0.3,
"memory_mb": 73.859375
}
}
}
],
"statistics": {
"execution_time": {
"mean": 8.669,
"median": 8.669,
"min": 8.669,
"max": 8.669
},
"memory_usage": {
"mean": 73.859,
"median": 73.859,
"min": 73.859,
"max": 73.859
},
"cpu_usage": {
"mean": 0.3,
"median": 0.3,
"min": 0.3,
"max": 0.3
}
}
},
"Analyze current market trends in renewable energy sector": {
"execution_times": [
10.922691
],
"system_metrics": [
{
"cpu_percent": 0.4,
"memory_mb": 55.6875
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 10.923,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 108.3,
"memory_mb": 73.859375
},
"post": {
"cpu_percent": 0.4,
"memory_mb": 55.6875
}
}
}
],
"statistics": {
"execution_time": {
"mean": 10.923,
"median": 10.923,
"min": 10.923,
"max": 10.923
},
"memory_usage": {
"mean": 55.688,
"median": 55.688,
"min": 55.688,
"max": 55.688
},
"cpu_usage": {
"mean": 0.4,
"median": 0.4,
"min": 0.4,
"max": 0.4
}
}
},
"Compare Bitcoin and Ethereum investment potential": {
"execution_times": [
13.72297
],
"system_metrics": [
{
"cpu_percent": 0.3,
"memory_mb": 55.671875
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 13.723,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 87.7,
"memory_mb": 55.828125
},
"post": {
"cpu_percent": 0.3,
"memory_mb": 55.671875
}
}
}
],
"statistics": {
"execution_time": {
"mean": 13.723,
"median": 13.723,
"min": 13.723,
"max": 13.723
},
"memory_usage": {
"mean": 55.672,
"median": 55.672,
"min": 55.672,
"max": 55.672
},
"cpu_usage": {
"mean": 0.3,
"median": 0.3,
"min": 0.3,
"max": 0.3
}
}
},
"Evaluate the risk factors in emerging markets": {
"execution_times": [
14.099555
],
"system_metrics": [
{
"cpu_percent": 0.3,
"memory_mb": 56.0
}
],
"iterations": [
{
"iteration": 1,
"execution_time": 14.1,
"success": true,
"system_metrics": {
"pre": {
"cpu_percent": 61.7,
"memory_mb": 55.8125
},
"post": {
"cpu_percent": 0.3,
"memory_mb": 56.0
}
}
}
],
"statistics": {
"execution_time": {
"mean": 14.1,
"median": 14.1,
"min": 14.1,
"max": 14.1
},
"memory_usage": {
"mean": 56.0,
"median": 56.0,
"min": 56.0,
"max": 56.0
},
"cpu_usage": {
"mean": 0.3,
"median": 0.3,
"min": 0.3,
"max": 0.3
}
}
}
}
}

@@ -0,0 +1,24 @@
# Basic Usage Guide
## Getting Started
This guide demonstrates how to use the basic features of the Swarms framework.
### Basic Agent Example
```python
from swarms.structs.agent import Agent
# Initialize agent
agent = Agent(
agent_name="Basic-Example-Agent",
agent_description="A simple example agent",
system_prompt="You are a helpful assistant.",
model_name="gpt-4",
)
# Run the agent
response = agent.run("What is 2+2?")
print(f"Agent response: {response}")
```

@@ -0,0 +1,63 @@
# Basic Agent Setup with MCP
## Overview
This document shows how to set up a basic Swarms agent with MCP (Model Context Protocol) integration for client-side operations.
## Basic Agent Setup
```python
from swarms import Agent
from swarms.tools.mcp_integration import MCPServerSseParams
# Configure MCP server parameters
mcp_params = MCPServerSseParams(
url="http://localhost:8081/sse", # MCP server SSE endpoint
headers={"Accept": "text/event-stream"}, # Required for SSE
timeout=5.0 # Connection timeout in seconds
)
# Initialize agent with MCP configuration
agent = Agent(
agent_name="basic_agent", # Name of your agent
system_prompt="You are a helpful assistant", # Agent's system prompt
mcp_servers=[mcp_params], # List of MCP server configurations
max_loops=5, # Maximum number of loops for task execution
verbose=True # Enable verbose output
)
# Run the agent
result = agent.run("Your task here")
print(result)
```
## Required Parameters
1. **MCP Server Parameters**:
- `url`: The SSE endpoint of your MCP server
- `headers`: Must include `Accept: text/event-stream`
- `timeout`: Connection timeout in seconds
2. **Agent Parameters**:
- `agent_name`: Name of your agent
- `system_prompt`: Agent's system prompt
- `mcp_servers`: List of MCP server configurations
- `max_loops`: Maximum number of loops for task execution
- `verbose`: Enable verbose output for debugging
## Example Usage
```python
# Create agent
agent = Agent(
agent_name="math_agent",
system_prompt="You are a math assistant",
mcp_servers=[mcp_params],
max_loops=5,
verbose=True
)
# Run a math task
result = agent.run("Add 5 and 3")
print(result) # Should return 8
```

@@ -0,0 +1,71 @@
from swarms.structs.agent import Agent
from swarms.prompts.finance_agent_sys_prompt import FINANCIAL_AGENT_SYS_PROMPT
# Technical Analysis Specialist
technical_analyst = Agent(
agent_name="Technical-Analysis-Expert",
agent_description="Advanced technical analysis specialist focusing on complex market patterns",
system_prompt="""You are an expert Technical Analyst specializing in:
1. Advanced Pattern Recognition (Elliot Wave, Wyckoff Method)
2. Multi-timeframe Analysis
3. Volume Profile Analysis
4. Market Structure Analysis
5. Intermarket Analysis""",
max_loops=3,
model_name="gpt-4"
)
# Fundamental Analysis Expert
fundamental_analyst = Agent(
agent_name="Fundamental-Analysis-Expert",
agent_description="Deep fundamental analysis specialist",
system_prompt="""You are a Fundamental Analysis expert focusing on:
1. Advanced Financial Statement Analysis
2. Economic Indicator Impact Assessment
3. Industry Competitive Analysis
4. Global Macro Trends
5. Corporate Governance Evaluation""",
max_loops=3,
model_name="gpt-4"
)
# Risk Management Specialist
risk_analyst = Agent(
agent_name="Risk-Management-Expert",
agent_description="Complex risk analysis and management specialist",
system_prompt="""You are a Risk Management expert specializing in:
1. Portfolio Risk Assessment
2. Value at Risk (VaR) Analysis
3. Stress Testing Scenarios
4. Correlation Analysis
5. Risk-Adjusted Performance Metrics""",
max_loops=3,
model_name="gpt-4"
)
class MarketAnalysisSystem:
def __init__(self):
self.agents = [technical_analyst, fundamental_analyst, risk_analyst]
def comprehensive_analysis(self, asset_data):
analysis_results = []
for agent in self.agents:
analysis = agent.run(f"Analyze this asset data: {asset_data}")
analysis_results.append({
"analyst": agent.agent_name,
"analysis": analysis
})
# Synthesize results through risk analyst for final recommendation
final_analysis = risk_analyst.run(
f"Synthesize these analyses and provide a final recommendation: {analysis_results}"
)
return {
"detailed_analysis": analysis_results,
"final_recommendation": final_analysis
}
# Usage
analysis_system = MarketAnalysisSystem()
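# For example (hypothetical asset data):
# report = analysis_system.comprehensive_analysis("AAPL daily OHLCV for the last quarter")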

@@ -0,0 +1,18 @@
from swarms.structs.agent import Agent
def main():
# Initialize basic agent
agent = Agent(
agent_name="Basic-Example-Agent",
agent_description="A simple example agent",
system_prompt="You are a helpful assistant.",
model_name="gpt-4",
)
# Run the agent
response = agent.run("What is 2+2?")
print(f"Agent response: {response}")
if __name__ == "__main__":
main()

@@ -0,0 +1,67 @@
from swarms.structs.agent import Agent
from swarms.utils.pdf_to_text import pdf_to_text
import asyncio
class DocumentProcessingPipeline:
def __init__(self):
self.document_analyzer = Agent(
agent_name="Document-Analyzer",
agent_description="Enterprise document analysis specialist",
system_prompt="""You are an expert document analyzer specializing in:
1. Complex Document Structure Analysis
2. Key Information Extraction
3. Compliance Verification
4. Document Classification
5. Content Validation""",
max_loops=2,
model_name="gpt-4"
)
self.legal_reviewer = Agent(
agent_name="Legal-Reviewer",
agent_description="Legal compliance and risk assessment specialist",
system_prompt="""You are a legal review expert focusing on:
1. Regulatory Compliance Check
2. Legal Risk Assessment
3. Contractual Obligation Analysis
4. Privacy Requirement Verification
5. Legal Term Extraction""",
max_loops=2,
model_name="gpt-4"
)
self.data_extractor = Agent(
agent_name="Data-Extractor",
agent_description="Structured data extraction specialist",
system_prompt="""You are a data extraction expert specializing in:
1. Named Entity Recognition
2. Relationship Extraction
3. Tabular Data Processing
4. Metadata Extraction
5. Data Standardization""",
max_loops=2,
model_name="gpt-4"
)
async def process_document(self, document_path):
# Convert document to text
document_text = pdf_to_text(document_path)
# Parallel processing tasks
tasks = [
self.document_analyzer.arun(f"Analyze this document: {document_text}"),
self.legal_reviewer.arun(f"Review legal aspects: {document_text}"),
self.data_extractor.arun(f"Extract structured data: {document_text}")
]
results = await asyncio.gather(*tasks)
return {
"document_analysis": results[0],
"legal_review": results[1],
"extracted_data": results[2]
}
# Usage
processor = DocumentProcessingPipeline()
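# For example (the path is a placeholder):
# results = asyncio.run(processor.process_document("contracts/sample_agreement.pdf"))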

@@ -0,0 +1,69 @@
from swarms.structs.agent import Agent
from typing import Dict, List
class HealthcareDiagnosticSystem:
def __init__(self):
self.primary_diagnostician = Agent(
agent_name="Primary-Diagnostician",
agent_description="Primary diagnostic analysis specialist",
system_prompt="""You are a primary diagnostician expert in:
1. Initial Symptom Analysis
2. Patient History Evaluation
3. Preliminary Diagnosis Formation
4. Risk Factor Assessment
5. Treatment Priority Determination""",
max_loops=3,
model_name="gpt-4"
)
self.specialist_consultant = Agent(
agent_name="Specialist-Consultant",
agent_description="Specialized medical consultation expert",
system_prompt="""You are a medical specialist focusing on:
1. Complex Case Analysis
2. Specialized Treatment Planning
3. Comorbidity Assessment
4. Treatment Risk Evaluation
5. Advanced Diagnostic Interpretation""",
max_loops=3,
model_name="gpt-4"
)
self.treatment_coordinator = Agent(
agent_name="Treatment-Coordinator",
agent_description="Treatment planning and coordination specialist",
system_prompt="""You are a treatment coordination expert specializing in:
1. Treatment Plan Development
2. Care Coordination
3. Resource Allocation
4. Recovery Timeline Planning
5. Follow-up Protocol Design""",
max_loops=3,
model_name="gpt-4"
)
def process_case(self, patient_data: Dict) -> Dict:
# Initial diagnosis
primary_assessment = self.primary_diagnostician.run(
f"Perform initial diagnosis: {patient_data}"
)
# Specialist consultation
specialist_review = self.specialist_consultant.run(
f"Review case with initial assessment: {primary_assessment}"
)
# Treatment planning
treatment_plan = self.treatment_coordinator.run(
f"Develop treatment plan based on: Primary: {primary_assessment}, Specialist: {specialist_review}"
)
return {
"initial_assessment": primary_assessment,
"specialist_review": specialist_review,
"treatment_plan": treatment_plan
}
# Usage
diagnostic_system = HealthcareDiagnosticSystem()
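# For example (hypothetical case data):
# report = diagnostic_system.process_case({"symptoms": ["fever", "cough"], "history": "none"})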

@@ -0,0 +1,52 @@
"""
MCP Integration Demo Script
This script demonstrates the full MCP integration workflow
"""
import asyncio
import time
from swarms.tools.mcp_integration import MCPServerSseParams
from examples.mcp_example.mock_multi_agent import MultiAgentMathSystem
def print_section(title):
print("\n" + "="*50)
print(title)
print("="*50 + "\n")
async def run_demo():
print_section("1. Initializing Multi-Agent MCP System")
system = MultiAgentMathSystem()
print_section("2. Testing Basic Operations")
results = await system.process_task("What operations can you perform?")
for result in results:
print(f"\n[{result['agent']}]")
print(f"Response: {result['response']}")
print_section("3. Testing Mathematical Operations")
test_operations = [
"5 plus 3",
"10 times 4",
"20 divide by 5"
]
for operation in test_operations:
print(f"\nTesting: {operation}")
results = await system.process_task(operation)
for result in results:
if "error" not in result:
print(f"[{result['agent']}]: {result['response']}")
print_section("4. Testing Error Handling")
results = await system.process_task("calculate square root of 16")
for result in results:
print(f"\n[{result['agent']}]")
if "error" in result:
print(f"Error handled: {result['error']}")
else:
print(f"Response: {result['response']}")
if __name__ == "__main__":
print("\nMCP Integration Demonstration")
print("Running comprehensive demo of MCP functionality\n")
asyncio.run(run_demo())

@@ -0,0 +1,25 @@
from fastmcp import FastMCP
from typing import Dict, Any, Optional
# Initialize MCP server
mcp = FastMCP("Math-Server")
@mcp.tool()
def add(a: int, b: int) -> int:
"""Add two numbers together"""
try:
return a + b
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def multiply(a: int, b: int) -> int:
"""Multiply two numbers together"""
try:
return a * b
except Exception as e:
return {"error": str(e)}
if __name__ == "__main__":
print("Starting Math Server...")
mcp.run(transport="sse")

@@ -0,0 +1,62 @@
import os
import sys
from loguru import logger
from swarms import Agent
from swarms.prompts.agent_prompts import MATH_AGENT_PROMPT
from swarms.tools.mcp_integration import MCPServerSseParams
# Configure API key (assumed to be provided via environment variables; none is set here)
# Configure logging
logger.remove()
logger.add(sys.stdout, level="DEBUG", format="{time} | {level} | {message}")
# Define a simpler prompt that focuses on math operations
SIMPLE_MATH_PROMPT = """
You are a math calculator assistant that uses external tools.
When asked for calculations, extract the numbers and use the appropriate tool.
Available tools:
- add: For addition
- multiply: For multiplication
- divide: For division
Keep your responses concise and focused on the calculation.
"""
def main():
print("=== MINIMAL MCP AGENT INTEGRATION TEST ===")
# Properly configured MCP parameters
mcp_params = MCPServerSseParams(
url="http://127.0.0.1:8000",
headers={
"Content-Type": "application/json",
"Accept": "text/event-stream"
},
timeout=30.0, # Increased timeout
sse_read_timeout=60.0
)
agent = Agent(
agent_name="MCP Test Agent",
system_prompt=SIMPLE_MATH_PROMPT, # Using simpler prompt
mcp_servers=[mcp_params],
model_name="gpt-4o-mini",
max_loops=2, # Allow for retry
verbose=True
)
print("\nAgent created successfully! Type 'exit' to quit.")
while True:
query = input("\nMath query: ").strip()
if query.lower() == "exit":
break
print(f"\nProcessing: {query}")
try:
result = agent.run(query)
print(f"\nResult: {result}")
except Exception as e:
print(f"\nError processing query: {str(e)}")
if __name__ == "__main__":
main()

@@ -0,0 +1,38 @@
import pytest
import asyncio
from mock_multi_agent import MultiAgentMathSystem
import logging
logging.basicConfig(level=logging.INFO)
@pytest.mark.asyncio
async def test_multi_agent_math():
"""Test the multi-agent math system with various operations"""
system = MultiAgentMathSystem()
test_cases = [
"Add 5 and 3",
"Multiply 4 by 6",
"Divide 10 by 2"
]
for task in test_cases:
print(f"\nTesting: {task}")
results = await system.process_task(task)
for result in results:
assert "error" not in result, f"Agent {result['agent']} encountered error"
assert result["response"] is not None
print(f"{result['agent']} response: {result['response']}")
def test_interactive_system():
"""Test the interactive system manually"""
try:
system = MultiAgentMathSystem()
system.run_interactive()
except Exception as e:
pytest.fail(f"Interactive test failed: {e}")
if __name__ == "__main__":
pytest.main([__file__, "-v"])

@@ -0,0 +1,56 @@
from fastmcp import FastMCP
from loguru import logger
import sys
import time
# Configure detailed logging
logger.remove()
logger.add(sys.stdout, level="DEBUG", format="{time} | {level} | {message}")
# Create MCP server with fixed configuration
mcp = FastMCP(
host="127.0.0.1", # Bind to localhost only
port=8000,
transport="sse",
require_session_id=False,
cors_allowed_origins=["*"],
debug=True
)
# Define tools with proper return format
@mcp.tool()
def add(a: int, b: int) -> int:
"""Add two numbers."""
result = a + b
logger.info(f"Adding {a} + {b} = {result}")
return result # Let FastMCP handle the response formatting
@mcp.tool()
def multiply(a: int, b: int) -> int:
"""Multiply two numbers."""
result = a * b
logger.info(f"Multiplying {a} * {b} = {result}")
return result
@mcp.tool()
def divide(a: int, b: int) -> float:
"""Divide the first number by the second."""
if b == 0:
raise ValueError("Cannot divide by zero")
result = a / b
logger.info(f"Dividing {a} / {b} = {result}")
return result
def main():
try:
logger.info("Starting mock math server on http://127.0.0.1:8000")
print("Math MCP Server running on http://127.0.0.1:8000 (SSE)\n")
print("Available tools:\n - add\n - multiply\n - divide\n")
mcp.run() # This runs the server in a blocking mode
except Exception as e:
logger.error(f"Error starting server: {e}")
import traceback
traceback.print_exc()
if __name__ == "__main__":
main()

@@ -0,0 +1,203 @@
import asyncio
from swarms import Agent
from swarms.tools.mcp_integration import MCPServerSseParams
import logging
class MathAgent:
def __init__(self, name: str, server_url: str):
self.server = MCPServerSseParams(
url=server_url, headers={"Content-Type": "application/json"})
self.agent = Agent(
agent_name=name,
agent_description=
f"{'Calculator' if name == 'Calculator' else 'Stock Analyst'} agent specializing in {'mathematical calculations' if name == 'Calculator' else 'stock market analysis'}. For Calculator: use add, multiply, divide operations. For Stock Analyst: use moving averages and percentage change calculations.",
system_prompt=
f"You are {name}, a math processing agent. You have access to these mathematical operations ONLY: addition, multiplication, and division. Only suggest calculations using these available tools. Do not attempt to solve problems requiring other operations like percentages, square roots, or advanced math. When users ask about capabilities, list only the basic operations you can perform.",
max_loops=1,
mcp_servers=[self.server],
streaming_on=False,
model_name="gpt-4o-mini",
temperature=0.1,
max_tokens=1000)
def is_relevant_task(self, task: str) -> bool:
task_lower = task.lower()
if self.agent.agent_name == "Calculator":
math_keywords = [
'add', 'plus', '+', 'multiply', 'times', '*', 'x', 'divide',
'/', 'by'
]
return any(keyword in task_lower for keyword in math_keywords)
else: # StockAnalyst
stock_keywords = [
'moving average', 'stock', 'market', 'percentage', 'change'
]
return any(keyword in task_lower for keyword in stock_keywords)
async def process(self, task: str):
try:
# Check if asking about capabilities
if any(word in task.lower()
for word in ['what', 'how', 'can', 'capabilities', 'help']):
if self.agent.agent_name == "Calculator":
return {
"agent":
self.agent.agent_name,
"task":
task,
"response":
"[Calculator Agent] I can perform basic mathematical operations:\n" +
"- Addition (use '+' or 'plus')\n" +
"- Multiplication (use '*' or 'times')\n" +
"- Division (use '/' or 'divide by')\n" +
"Example: '5 plus 3' or '10 divide by 2'"
}
else: # StockAnalyst
return {
"agent":
self.agent.agent_name,
"task":
task,
"response":
"[Stock Analyst Agent] I can perform stock market analysis:\n" +
"- Calculate moving averages\n" +
"- Get current stock prices\n" +
"Example: 'calculate moving average of [10,20,30,40,50] over 3 periods' or 'get price of AAPL'"
}
# Only process if task is relevant to this agent
if not self.is_relevant_task(task):
return {
"agent": self.agent.agent_name,
"task": task,
"response":
None # Indicate this agent should not handle this task
}
# Check if input is stock-related (for StockAnalyst)
if self.agent.agent_name == "StockAnalyst" and "moving average" in task.lower(
):
try:
import re
# Extract list of numbers and period
numbers = re.findall(r'\[([\d,\s]+)\]', task)
period = re.findall(r'over\s+(\d+)\s+periods', task)
if numbers and period:
numbers = [float(n) for n in numbers[0].split(',')]
period = int(period[0])
if len(numbers) >= period:
# Calculate moving average
averages = []
for i in range(len(numbers) - period + 1):
avg = sum(numbers[i:i + period]) / period
averages.append(round(avg, 2))
return {
"agent": self.agent.agent_name,
"task": task,
"response": f"Moving averages: {averages}"
}
except Exception as e:
return {
"agent": self.agent.agent_name,
"task": task,
"error": f"Error calculating moving average: {str(e)}"
}
# Check if input is math-related (for Calculator)
if self.agent.agent_name == "Calculator":
math_keywords = [
'add', 'plus', '+', 'multiply', 'times', '*', 'x',
'divide', '/', 'by'
]
if not any(keyword in task.lower()
for keyword in math_keywords):
return {
"agent":
self.agent.agent_name,
"task":
task,
"response":
"Please provide a mathematical operation (add, multiply, or divide)"
}
response = await self.agent.arun(task)
return {
"agent": self.agent.agent_name,
"task": task,
"response": str(response)
}
except Exception as e:
logging.error(f"Error in {self.agent.agent_name}: {str(e)}")
return {
"agent": self.agent.agent_name,
"task": task,
"error": str(e)
}
class MultiAgentMathSystem:
def __init__(self):
math_url = "http://0.0.0.0:8000"
stock_url = "http://0.0.0.0:8001"
self.calculator = MathAgent("Calculator", math_url)
self.stock_analyst = MathAgent("StockAnalyst", stock_url)
async def process_task(self, task: str):
# Process with both agents
results = await asyncio.gather(self.calculator.process(task),
self.stock_analyst.process(task))
return results
def run_interactive(self):
print("\nMulti-Agent Math System")
print("Enter 'exit' to quit")
while True:
try:
user_input = input("\nEnter your query: ")
if user_input.lower() == 'exit':
break
results = asyncio.run(self.process_task(user_input))
                responses = []
                for result in results:
                    # Error results carry an "error" key instead of "response",
                    # so check for it first to avoid a KeyError.
                    if "error" in result:
                        responses.append(f"Error: {result['error']}")
                    elif result.get("response") is not None:
                        response = result["response"]
                        if isinstance(response, str):
                            # Clean up calculation results
                            if "=" in response:
                                calculation = response.split("=")[-1].strip()
                                responses.append(calculation)
                            else:
                                # Remove system/agent information
                                clean_response = response.split("System:")[0].strip()
                                clean_response = clean_response.split("Human:")[0].strip()
                                if clean_response:
                                    responses.append(clean_response)
if responses:
print("\nResult:")
print("-" * 30)
for response in responses:
print(response)
print("-" * 30)
except Exception as e:
print(f"System error: {str(e)}")
if __name__ == "__main__":
system = MultiAgentMathSystem()
system.run_interactive()

@ -0,0 +1,33 @@
from fastmcp import FastMCP
from typing import Dict, Union
mcp = FastMCP("Stock-Mock-Server")
@mcp.tool()
def get_stock_price(symbol: str) -> Dict[str, Union[float, str]]:
"""Get the current price of a stock"""
prices = {
"AAPL": 150.0,
"GOOGL": 2800.0,
"MSFT": 300.0,
"AMZN": 3300.0
}
if symbol not in prices:
return {"error": f"Stock {symbol} not found"}
return {"price": prices[symbol]}
@mcp.tool()
def calculate_moving_average(prices: list[float], window: int) -> Dict[str, Union[list[float], str]]:
"""Calculate moving average of stock prices"""
if len(prices) < window:
return {"error": "Not enough price points"}
avgs = []
for i in range(len(prices) - window + 1):
avg = sum(prices[i:i+window]) / window
avgs.append(round(avg, 2))
return {"averages": avgs}
if __name__ == "__main__":
print("Starting Mock Stock Server on port 8001...")
mcp.run(transport="sse", host="0.0.0.0", port=8001)

@ -0,0 +1,143 @@
# MCP Integration Demo Script
## 1. Setup & Architecture Overview
```bash
# Terminal 1: Start Stock Server
python examples/mcp_example/mock_stock_server.py
# Terminal 2: Start Math Server
python examples/mcp_example/mock_math_server.py
# Terminal 3: Start Multi-Agent System
python examples/mcp_example/mock_multi_agent.py
```
## 2. Key Components
### Server-Side:
- FastMCP servers running on ports 8000 and 8001
- Math Server provides: add, multiply, divide operations (multiply/divide sketched below)
- Stock Server provides: price lookup, moving average calculations
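
The Math Server's `add` tool appears in section 3 below; the remaining math tools follow the same decorator pattern. The sketch here is illustrative rather than a copy of the mock server's exact code:

```python
from fastmcp import FastMCP

mcp = FastMCP("Math-Server")

@mcp.tool()
def multiply(a: int, b: int) -> int:
    """Multiply two numbers together"""
    return a * b

@mcp.tool()
def divide(a: int, b: int) -> float:
    """Divide the first number by the second, rejecting division by zero"""
    if b == 0:
        raise ValueError("Cannot divide by zero")
    return a / b
```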
### Client-Side:
- Multi-agent system with specialized agents
- MCPServerSseParams for server connections
- Automatic task routing based on agent specialization
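
The routing itself is keyword-based: each agent only accepts tasks that mention vocabulary it owns. A minimal sketch of such an `is_relevant_task` check (the keyword sets here are illustrative, not the exact lists used by the demo agents):

```python
MATH_KEYWORDS = {"add", "plus", "+", "multiply", "times", "*", "divide", "/"}
STOCK_KEYWORDS = {"stock", "price", "moving average", "ticker"}

def is_relevant_task(agent_name: str, task: str) -> bool:
    """Return True if the task mentions a keyword owned by this agent."""
    keywords = MATH_KEYWORDS if agent_name == "Calculator" else STOCK_KEYWORDS
    task_lower = task.lower()
    return any(keyword in task_lower for keyword in keywords)
```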
## 3. MCP Integration Details
### Server Implementation:
```python
# Math Server Example
from fastmcp import FastMCP
mcp = FastMCP("Math-Server")
@mcp.tool()
def add(a: int, b: int) -> int:
"""Add two numbers together"""
return a + b
```
### Client Integration:
```python
from swarms.tools.mcp_integration import MCPServerSseParams
# Configure MCP server connection
server = MCPServerSseParams(
url="http://0.0.0.0:8000",
headers={"Content-Type": "application/json"}
)
# Initialize agent with MCP capabilities
agent = Agent(
agent_name="Calculator",
mcp_servers=[server],
max_loops=1
)
```
## 4. Demo Flow
1. Math Operations:
```
Enter a math problem: 5 plus 3
Enter a math problem: 10 times 4
```
2. Stock Analysis:
```
Enter a math problem: get price of AAPL
Enter a math problem: calculate moving average of [10,20,30,40,50] over 3 periods
```
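
For reference, this is how the moving-average query above is parsed and computed on the client side (the regex patterns mirror the `MathAgent` code; for `[10,20,30,40,50]` over 3 periods the result is `[20.0, 30.0, 40.0]`):

```python
import re

task = "calculate moving average of [10,20,30,40,50] over 3 periods"

# Extract the bracketed number list and the window size, as the demo client does
numbers = [float(n) for n in re.findall(r'\[([\d,\s]+)\]', task)[0].split(',')]
period = int(re.findall(r'over\s+(\d+)\s+periods', task)[0])

averages = [round(sum(numbers[i:i + period]) / period, 2)
            for i in range(len(numbers) - period + 1)]
print(averages)  # [20.0, 30.0, 40.0]
```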
## 5. Integration Highlights
1. Server Configuration:
- FastMCP initialization
- Tool registration using decorators
- SSE transport setup
2. Client Integration:
- MCPServerSseParams configuration
- Agent specialization
- Task routing logic
3. Communication Flow:
- Client request → Agent processing → MCP server → Response handling
4. Error Handling (see the retry sketch below):
- Graceful error management
- Automatic retry mechanisms
- Clear error reporting
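
A minimal sketch of the retry behaviour described above, written at the level of the demo script (the `call_with_retry` helper and its parameters are illustrative assumptions, not part of the swarms or MCP APIs):

```python
import time
from loguru import logger

def call_with_retry(call, max_retries: int = 3, base_delay: float = 1.0):
    """Retry a flaky MCP call with simple exponential backoff."""
    for attempt in range(1, max_retries + 1):
        try:
            return call()
        except Exception as e:
            logger.warning(f"MCP call failed (attempt {attempt}/{max_retries}): {e}")
            if attempt == max_retries:
                raise
            time.sleep(base_delay * 2 ** (attempt - 1))

# Usage, assuming the synchronous-style mcp_flow call from section 8 below:
# result = call_with_retry(lambda: mcp_flow(params, function_call))
```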
## 6. Code Architecture
### Server Example (Math Server):
```python
@mcp.tool()
def add(a: int, b: int) -> int:
"""Add two numbers together"""
return a + b
```
### Client Example (Multi-Agent):
```python
calculator = MathAgent("Calculator", "http://0.0.0.0:8000")
stock_analyst = MathAgent("StockAnalyst", "http://0.0.0.0:8001")
```
## 7. Key Benefits
1. Modular Architecture
2. Specialized Agents
3. Clean API Integration
4. Scalable Design
5. Standardized Communication Protocol
6. Easy Tool Registration
7. Flexible Server Implementation
## 8. Testing & Validation
1. Basic Connectivity:
```python
def test_server_connection():
params = {"url": "http://0.0.0.0:8000"}
server = MCPServerSse(params)
asyncio.run(server.connect())
assert server.session is not None
```
2. Tool Execution:
```python
def test_tool_execution():
params = {"url": "http://0.0.0.0:8000"}
function_call = {
"tool_name": "add",
"arguments": {"a": 5, "b": 3}
}
result = mcp_flow(params, function_call)
assert result is not None
```

@ -0,0 +1,52 @@
from swarms import Agent
from swarms.prompts.finance_agent_sys_prompt import FINANCIAL_AGENT_SYS_PROMPT
from swarms.tools.mcp_integration import MCPServerSseParams
import logging
def main():
# Configure MCP server connection
server = MCPServerSseParams(
url="http://0.0.0.0:6274",
headers={"Content-Type": "application/json"},
timeout=10.0,
sse_read_timeout=300.0
)
# Initialize agent with MCP capabilities
agent = Agent(
agent_name="Math-Agent",
agent_description="Agent that performs math operations",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
max_loops=1,
mcp_servers=[server],
streaming_on=True
)
try:
# First get available tools from server
print("\nDiscovering available tools from MCP server...")
tools = agent.mcp_tool_handling()
print("\nAvailable tools:", tools)
while True:
# Get user input
user_input = input("\nEnter a math operation (or 'exit' to quit): ")
if user_input.lower() == 'exit':
break
# Process user input through agent
try:
result = agent.run(user_input)
print("\nResult:", result)
except Exception as e:
print(f"Error processing request: {e}")
except Exception as e:
logging.error(f"Test failed: {e}")
raise
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()

@ -0,0 +1,25 @@
import requests
import time
import sys
from loguru import logger
# Configure logger
logger.remove()
logger.add(sys.stdout, level="DEBUG")
def test_server_connection():
"""Simple test to see if server responds at all."""
url = "http://localhost:8000"
try:
logger.debug(f"Testing connection to {url}")
response = requests.get(url)
logger.debug(f"Response status: {response.status_code}")
logger.debug(f"Response content: {response.text[:100]}...")
return True
except Exception as e:
logger.error(f"Connection failed: {str(e)}")
return False
if __name__ == "__main__":
test_server_connection()

@ -5,6 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "7.7.2"
description = "Swarms - TGSC"
license = "MIT"
@ -77,6 +78,9 @@ numpy = "*"
litellm = "*"
torch = "*"
httpx = "*"
fastmcp = {version = ">=2.0", extras = ["sse"]}
mcp = ">=0.3.0"
typing-extensions = "^4.13.2"
[tool.poetry.scripts]
swarms = "swarms.cli.main:main"

@ -0,0 +1,5 @@
{pkgs}: {
deps = [
pkgs.libxcrypt
];
}

File diff suppressed because it is too large

@ -43,6 +43,8 @@ async def _execute_mcp_tool(
    method: Literal["stdio", "sse"] = "sse",
    parameters: Dict[Any, Any] = None,
    output_type: Literal["str", "dict"] = "str",
    timeout: float = 30.0,
    *args,
    **kwargs,
) -> Dict[Any, Any]:
@ -72,13 +74,15 @@ async def _execute_mcp_tool(
        raise ValueError(f"Invalid output type: {output_type}")
async def execute_mcp_tool(
    url: str,
    tool_name: str = None,
    method: Literal["stdio", "sse"] = "sse",
    parameters: Dict[Any, Any] = None,
    output_type: Literal["str", "dict"] = "str",
) -> Dict[Any, Any]:
    return asyncio.run(
        _execute_mcp_tool(
            url=url,

@ -1,29 +1,17 @@
from __future__ import annotations

import abc
import asyncio
from contextlib import AbstractAsyncContextManager, AsyncExitStack
from pathlib import Path
from typing import Any, Dict, List, Optional, Literal, Union
from typing_extensions import NotRequired, TypedDict

from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from loguru import logger
from mcp import ClientSession, StdioServerParameters, Tool as MCPTool, stdio_client
from mcp.client.sse import sse_client
from mcp.types import CallToolResult, JSONRPCMessage

from swarms.utils.any_to_str import any_to_str
@ -32,94 +20,69 @@ class MCPServer(abc.ABC):
    """Base class for Model Context Protocol servers."""

    @abc.abstractmethod
    async def connect(self) -> None:
        """Establish connection to the MCP server."""
        pass

    @property
    @abc.abstractmethod
    def name(self) -> str:
        """Human-readable server name."""
        pass

    @abc.abstractmethod
    async def cleanup(self) -> None:
        """Clean up resources and close connection."""
        pass

    @abc.abstractmethod
    async def list_tools(self) -> List[MCPTool]:
        """List available MCP tools on the server."""
        pass

    @abc.abstractmethod
    async def call_tool(self, tool_name: str,
                        arguments: Dict[str, Any] | None) -> CallToolResult:
        """Invoke a tool by name with provided arguments."""
        pass


class _MCPServerWithClientSession(MCPServer, abc.ABC):
    """Mixin providing ClientSession-based MCP communication."""

    def __init__(self, cache_tools_list: bool = False):
        self.session: Optional[ClientSession] = None
        self.exit_stack: AsyncExitStack = AsyncExitStack()
        self._cleanup_lock = asyncio.Lock()
        self.cache_tools_list = cache_tools_list
        self._cache_dirty = True
        self._tools_list: Optional[List[MCPTool]] = None

    @abc.abstractmethod
    def create_streams(
        self
    ) -> AbstractAsyncContextManager[tuple[
        MemoryObjectReceiveStream[JSONRPCMessage | Exception],
        MemoryObjectSendStream[JSONRPCMessage],
    ]]:
        """Supply the read/write streams for the MCP transport."""
        pass

    async def __aenter__(self) -> MCPServer:
        await self.connect()
        return self  # type: ignore

    async def __aexit__(self, exc_type, exc_value, tb) -> None:
        await self.cleanup()

    async def connect(self) -> None:
        """Initialize transport and ClientSession."""
        try:
            transport = await self.exit_stack.enter_async_context(
                self.create_streams())
            read, write = transport
            session = await self.exit_stack.enter_async_context(
                ClientSession(read, write))
            await session.initialize()
            self.session = session
        except Exception as e:
@ -127,266 +90,222 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
            await self.cleanup()
            raise

    async def cleanup(self) -> None:
        """Close session and transport."""
        async with self._cleanup_lock:
            try:
                await self.exit_stack.aclose()
            except Exception as e:
                logger.error(f"Error during cleanup: {e}")
            finally:
                self.session = None

    async def list_tools(self) -> List[MCPTool]:
        if not self.session:
            raise RuntimeError("Server not connected. Call connect() first.")
        if self.cache_tools_list and not self._cache_dirty and self._tools_list:
            return self._tools_list
        self._cache_dirty = False
        self._tools_list = (await self.session.list_tools()).tools
        return self._tools_list  # type: ignore

    async def call_tool(
            self,
            tool_name: str | None = None,
            arguments: Dict[str, Any] | None = None) -> CallToolResult:
        if not arguments:
            raise ValueError("Arguments dict is required to call a tool")
        name = tool_name or arguments.get("tool_name") or arguments.get("name")
        if not name:
            raise ValueError("Tool name missing in arguments")
        if not self.session:
            raise RuntimeError("Server not connected. Call connect() first.")
        return await self.session.call_tool(name, arguments)


class MCPServerStdioParams(TypedDict):
    """Configuration for stdio transport."""
    command: str
    args: NotRequired[List[str]]
    env: NotRequired[Dict[str, str]]
    cwd: NotRequired[str | Path]
    encoding: NotRequired[str]
    encoding_error_handler: NotRequired[Literal["strict", "ignore", "replace"]]


class MCPServerStdio(_MCPServerWithClientSession):
    """MCP server over stdio transport."""

    def __init__(
        self,
        params: MCPServerStdioParams,
        cache_tools_list: bool = False,
        name: Optional[str] = None,
    ):
        super().__init__(cache_tools_list)
        self.params = StdioServerParameters(
            command=params["command"],
            args=params.get("args", []),
            env=params.get("env"),
            cwd=params.get("cwd"),
            encoding=params.get("encoding", "utf-8"),
            encoding_error_handler=params.get("encoding_error_handler",
                                              "strict"),
        )
        self._name = name or f"stdio:{self.params.command}"

    def create_streams(
        self
    ) -> AbstractAsyncContextManager[tuple[
        MemoryObjectReceiveStream[JSONRPCMessage | Exception],
        MemoryObjectSendStream[JSONRPCMessage],
    ]]:
        return stdio_client(self.params)

    @property
    def name(self) -> str:
        return self._name


class MCPServerSseParams(TypedDict):
    """Configuration for HTTP+SSE transport."""
    url: str
    headers: NotRequired[Dict[str, str]]
    timeout: NotRequired[float]
    sse_read_timeout: NotRequired[float]


class MCPServerSse(_MCPServerWithClientSession):
    """MCP server over HTTP with SSE transport."""

    def __init__(
        self,
        params: MCPServerSseParams,
        cache_tools_list: bool = False,
        name: Optional[str] = None,
    ):
        super().__init__(cache_tools_list)
        self.params = params
        self._name = name or f"sse:{params['url']}"

    def create_streams(
        self
    ) -> AbstractAsyncContextManager[tuple[
        MemoryObjectReceiveStream[JSONRPCMessage | Exception],
        MemoryObjectSendStream[JSONRPCMessage],
    ]]:
        return sse_client(
            url=self.params["url"],
            headers=self.params.get("headers"),
            timeout=self.params.get("timeout", 5),
            sse_read_timeout=self.params.get("sse_read_timeout", 300),
        )

    @property
    def name(self) -> str:
        return self._name


async def call_tool_fast(server: MCPServerSse,
                         payload: Dict[str, Any] | str) -> Any:
    """Async function to call a tool on a server with proper cleanup."""
    try:
        await server.connect()
        arguments = payload if isinstance(payload, dict) else None
        result = await server.call_tool(arguments=arguments)
        return result
    finally:
        await server.cleanup()


async def mcp_flow_get_tool_schema(params: MCPServerSseParams) -> Any:
    """Async function to get tool schema from MCP server."""
    async with MCPServerSse(params) as server:
        tools = await server.list_tools()
        return tools


async def mcp_flow(
    params: MCPServerSseParams,
    function_call: Dict[str, Any] | str,
) -> Any:
    """Async function to call a tool with given parameters."""
    async with MCPServerSse(params) as server:
        return await call_tool_fast(server, function_call)


async def _call_one_server(params: MCPServerSseParams,
                           payload: Dict[str, Any] | str) -> Any:
    """Helper function to call a single MCP server."""
    server = MCPServerSse(params)
    try:
        await server.connect()
        arguments = payload if isinstance(payload, dict) else None
        return await server.call_tool(arguments=arguments)
    finally:
        await server.cleanup()


async def abatch_mcp_flow(params: List[MCPServerSseParams],
                          payload: Dict[str, Any] | str) -> List[Any]:
    """Async function to execute a batch of MCP calls concurrently.

    Args:
        params (List[MCPServerSseParams]): List of MCP server configurations
        payload (Dict[str, Any] | str): The payload to send to each server

    Returns:
        List[Any]: Results from all MCP servers
    """
    if not params:
        logger.warning("No MCP servers provided for batch operation")
        return []
    try:
        return await asyncio.gather(
            *[_call_one_server(p, payload) for p in params])
    except Exception as e:
        logger.error(f"Error in abatch_mcp_flow: {e}")
        # Return partial results if any were successful
        return [f"Error in batch operation: {str(e)}"]


def batch_mcp_flow(params: List[MCPServerSseParams],
                   payload: Dict[str, Any] | str) -> List[Any]:
    """Sync wrapper for batch MCP operations.

    This creates a new event loop if needed to run the async batch operation.
    ONLY use this when not already in an async context.

    Args:
        params (List[MCPServerSseParams]): List of MCP server configurations
        payload (Dict[str, Any] | str): The payload to send to each server

    Returns:
        List[Any]: Results from all MCP servers
    """
    if not params:
        logger.warning("No MCP servers provided for batch operation")
        return []
    try:
        # Check if we're already in an event loop
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            # No event loop exists, create one
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

        if loop.is_running():
            # We're already in an async context, can't use asyncio.run
            # Use a future to bridge sync-async gap
            future = asyncio.run_coroutine_threadsafe(
                abatch_mcp_flow(params, payload), loop)
            return future.result(timeout=30)  # Add timeout to prevent hanging
        else:
            # We're not in an async context, safe to use loop.run_until_complete
            return loop.run_until_complete(abatch_mcp_flow(params, payload))
    except Exception as e:
        logger.error(f"Error in batch_mcp_flow: {e}")
        return [f"Error in batch operation: {str(e)}"]

@ -0,0 +1,23 @@
import unittest
from swarms.structs.agent import Agent
class TestBasicExample(unittest.TestCase):
def setUp(self):
self.agent = Agent(
agent_name="Test-Agent",
agent_description="A test agent",
system_prompt="You are a helpful assistant.",
model_name="gpt-4",
)
def test_agent_initialization(self):
self.assertEqual(self.agent.agent_name, "Test-Agent")
self.assertEqual(self.agent.agent_description, "A test agent")
def test_agent_run(self):
response = self.agent.run("What is 2+2?")
self.assertIsNotNone(response)
if __name__ == "__main__":
unittest.main()

@ -0,0 +1,136 @@
import pytest
import asyncio
from swarms.tools.mcp_integration import (
MCPServer,
MCPServerStdio,
MCPServerSse,
mcp_flow,
mcp_flow_get_tool_schema,
batch_mcp_flow
)
# Test basic server connectivity
def test_server_connection():
"""
Test that a user can connect to the MCP server successfully
"""
params = {"url": "http://localhost:8000"}
server = MCPServerSse(params, cache_tools_list=True)
# Connect should work
asyncio.run(server.connect())
assert server.session is not None
# Cleanup should work
asyncio.run(server.cleanup())
assert server.session is None
# Test tool listing functionality
def test_list_tools():
"""
Test that a user can retrieve available tools from the server
"""
params = {"url": "http://localhost:8000"}
server = MCPServerSse(params)
asyncio.run(server.connect())
tools = asyncio.run(server.list_tools())
assert isinstance(tools, list)
assert len(tools) > 0
asyncio.run(server.cleanup())
# Test tool execution
def test_tool_execution():
"""
Test that a user can execute a tool successfully
"""
params = {"url": "http://localhost:8000"}
function_call = {
"tool_name": "add",
"arguments": {"a": 5, "b": 3}
}
result = mcp_flow(params, function_call)
assert result is not None
# Test batch operations
def test_batch_execution():
"""
Test that a user can execute multiple tools in batch
"""
params_list = [
{"url": "http://localhost:8000"},
{"url": "http://localhost:8000"}
]
function_calls = [
{"tool_name": "add", "arguments": {"a": 1, "b": 2}},
{"tool_name": "subtract", "arguments": {"a": 5, "b": 3}}
]
results = batch_mcp_flow(params_list, function_calls)
assert len(results) == 2
assert all(result is not None for result in results)
# Test error handling
def test_error_handling():
"""
Test that users receive proper error messages for invalid operations
"""
params = {"url": "http://localhost:8000"}
invalid_function = {
"tool_name": "nonexistent_tool",
"arguments": {}
}
with pytest.raises(Exception):
mcp_flow(params, invalid_function)
# Test tool schema retrieval
def test_get_tool_schema():
"""
Test that users can retrieve tool schemas correctly
"""
params = {"url": "http://localhost:8000"}
schema = mcp_flow_get_tool_schema(params)
assert isinstance(schema, dict)
assert "tools" in schema or "functions" in schema
# Test server reconnection
def test_server_reconnection():
"""
Test that users can reconnect to the server after disconnection
"""
params = {"url": "http://localhost:8000"}
server = MCPServerSse(params)
# First connection
asyncio.run(server.connect())
asyncio.run(server.cleanup())
# Second connection should work
asyncio.run(server.connect())
assert server.session is not None
asyncio.run(server.cleanup())
# Test cache functionality
def test_cache_behavior():
"""
Test that tool caching works as expected for users
"""
params = {"url": "http://localhost:8000"}
server = MCPServerSse(params, cache_tools_list=True)
asyncio.run(server.connect())
# First call should cache
tools1 = asyncio.run(server.list_tools())
# Second call should use cache
tools2 = asyncio.run(server.list_tools())
assert tools1 == tools2
asyncio.run(server.cleanup())