parent
8c9d9c673b
commit
3dd1855907
@ -0,0 +1,167 @@
|
||||
from time import perf_counter_ns
|
||||
import psutil
|
||||
import os
|
||||
from rich.panel import Panel
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
from statistics import mean, median, stdev, variance
|
||||
from swarms.structs.agent import Agent
|
||||
from swarms.prompts.finance_agent_sys_prompt import (
|
||||
FINANCIAL_AGENT_SYS_PROMPT,
|
||||
)
|
||||
|
||||
|
||||
def get_memory_stats(memory_readings):
    """Summarize a series of memory readings.

    Args:
        memory_readings: Non-empty sequence of numeric memory measurements
            (KB deltas in this benchmark).

    Returns:
        dict with ``peak``, ``min``, ``mean``, ``median``, ``stdev`` and
        ``variance``. The spread metrics fall back to 0 for a single
        reading, since stdev/variance require at least two samples.
    """
    has_spread = len(memory_readings) > 1
    return {
        "peak": max(memory_readings),
        "min": min(memory_readings),
        "mean": mean(memory_readings),
        "median": median(memory_readings),
        "stdev": stdev(memory_readings) if has_spread else 0,
        "variance": variance(memory_readings) if has_spread else 0,
    }
|
||||
|
||||
|
||||
def get_time_stats(times):
    """Summarize a series of duration measurements.

    Args:
        times: Non-empty sequence of numeric durations (ms in this benchmark).

    Returns:
        dict with ``total``, ``mean``, ``median``, ``min``, ``max``,
        ``stdev`` and ``variance``. The spread metrics fall back to 0
        for a single sample.
    """
    has_spread = len(times) > 1
    stats = {
        "total": sum(times),
        "mean": mean(times),
        "median": median(times),
        "min": min(times),
        "max": max(times),
    }
    stats["stdev"] = stdev(times) if has_spread else 0
    stats["variance"] = variance(times) if has_spread else 0
    return stats
|
||||
|
||||
|
||||
def benchmark_multiple_agents(num_agents=100):
    """Benchmark Agent construction time and memory growth.

    Instantiates ``num_agents`` identically-configured agents, timing each
    construction and sampling the process RSS after each one, then renders
    the aggregated statistics as two rich panels on the console.

    Args:
        num_agents: How many agents to instantiate (default 100).
    """
    console = Console()
    process = psutil.Process(os.getpid())

    init_times = []
    memory_readings = []

    # Prepare both result tables up front.
    time_table = Table(title="Time Statistics")
    time_table.add_column("Metric", style="cyan")
    time_table.add_column("Value", style="green")

    memory_table = Table(title="Memory Statistics")
    memory_table.add_column("Metric", style="cyan")
    memory_table.add_column("Value", style="green")

    # Baseline RSS in KB; every reading below is a delta from this point.
    initial_memory = process.memory_info().rss / 1024
    start_total_time = perf_counter_ns()

    # Initialize agents and measure performance.
    for i in range(num_agents):
        start_time = perf_counter_ns()

        Agent(
            agent_name=f"Financial-Analysis-Agent-{i}",
            agent_description="Personal finance advisor agent",
            system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
            max_loops=2,
            model_name="gpt-4o-mini",
            dynamic_temperature_enabled=True,
            interactive=False,
        )

        # Per-agent construction time in milliseconds.
        init_times.append((perf_counter_ns() - start_time) / 1_000_000)

        # Memory growth (KB) relative to the pre-benchmark baseline.
        memory_readings.append(
            process.memory_info().rss / 1024 - initial_memory
        )

        if (i + 1) % 10 == 0:
            console.print(
                f"Created {i + 1} agents...", style="bold blue"
            )

    total_elapsed_time = (
        perf_counter_ns() - start_total_time
    ) / 1_000_000

    # Aggregate statistics.
    time_stats = get_time_stats(init_times)
    memory_stats = get_memory_stats(memory_readings)

    # Fill in the time table.
    time_rows = [
        ("Total Wall Time", f"{total_elapsed_time:.2f} ms"),
        ("Total Init Time", f"{time_stats['total']:.2f} ms"),
        ("Average Init Time", f"{time_stats['mean']:.2f} ms"),
        ("Median Init Time", f"{time_stats['median']:.2f} ms"),
        ("Fastest Init", f"{time_stats['min']:.2f} ms"),
        ("Slowest Init", f"{time_stats['max']:.2f} ms"),
        ("Std Deviation", f"{time_stats['stdev']:.2f} ms"),
        ("Variance", f"{time_stats['variance']:.4f} ms²"),
        (
            "Throughput",
            f"{(num_agents/total_elapsed_time) * 1000:.2f} agents/second",
        ),
    ]
    for metric, value in time_rows:
        time_table.add_row(metric, value)

    # Fill in the memory table.
    memory_rows = [
        ("Peak Memory Usage", f"{memory_stats['peak']:.2f} KB"),
        ("Minimum Memory Usage", f"{memory_stats['min']:.2f} KB"),
        ("Average Memory Usage", f"{memory_stats['mean']:.2f} KB"),
        ("Median Memory Usage", f"{memory_stats['median']:.2f} KB"),
        ("Memory Std Deviation", f"{memory_stats['stdev']:.2f} KB"),
        ("Memory Variance", f"{memory_stats['variance']:.2f} KB²"),
        (
            "Avg Memory Per Agent",
            f"{memory_stats['mean']/num_agents:.2f} KB",
        ),
    ]
    for metric, value in memory_rows:
        memory_table.add_row(metric, value)

    # Render both panels.
    console.print(
        Panel(
            time_table,
            title="Time Benchmark Results",
            border_style="blue",
            padding=(1, 2),
        )
    )
    console.print("\n")
    console.print(
        Panel(
            memory_table,
            title="Memory Benchmark Results",
            border_style="green",
            padding=(1, 2),
        )
    )


if __name__ == "__main__":
    benchmark_multiple_agents(100)
|
@ -0,0 +1,371 @@
|
||||
# Swarms API with Tools Guide
|
||||
|
||||
|
||||
The Swarms API allows you to create and manage AI agent swarms with optional tool integration. This guide will walk you through setting up and using the Swarms API with tools.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Python 3.7+
|
||||
- Swarms API key
|
||||
- Required Python packages:
|
||||
- `requests`
|
||||
- `python-dotenv`
|
||||
|
||||
## Installation & Setup
|
||||
|
||||
1. Install required packages:
|
||||
|
||||
```bash
|
||||
pip install requests python-dotenv
|
||||
```
|
||||
|
||||
2. Create a `.env` file in your project root:
|
||||
|
||||
```bash
|
||||
SWARMS_API_KEY=your_api_key_here
|
||||
```
|
||||
|
||||
3. Basic setup code:
|
||||
|
||||
```python
|
||||
import os
|
||||
import requests
|
||||
from dotenv import load_dotenv
|
||||
import json
|
||||
|
||||
load_dotenv()
|
||||
|
||||
API_KEY = os.getenv("SWARMS_API_KEY")
|
||||
BASE_URL = "https://api.swarms.world"
|
||||
|
||||
headers = {"x-api-key": API_KEY, "Content-Type": "application/json"}
|
||||
```
|
||||
|
||||
## Creating a Swarm with Tools
|
||||
|
||||
### Step-by-Step Guide
|
||||
|
||||
1. Define your tool dictionary:
|
||||
```python
|
||||
tool_dictionary = {
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "search_topic",
|
||||
"description": "Conduct an in-depth search on a specified topic",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"depth": {
|
||||
"type": "integer",
|
||||
"description": "Search depth (1-3)"
|
||||
},
|
||||
"detailed_queries": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"description": "Specific search queries"
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": ["depth", "detailed_queries"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
2. Create agent configurations:
|
||||
```python
|
||||
agent_config = {
|
||||
"agent_name": "Market Analyst",
|
||||
"description": "Analyzes market trends",
|
||||
"system_prompt": "You are a financial analyst expert.",
|
||||
"model_name": "openai/gpt-4",
|
||||
"role": "worker",
|
||||
"max_loops": 1,
|
||||
"max_tokens": 8192,
|
||||
"temperature": 0.5,
|
||||
"auto_generate_prompt": False,
|
||||
"tools_dictionary": [tool_dictionary] # Optional: Add tools if needed
|
||||
}
|
||||
```
|
||||
|
||||
3. Create the swarm payload:
|
||||
```python
|
||||
payload = {
|
||||
"name": "Your Swarm Name",
|
||||
"description": "Swarm description",
|
||||
"agents": [agent_config],
|
||||
"max_loops": 1,
|
||||
"swarm_type": "ConcurrentWorkflow",
|
||||
"task": "Your task description",
|
||||
"output_type": "dict"
|
||||
}
|
||||
```
|
||||
|
||||
4. Make the API request:
|
||||
```python
|
||||
def run_swarm(payload):
|
||||
response = requests.post(
|
||||
f"{BASE_URL}/v1/swarm/completions",
|
||||
headers=headers,
|
||||
json=payload
|
||||
)
|
||||
return response.json()
|
||||
```
|
||||
|
||||
## FAQ
|
||||
|
||||
### Do all agents need tools?
|
||||
No, tools are optional for each agent. You can choose which agents have tools based on your specific needs. Simply omit the `tools_dictionary` field for agents that don't require tools.
|
||||
|
||||
### What types of tools can I use?
|
||||
Currently, the API supports function-type tools. Each tool must have:
|
||||
- A unique name
|
||||
- A clear description
|
||||
- Well-defined parameters with types and descriptions
|
||||
|
||||
### Can I mix agents with and without tools?
|
||||
Yes, you can create swarms with a mix of tool-enabled and regular agents. This allows for flexible swarm architectures.
|
||||
|
||||
### What's the recommended number of tools per agent?
|
||||
While there's no strict limit, it's recommended to:
|
||||
- Keep tools focused and specific
|
||||
- Only include tools that the agent needs
|
||||
- Consider the complexity of tool interactions
|
||||
|
||||
## Example Implementation
|
||||
|
||||
Here's a complete example of a financial analysis swarm:
|
||||
|
||||
```python
|
||||
def run_financial_analysis_swarm():
|
||||
payload = {
|
||||
"name": "Financial Analysis Swarm",
|
||||
"description": "Market analysis swarm",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Market Analyst",
|
||||
"description": "Analyzes market trends",
|
||||
"system_prompt": "You are a financial analyst expert.",
|
||||
"model_name": "openai/gpt-4",
|
||||
"role": "worker",
|
||||
"max_loops": 1,
|
||||
"max_tokens": 8192,
|
||||
"temperature": 0.5,
|
||||
"auto_generate_prompt": False,
|
||||
"tools_dictionary": [
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "search_topic",
|
||||
"description": "Conduct market research",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"depth": {
|
||||
"type": "integer",
|
||||
"description": "Search depth (1-3)"
|
||||
},
|
||||
"detailed_queries": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"}
|
||||
}
|
||||
},
|
||||
"required": ["depth", "detailed_queries"]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"max_loops": 1,
|
||||
"swarm_type": "ConcurrentWorkflow",
|
||||
"task": "Analyze top performing tech ETFs",
|
||||
"output_type": "dict"
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
f"{BASE_URL}/v1/swarm/completions",
|
||||
headers=headers,
|
||||
json=payload
|
||||
)
|
||||
return response.json()
|
||||
```
|
||||
|
||||
## Health Check
|
||||
|
||||
Always verify the API status before running swarms:
|
||||
|
||||
```python
|
||||
def check_api_health():
|
||||
response = requests.get(f"{BASE_URL}/health", headers=headers)
|
||||
return response.json()
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Error Handling**: Always implement proper error handling:
|
||||
```python
|
||||
def safe_run_swarm(payload):
|
||||
try:
|
||||
response = requests.post(
|
||||
f"{BASE_URL}/v1/swarm/completions",
|
||||
headers=headers,
|
||||
json=payload
|
||||
)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"Error running swarm: {e}")
|
||||
return None
|
||||
```
|
||||
|
||||
2. **Environment Variables**: Never hardcode API keys
|
||||
3. **Tool Design**: Keep tools simple and focused
|
||||
4. **Testing**: Validate swarm configurations before production use
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
Common issues and solutions:
|
||||
|
||||
1. **API Key Issues**
|
||||
- Verify key is correctly set in `.env`
|
||||
- Check key permissions
|
||||
|
||||
2. **Tool Execution Errors**
|
||||
- Validate tool parameters
|
||||
- Check tool function signatures
|
||||
|
||||
3. **Response Timeout**
|
||||
- Consider reducing max_tokens
|
||||
- Simplify tool complexity
|
||||
|
||||
|
||||
|
||||
```python
|
||||
import os
|
||||
import requests
|
||||
from dotenv import load_dotenv
|
||||
import json
|
||||
|
||||
load_dotenv()
|
||||
|
||||
API_KEY = os.getenv("SWARMS_API_KEY")
|
||||
BASE_URL = "https://api.swarms.world"
|
||||
|
||||
headers = {"x-api-key": API_KEY, "Content-Type": "application/json"}
|
||||
|
||||
|
||||
def run_health_check():
|
||||
response = requests.get(f"{BASE_URL}/health", headers=headers)
|
||||
return response.json()
|
||||
|
||||
|
||||
def run_single_swarm():
|
||||
payload = {
|
||||
"name": "Financial Analysis Swarm",
|
||||
"description": "Market analysis swarm",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Market Analyst",
|
||||
"description": "Analyzes market trends",
|
||||
"system_prompt": "You are a financial analyst expert.",
|
||||
"model_name": "openai/gpt-4o",
|
||||
"role": "worker",
|
||||
"max_loops": 1,
|
||||
"max_tokens": 8192,
|
||||
"temperature": 0.5,
|
||||
"auto_generate_prompt": False,
|
||||
"tools_dictionary": [
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "search_topic",
|
||||
"description": "Conduct an in-depth search on a specified topic or subtopic, generating a comprehensive array of highly detailed search queries tailored to the input parameters.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"depth": {
|
||||
"type": "integer",
|
||||
"description": "Indicates the level of thoroughness for the search. Values range from 1 to 3, where 1 represents a superficial search and 3 signifies an exploration of the topic.",
|
||||
},
|
||||
"detailed_queries": {
|
||||
"type": "array",
|
||||
"description": "An array of highly specific search queries that are generated based on the input query and the specified depth. Each query should be designed to elicit detailed and relevant information from various sources.",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"description": "Each item in this array should represent a unique search query that targets a specific aspect of the main topic, ensuring a comprehensive exploration of the subject matter.",
|
||||
},
|
||||
},
|
||||
},
|
||||
"required": ["depth", "detailed_queries"],
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
"agent_name": "Economic Forecaster",
|
||||
"description": "Predicts economic trends",
|
||||
"system_prompt": "You are an expert in economic forecasting.",
|
||||
"model_name": "gpt-4o",
|
||||
"role": "worker",
|
||||
"max_loops": 1,
|
||||
"max_tokens": 8192,
|
||||
"temperature": 0.5,
|
||||
"auto_generate_prompt": False,
|
||||
"tools_dictionary": [
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "search_topic",
|
||||
"description": "Conduct an in-depth search on a specified topic or subtopic, generating a comprehensive array of highly detailed search queries tailored to the input parameters.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"depth": {
|
||||
"type": "integer",
|
||||
"description": "Indicates the level of thoroughness for the search. Values range from 1 to 3, where 1 represents a superficial search and 3 signifies an exploration of the topic.",
|
||||
},
|
||||
"detailed_queries": {
|
||||
"type": "array",
|
||||
"description": "An array of highly specific search queries that are generated based on the input query and the specified depth. Each query should be designed to elicit detailed and relevant information from various sources.",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"description": "Each item in this array should represent a unique search query that targets a specific aspect of the main topic, ensuring a comprehensive exploration of the subject matter.",
|
||||
},
|
||||
},
|
||||
},
|
||||
"required": ["depth", "detailed_queries"],
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
"max_loops": 1,
|
||||
"swarm_type": "ConcurrentWorkflow",
|
||||
"task": "What are the best etfs and index funds for ai and tech?",
|
||||
"output_type": "dict",
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
f"{BASE_URL}/v1/swarm/completions",
|
||||
headers=headers,
|
||||
json=payload,
|
||||
)
|
||||
|
||||
print(response)
|
||||
print(response.status_code)
|
||||
# return response.json()
|
||||
output = response.json()
|
||||
|
||||
return json.dumps(output, indent=4)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
result = run_single_swarm()
|
||||
print("Swarm Result:")
|
||||
print(result)
|
||||
|
||||
```
|
@ -0,0 +1,10 @@
|
||||
# Example: dump the tool catalog of a local MCP server as JSON.
from swarms.tools.mcp_client import (
    list_tools_for_multiple_urls,
)

tool_listing = list_tools_for_multiple_urls(
    ["http://0.0.0.0:8000/sse"], output_type="json"
)
print(tool_listing)
|
@ -1,90 +1,237 @@
|
||||
import asyncio
|
||||
from typing import Literal, Dict, Any, Union
|
||||
import json
|
||||
from typing import List, Literal, Dict, Any, Union
|
||||
from fastmcp import Client
|
||||
from swarms.utils.any_to_str import any_to_str
|
||||
from swarms.utils.str_to_dict import str_to_dict
|
||||
|
||||
|
||||
def parse_agent_output(
    dictionary: Union[str, Dict[Any, Any]]
) -> tuple[str, Dict[Any, Any]]:
    """
    Parse agent output into a tool name and its parameters.

    Accepts three shapes:
      * OpenAI function-call format:
        ``{"function_call": {"name": ..., "arguments": "<json string>"}}``
      * OpenAI tool-calls format:
        ``{"tool_calls": [{"function": {"name": ..., "arguments": "<json string>"}}, ...]}``
        (only the first tool call is used)
      * Plain dictionary: ``{"name": ..., <param>: <value>, ...}`` where
        every key except ``"name"`` is treated as a parameter.

    Args:
        dictionary: Either a string (converted to a dict first) or a
            dictionary containing the tool invocation.

    Returns:
        tuple[str, Dict[Any, Any]]: The tool name and its parameters.

    Raises:
        ValueError: If the input is not a dict/str, or no recognized
            function-call shape is found, or parsing fails.
    """
    try:
        if isinstance(dictionary, str):
            dictionary = str_to_dict(dictionary)
        elif not isinstance(dictionary, dict):
            raise ValueError("Invalid dictionary")

        # Handle OpenAI function call format.
        if "function_call" in dictionary:
            name = dictionary["function_call"]["name"]
            # arguments is a JSON string, so we need to parse it.
            params = str_to_dict(
                dictionary["function_call"]["arguments"]
            )
            return name, params

        # Handle OpenAI tool calls format (use the first tool call).
        if "tool_calls" in dictionary:
            tool_call = dictionary["tool_calls"][0]
            name = tool_call["function"]["name"]
            params = str_to_dict(tool_call["function"]["arguments"])
            return name, params

        # Handle plain dictionary format: remove the name key and use
        # the remaining key-value pairs as parameters.
        if "name" in dictionary:
            name = dictionary["name"]
            params = dict(dictionary)
            params.pop("name")
            return name, params

        raise ValueError("Invalid function call format")
    except Exception as e:
        raise ValueError(f"Error parsing agent output: {str(e)}")
|
||||
|
||||
|
||||
async def _list_all(url: str):
    """
    Asynchronously fetch the tool list from a single MCP server.

    Args:
        url: The URL of the MCP server to query.

    Returns:
        List of available tools.

    Raises:
        ValueError: If connecting to or querying the server fails.
    """
    try:
        async with Client(url) as client:
            tools = await client.list_tools()
        return tools
    except Exception as e:
        raise ValueError(f"Error listing tools: {str(e)}")
|
||||
|
||||
|
||||
def list_all(url: str, output_type: Literal["str", "json"] = "json"):
    """
    Synchronously list all tools available on a given MCP server.

    Args:
        url: The URL of the MCP server to query.
        output_type: "json" returns a JSON string; any other value
            returns the raw list of tool dictionaries.

    Returns:
        JSON string or list of dictionaries containing tool information.

    Raises:
        ValueError: If there's an error connecting to or querying the server.
    """
    try:
        raw_tools = asyncio.run(_list_all(url))
        dumped = [tool.model_dump() for tool in raw_tools]
        if output_type == "json":
            return json.dumps(dumped, indent=4)
        return dumped
    except Exception as e:
        raise ValueError(f"Error in list_all: {str(e)}")
|
||||
|
||||
|
||||
def list_tools_for_multiple_urls(
    urls: List[str], output_type: Literal["str", "json"] = "json"
):
    """
    List tools available across multiple MCP servers.

    Args:
        urls: List of MCP server URLs to query.
        output_type: Format of the output, either "json" (string) or "str" (list).

    Returns:
        If output_type is "json": JSON string containing all tools with server URLs.
        If output_type is "str": List (one entry per server) of tool lists,
        each tool annotated with its server URL.

    Raises:
        ValueError: If there's an error querying any of the servers.
    """
    try:
        out = []
        for url in urls:
            # BUG FIX: list_all defaults to a JSON *string*; request the
            # list form so each tool dict can be annotated below.
            tools = list_all(url, output_type="str")
            # Add server URL to each tool's data.
            for tool in tools:
                tool["server_url"] = url
            out.append(tools)

        if output_type == "json":
            return json.dumps(out, indent=4)
        else:
            return out
    except Exception as e:
        raise ValueError(
            f"Error listing tools for multiple URLs: {str(e)}"
        )
|
||||
|
||||
|
||||
async def _execute_mcp_tool(
    url: str,
    method: Literal["stdio", "sse"] = "sse",
    parameters: Dict[Any, Any] = None,
    output_type: Literal["str", "dict"] = "str",
    *args,
    **kwargs,
) -> Dict[Any, Any]:
    """
    Asynchronously execute a tool on an MCP server.

    Args:
        url: The URL of the MCP server.
        method: Transport hint ("stdio" or "sse"); currently informational —
            the Client infers the transport from the URL. TODO confirm.
        parameters: Dictionary containing tool name and parameters
            (see parse_agent_output for the accepted shapes).
        output_type: "str" stringifies the result; "dict" returns it raw.
        *args: Additional positional arguments for the Client.
        **kwargs: Additional keyword arguments for the Client.

    Returns:
        Tool execution results, stringified or raw depending on output_type.

    Raises:
        ValueError: If the output type is invalid or tool execution fails.
    """
    try:
        name, params = parse_agent_output(parameters)

        if output_type == "str":
            async with Client(url, *args, **kwargs) as client:
                out = await client.call_tool(
                    name=name,
                    arguments=params,
                )
                return any_to_str(out)
        elif output_type == "dict":
            async with Client(url, *args, **kwargs) as client:
                out = await client.call_tool(
                    name=name,
                    arguments=params,
                )
                return out
        else:
            raise ValueError(f"Invalid output type: {output_type}")
        # NOTE(review): dead code that serialized `out` after the returns
        # above was removed — it was unreachable in every branch.
    except Exception as e:
        raise ValueError(f"Error executing MCP tool: {str(e)}")
|
||||
|
||||
|
||||
def execute_mcp_tool(
    url: str,
    tool_name: str = None,
    method: Literal["stdio", "sse"] = "sse",
    parameters: Dict[Any, Any] = None,
    output_type: Literal["str", "dict"] = "str",
) -> Dict[Any, Any]:
    """
    Synchronously execute a tool on an MCP server.

    Args:
        url: The URL of the MCP server.
        tool_name: Unused — the tool name is taken from ``parameters``;
            kept for backward compatibility with existing callers.
        method: Transport hint forwarded to the async executor.
        parameters: Dictionary containing tool name and parameters.
        output_type: "str" or "dict" result format, forwarded as-is.

    Returns:
        Dictionary (or string) containing the tool execution results.

    Raises:
        ValueError: If tool execution fails.
    """
    try:
        # BUG FIX: forward method/output_type (previously dropped) and do
        # NOT forward tool_name, which _execute_mcp_tool does not accept.
        return asyncio.run(
            _execute_mcp_tool(
                url=url,
                method=method,
                parameters=parameters,
                output_type=output_type,
            )
        )
    except Exception as e:
        raise ValueError(f"Error in execute_mcp_tool: {str(e)}")
|
||||
|
||||
|
||||
def find_and_execute_tool(
    urls: List[str], tool_name: str, parameters: Dict[Any, Any]
) -> Dict[Any, Any]:
    """
    Find a tool across multiple servers and execute it with the given parameters.

    Args:
        urls: List of server URLs to search through.
        tool_name: Name of the tool to find and execute.
        parameters: Parameters to pass to the tool.

    Returns:
        Dict containing the tool execution results.

    Raises:
        ValueError: If tool is not found on any server or execution fails.
    """
    try:
        # Search for tool across all servers.
        for url in urls:
            try:
                # BUG FIX: request the list form — list_all defaults to a
                # JSON string, which made the membership test below raise
                # and every server get silently skipped.
                tools = list_all(url, output_type="str")
                # Check if tool exists on this server.
                if any(tool["name"] == tool_name for tool in tools):
                    # parse_agent_output expects the name alongside params.
                    tool_params = {"name": tool_name, **parameters}
                    # Execute tool on this server.
                    return execute_mcp_tool(
                        url=url, parameters=tool_params
                    )
            except Exception:
                # Skip servers that fail and continue searching.
                continue

        raise ValueError(
            f"Tool '{tool_name}' not found on any provided servers"
        )
    except Exception as e:
        raise ValueError(f"Error in find_and_execute_tool: {str(e)}")
|
||||
|
@ -0,0 +1,8 @@
|
||||
# Example: invoke the "add" tool on a local MCP server and print the result.
from swarms.tools.mcp_client import execute_mcp_tool

result = execute_mcp_tool(
    "http://0.0.0.0:8000/sse",
    parameters={"name": "add", "a": 1, "b": 2},
)
print(result)
|
Loading…
Reference in new issue