fix(calculator): update math calculator prompt, enhance error handling and logging

pull/819/head
DP37 3 months ago committed by ascender1729
parent ea66e78154
commit a612352abd

@ -0,0 +1,83 @@
from swarms import Agent
from swarms.tools.mcp_integration import MCPServerSseParams
from loguru import logger
# System prompt for the math agent. It instructs the model to emit each tool
# call as a single-line JSON object matching the MCP math server's tools
# (add / multiply / divide) so the MCP integration layer can parse them.
# NOTE: the string below is runtime behavior — keep the JSON templates intact.
MATH_AGENT_PROMPT = """
You are a helpful math calculator assistant.
Your role is to understand natural language math requests and perform calculations.
When asked to perform calculations:
1. Determine the operation (add, multiply, or divide)
2. Extract the numbers from the request
3. Use the appropriate math operation tool
FORMAT YOUR TOOL CALLS AS JSON with this format:
{"tool_name": "add", "a": <first_number>, "b": <second_number>}
or
{"tool_name": "multiply", "a": <first_number>, "b": <second_number>}
or
{"tool_name": "divide", "a": <first_number>, "b": <second_number>}
Always respond with a tool call in JSON format first, followed by a brief explanation.
"""
def initialize_math_system():
    """Build and return a math-solving Agent wired to the local MCP server.

    Configures an SSE connection to the calculator MCP server on port 8000
    and attaches it to a single-loop GPT-3.5 agent that uses the math
    system prompt.

    Returns:
        Agent: A ready-to-run math assistant agent.
    """
    # SSE transport settings for the locally hosted math tool server.
    server_params = MCPServerSseParams(
        url="http://0.0.0.0:8000",
        headers={"Content-Type": "application/json"},
        timeout=5.0,
        sse_read_timeout=30.0,
    )
    # The Agent expects its MCP servers as a list, even for a single server.
    return Agent(
        agent_name="Math Assistant",
        agent_description="Friendly math calculator",
        system_prompt=MATH_AGENT_PROMPT,
        model_name="gpt-3.5-turbo",
        max_loops=1,
        mcp_servers=[server_params],
        verbose=True,  # surface detailed run information while debugging
    )
def main():
    """Interactive REPL: read math questions and run them through the agent.

    Loops until the user types 'exit' or interrupts with Ctrl+C. Per-query
    failures are reported and the loop continues; initialization failures
    abort the program with an error message.
    """
    try:
        logger.info("Initializing math system...")
        agent = initialize_math_system()
        print("\nMath Calculator Ready!")
        print("Ask me any math question!")
        print("Examples: 'what is 5 plus 3?' or 'can you multiply 4 and 6?'")
        print("Type 'exit' to quit\n")
        while True:
            try:
                user_input = input("What would you like to calculate? ").strip()
                # Skip blank lines; stop cleanly on the exit keyword.
                if not user_input:
                    continue
                if user_input.lower() == "exit":
                    break
                logger.info(f"Processing query: {user_input}")
                answer = agent.run(user_input)
                print(f"\nResult: {answer}\n")
            except KeyboardInterrupt:
                print("\nGoodbye!")
                break
            except Exception as e:
                # Keep the REPL alive on per-query errors.
                logger.error(f"Error processing query: {e}")
                print(f"Sorry, there was an error: {str(e)}")
    except Exception as e:
        logger.error(f"System initialization error: {e}")
        print(f"Failed to start the math system: {str(e)}")

@ -1,11 +1,31 @@
from swarms import Agent from swarms import Agent
from swarms.tools.mcp_integration import MCPServerSseParams from swarms.tools.mcp_integration import MCPServerSseParams
from swarms.prompts.agent_prompts import MATH_AGENT_PROMPT
from loguru import logger from loguru import logger
# Comprehensive math prompt that encourages proper JSON formatting
MATH_AGENT_PROMPT = """
You are a helpful math calculator assistant.
Your role is to understand natural language math requests and perform calculations.
When asked to perform calculations:
1. Determine the operation (add, multiply, or divide)
2. Extract the numbers from the request
3. Use the appropriate math operation tool
FORMAT YOUR TOOL CALLS AS JSON with this format:
{"tool_name": "add", "a": <first_number>, "b": <second_number>}
or
{"tool_name": "multiply", "a": <first_number>, "b": <second_number>}
or
{"tool_name": "divide", "a": <first_number>, "b": <second_number>}
Always respond with a tool call in JSON format first, followed by a brief explanation.
"""
def initialize_math_system(): def initialize_math_system():
"""Initialize the math agent with MCP server configuration.""" """Initialize the math agent with MCP server configuration."""
# Configure the MCP server connection
math_server = MCPServerSseParams( math_server = MCPServerSseParams(
url="http://0.0.0.0:8000", url="http://0.0.0.0:8000",
headers={"Content-Type": "application/json"}, headers={"Content-Type": "application/json"},
@ -13,18 +33,22 @@ def initialize_math_system():
sse_read_timeout=30.0 sse_read_timeout=30.0
) )
# Create the agent with the MCP server configuration
math_agent = Agent( math_agent = Agent(
agent_name="Math Assistant", agent_name="Math Assistant",
agent_description="Friendly math calculator", agent_description="Friendly math calculator",
system_prompt=MATH_AGENT_PROMPT, system_prompt=MATH_AGENT_PROMPT,
max_loops=1, max_loops=1,
mcp_servers=[math_server], mcp_servers=[math_server], # Pass MCP server config as a list
model_name="gpt-3.5-turbo" model_name="gpt-3.5-turbo",
verbose=True # Enable verbose mode to see more details
) )
return math_agent return math_agent
def main(): def main():
try:
logger.info("Initializing math system...")
math_agent = initialize_math_system() math_agent = initialize_math_system()
print("\nMath Calculator Ready!") print("\nMath Calculator Ready!")
@ -40,6 +64,7 @@ def main():
if query.lower() == 'exit': if query.lower() == 'exit':
break break
logger.info(f"Processing query: {query}")
result = math_agent.run(query) result = math_agent.run(query)
print(f"\nResult: {result}\n") print(f"\nResult: {result}\n")
@ -47,7 +72,12 @@ def main():
print("\nGoodbye!") print("\nGoodbye!")
break break
except Exception as e: except Exception as e:
logger.error(f"Error: {e}") logger.error(f"Error processing query: {e}")
print(f"Sorry, there was an error: {str(e)}")
except Exception as e:
logger.error(f"System initialization error: {e}")
print(f"Failed to start the math system: {str(e)}")
if __name__ == "__main__": if __name__ == "__main__":
main() main()

@ -1,38 +1,79 @@
from fastmcp import FastMCP from fastmcp import FastMCP
from loguru import logger from loguru import logger
import time
mcp = FastMCP( # Create the MCP server
host="0.0.0.0", mcp = FastMCP(host="0.0.0.0",
port=8000, port=8000,
transport="sse", transport="sse",
require_session_id=False require_session_id=False)
)
# Define tools with proper type hints and docstrings
@mcp.tool() @mcp.tool()
def add(a: int, b: int) -> str: def add(a: int, b: int) -> str:
"""Add two numbers.""" """Add two numbers.
Args:
a (int): First number
b (int): Second number
Returns:
str: A message containing the sum
"""
logger.info(f"Adding {a} and {b}")
result = a + b result = a + b
return f"The sum of {a} and {b} is {result}" return f"The sum of {a} and {b} is {result}"
@mcp.tool() @mcp.tool()
def multiply(a: int, b: int) -> str: def multiply(a: int, b: int) -> str:
"""Multiply two numbers.""" """Multiply two numbers.
Args:
a (int): First number
b (int): Second number
Returns:
str: A message containing the product
"""
logger.info(f"Multiplying {a} and {b}")
result = a * b result = a * b
return f"The product of {a} and {b} is {result}" return f"The product of {a} and {b} is {result}"
@mcp.tool() @mcp.tool()
def divide(a: int, b: int) -> str: def divide(a: int, b: int) -> str:
"""Divide two numbers.""" """Divide two numbers.
Args:
a (int): Numerator
b (int): Denominator
Returns:
str: A message containing the division result or an error message
"""
logger.info(f"Dividing {a} by {b}")
if b == 0: if b == 0:
logger.warning("Division by zero attempted")
return "Cannot divide by zero" return "Cannot divide by zero"
result = a / b result = a / b
return f"{a} divided by {b} is {result}" return f"{a} divided by {b} is {result}"
if __name__ == "__main__": if __name__ == "__main__":
try: try:
logger.info("Starting math server on http://0.0.0.0:8000") logger.info("Starting math server on http://0.0.0.0:8000")
print("Math MCP Server is running. Press Ctrl+C to stop.")
# Add a small delay to ensure logging is complete before the server starts
time.sleep(0.5)
# Run the MCP server
mcp.run() mcp.run()
except KeyboardInterrupt:
logger.info("Server shutdown requested")
print("\nShutting down server...")
except Exception as e: except Exception as e:
logger.error(f"Server error: {e}") logger.error(f"Server error: {e}")
raise raise

@ -1,14 +1,13 @@
# Agent prompts for MCP testing and interactions # Agent prompts for MCP testing and interactions
MATH_AGENT_PROMPT = '''You are a helpful math calculator assistant. # Keeping the original format that already has JSON formatting
MATH_AGENT_PROMPT = """You are a helpful math calculator assistant.
Your role is to understand natural language math requests and perform calculations. Your role is to understand natural language math requests and perform calculations.
When asked to perform calculations: When asked to perform calculations:
1. Determine the operation (add, multiply, or divide) 1. Determine the operation (add, multiply, or divide)
2. Extract the numbers from the request 2. Extract the numbers from the request
3. Use the appropriate math operation tool 3. Use the appropriate math operation tool
Format your tool calls as JSON with the tool_name and parameters.
Respond conversationally but be concise.
Example: Example:
User: "what is 5 plus 3?" User: "what is 5 plus 3?"
@ -17,7 +16,8 @@ You: Using the add operation for 5 and 3
User: "multiply 4 times 6" User: "multiply 4 times 6"
You: Using multiply for 4 and 6 You: Using multiply for 4 and 6
{"tool_name": "multiply", "a": 4, "b": 6}''' {"tool_name": "multiply", "a": 4, "b": 6}
"""
FINANCE_AGENT_PROMPT = """You are a financial analysis agent with access to stock market data services. FINANCE_AGENT_PROMPT = """You are a financial analysis agent with access to stock market data services.
Key responsibilities: Key responsibilities:
@ -28,42 +28,40 @@ Key responsibilities:
Use the available MCP tools to fetch real market data rather than making assumptions.""" Use the available MCP tools to fetch real market data rather than making assumptions."""
def generate_agent_role_prompt(agent): def generate_agent_role_prompt(agent):
"""Generates the agent role prompt. """Generates the agent role prompt.
Args: agent (str): The type of the agent. Args: agent (str): The type of the agent.
Returns: str: The agent role prompt. Returns: str: The agent role prompt.
""" """
prompts = { prompts = {
"Finance Agent": ( "Finance Agent":
"You are a seasoned finance analyst AI assistant. Your" ("You are a seasoned finance analyst AI assistant. Your"
" primary goal is to compose comprehensive, astute," " primary goal is to compose comprehensive, astute,"
" impartial, and methodically arranged financial reports" " impartial, and methodically arranged financial reports"
" based on provided data and trends." " based on provided data and trends."),
), "Travel Agent":
"Travel Agent": ( ("You are a world-travelled AI tour guide assistant. Your"
"You are a world-travelled AI tour guide assistant. Your"
" main purpose is to draft engaging, insightful," " main purpose is to draft engaging, insightful,"
" unbiased, and well-structured travel reports on given" " unbiased, and well-structured travel reports on given"
" locations, including history, attractions, and cultural" " locations, including history, attractions, and cultural"
" insights." " insights."),
), "Academic Research Agent":
"Academic Research Agent": ( ("You are an AI academic research assistant. Your primary"
"You are an AI academic research assistant. Your primary"
" responsibility is to create thorough, academically" " responsibility is to create thorough, academically"
" rigorous, unbiased, and systematically organized" " rigorous, unbiased, and systematically organized"
" reports on a given research topic, following the" " reports on a given research topic, following the"
" standards of scholarly work." " standards of scholarly work."),
), "Default Agent":
"Default Agent": ( ("You are an AI critical thinker research assistant. Your"
"You are an AI critical thinker research assistant. Your"
" sole purpose is to write well written, critically" " sole purpose is to write well written, critically"
" acclaimed, objective and structured reports on given" " acclaimed, objective and structured reports on given"
" text." " text."),
),
} }
return prompts.get(agent, "No such agent") return prompts.get(agent, "No such agent")
def generate_report_prompt(question, research_summary): def generate_report_prompt(question, research_summary):
"""Generates the report prompt for the given question and research summary. """Generates the report prompt for the given question and research summary.
Args: question (str): The question to generate the report prompt for Args: question (str): The question to generate the report prompt for
@ -71,16 +69,15 @@ def generate_report_prompt(question, research_summary):
Returns: str: The report prompt for the given question and research summary Returns: str: The report prompt for the given question and research summary
""" """
return ( return (f'"""{research_summary}""" Using the above information,'
f'"""{research_summary}""" Using the above information,'
f' answer the following question or topic: "{question}" in a' f' answer the following question or topic: "{question}" in a'
" detailed report -- The report should focus on the answer" " detailed report -- The report should focus on the answer"
" to the question, should be well structured, informative," " to the question, should be well structured, informative,"
" in depth, with facts and numbers if available, a minimum" " in depth, with facts and numbers if available, a minimum"
" of 1,200 words and with markdown syntax and apa format." " of 1,200 words and with markdown syntax and apa format."
" Write all source urls at the end of the report in apa" " Write all source urls at the end of the report in apa"
" format" " format")
)
def generate_search_queries_prompt(question): def generate_search_queries_prompt(question):
"""Generates the search queries prompt for the given question. """Generates the search queries prompt for the given question.
@ -88,12 +85,11 @@ def generate_search_queries_prompt(question):
Returns: str: The search queries prompt for the given question Returns: str: The search queries prompt for the given question
""" """
return ( return ("Write 4 google search queries to search online that form an"
"Write 4 google search queries to search online that form an"
f' objective opinion from the following: "{question}"You must' f' objective opinion from the following: "{question}"You must'
" respond with a list of strings in the following format:" " respond with a list of strings in the following format:"
' ["query 1", "query 2", "query 3", "query 4"]' ' ["query 1", "query 2", "query 3", "query 4"]')
)
def generate_resource_report_prompt(question, research_summary): def generate_resource_report_prompt(question, research_summary):
"""Generates the resource report prompt for the given question and research summary. """Generates the resource report prompt for the given question and research summary.
@ -105,8 +101,7 @@ def generate_resource_report_prompt(question, research_summary):
Returns: Returns:
str: The resource report prompt for the given question and research summary. str: The resource report prompt for the given question and research summary.
""" """
return ( return (f'"""{research_summary}""" Based on the above information,'
f'"""{research_summary}""" Based on the above information,'
" generate a bibliography recommendation report for the" " generate a bibliography recommendation report for the"
f' following question or topic: "{question}". The report' f' following question or topic: "{question}". The report'
" should provide a detailed analysis of each recommended" " should provide a detailed analysis of each recommended"
@ -116,8 +111,8 @@ def generate_resource_report_prompt(question, research_summary):
" Ensure that the report is well-structured, informative," " Ensure that the report is well-structured, informative,"
" in-depth, and follows Markdown syntax. Include relevant" " in-depth, and follows Markdown syntax. Include relevant"
" facts, figures, and numbers whenever available. The report" " facts, figures, and numbers whenever available. The report"
" should have a minimum length of 1,200 words." " should have a minimum length of 1,200 words.")
)
def generate_outline_report_prompt(question, research_summary): def generate_outline_report_prompt(question, research_summary):
"""Generates the outline report prompt for the given question and research summary. """Generates the outline report prompt for the given question and research summary.
@ -126,8 +121,7 @@ def generate_outline_report_prompt(question, research_summary):
Returns: str: The outline report prompt for the given question and research summary Returns: str: The outline report prompt for the given question and research summary
""" """
return ( return (f'"""{research_summary}""" Using the above information,'
f'"""{research_summary}""" Using the above information,'
" generate an outline for a research report in Markdown" " generate an outline for a research report in Markdown"
f' syntax for the following question or topic: "{question}".' f' syntax for the following question or topic: "{question}".'
" The outline should provide a well-structured framework for" " The outline should provide a well-structured framework for"
@ -135,8 +129,8 @@ def generate_outline_report_prompt(question, research_summary):
" subsections, and key points to be covered. The research" " subsections, and key points to be covered. The research"
" report should be detailed, informative, in-depth, and a" " report should be detailed, informative, in-depth, and a"
" minimum of 1,200 words. Use appropriate Markdown syntax to" " minimum of 1,200 words. Use appropriate Markdown syntax to"
" format the outline and ensure readability." " format the outline and ensure readability.")
)
def generate_concepts_prompt(question, research_summary): def generate_concepts_prompt(question, research_summary):
"""Generates the concepts prompt for the given question. """Generates the concepts prompt for the given question.
@ -145,15 +139,14 @@ def generate_concepts_prompt(question, research_summary):
Returns: str: The concepts prompt for the given question Returns: str: The concepts prompt for the given question
""" """
return ( return (f'"""{research_summary}""" Using the above information,'
f'"""{research_summary}""" Using the above information,'
" generate a list of 5 main concepts to learn for a research" " generate a list of 5 main concepts to learn for a research"
f' report on the following question or topic: "{question}".' f' report on the following question or topic: "{question}".'
" The outline should provide a well-structured frameworkYou" " The outline should provide a well-structured frameworkYou"
" must respond with a list of strings in the following" " must respond with a list of strings in the following"
' format: ["concepts 1", "concepts 2", "concepts 3",' ' format: ["concepts 1", "concepts 2", "concepts 3",'
' "concepts 4, concepts 5"]' ' "concepts 4, concepts 5"]')
)
def generate_lesson_prompt(concept): def generate_lesson_prompt(concept):
""" """
@ -164,16 +157,15 @@ def generate_lesson_prompt(concept):
str: The lesson prompt for the given concept. str: The lesson prompt for the given concept.
""" """
prompt = ( prompt = (f"generate a comprehensive lesson about {concept} in Markdown"
f"generate a comprehensive lesson about {concept} in Markdown"
f" syntax. This should include the definitionof {concept}," f" syntax. This should include the definitionof {concept},"
" its historical background and development, its" " its historical background and development, its"
" applications or uses in differentfields, and notable" " applications or uses in differentfields, and notable"
f" events or facts related to {concept}." f" events or facts related to {concept}.")
)
return prompt return prompt
def get_report_by_type(report_type): def get_report_by_type(report_type):
report_type_mapping = { report_type_mapping = {
"research_report": generate_report_prompt, "research_report": generate_report_prompt,

@ -2647,18 +2647,7 @@ class Agent:
else: else:
return str(response) return str(response)
async def mcp_execution_flow(self, tool_call):
"""Execute MCP tool call flow"""
try:
result = await execute_mcp_tool(
url=self.mcp_servers[0]["url"],
parameters=tool_call,
output_type="str",
)
return result
except Exception as e:
logger.error(f"Error executing tool call: {e}")
return f"Error executing tool call: {e}"
def sentiment_and_evaluator(self, response: str): def sentiment_and_evaluator(self, response: str):
if self.evaluator: if self.evaluator:
@ -2689,3 +2678,136 @@ class Agent:
role="Output Cleaner", role="Output Cleaner",
content=response, content=response,
) )
async def amcp_execution_flow(self, response: str) -> str:
    """Async implementation of the MCP execution flow.

    Parses the LLM response as one or more JSON tool calls (falling back to
    treating it as natural language), executes each call against every
    configured MCP server, records outcomes in short-term memory, and
    returns the combined result text.

    Args:
        response (str): The response from the LLM containing tool calls
            or natural language.

    Returns:
        str: The result of executing the tool calls with preserved
            formatting, or an error description.
    """
    try:
        # Imported here (not at module top) to avoid a circular import,
        # but hoisted out of the loop so it runs once per call.
        from swarms.tools.mcp_integration import abatch_mcp_flow

        # Prefer structured JSON tool calls; fall back to the raw text.
        try:
            tool_calls = json.loads(response)
            logger.debug(f"Successfully parsed response as JSON: {tool_calls}")
        except json.JSONDecodeError:
            tool_calls = [response]
            logger.debug("Could not parse response as JSON, treating as natural language")

        # Normalize a single tool call into a one-element list.
        if isinstance(tool_calls, dict):
            tool_calls = [tool_calls]

        results = []
        errors = []
        logger.debug(
            f"Executing {len(tool_calls)} tool calls against {len(self.mcp_servers)} MCP servers"
        )
        for tool_call in tool_calls:
            try:
                logger.debug(f"Executing tool call: {tool_call}")
                # Execute the tool call against all configured MCP servers.
                result = await abatch_mcp_flow(self.mcp_servers, tool_call)
                if result:
                    logger.debug(f"Got result from MCP servers: {result}")
                    results.extend(result)
                    # Keep successful tool output available as conversation context.
                    self.short_memory.add(
                        role="assistant",
                        content=f"Tool execution result: {result}",
                    )
                else:
                    error_msg = "No result from tool execution"
                    errors.append(error_msg)
                    logger.debug(error_msg)
                    self.short_memory.add(role="error", content=error_msg)
            except Exception as e:
                error_msg = f"Error executing tool call: {str(e)}"
                errors.append(error_msg)
                logger.error(error_msg)
                self.short_memory.add(role="error", content=error_msg)

        # Prefer results over errors when both are present.
        if results:
            if len(results) == 1:
                # Return single results as-is to preserve formatting.
                return results[0]
            return "\n".join(
                f"Result {i}: {result}" for i, result in enumerate(results, 1)
            )
        if errors:
            if len(errors) == 1:
                return errors[0]
            return "Multiple errors occurred:\n" + "\n".join(
                f"- {err}" for err in errors
            )
        return "No results or errors returned"
    except Exception as e:
        error_msg = f"Error in MCP execution flow: {str(e)}"
        logger.error(error_msg)
        self.short_memory.add(role="error", content=error_msg)
        return error_msg
def mcp_execution_flow(self, response: str) -> str:
    """Synchronous wrapper around the async MCP execution flow.

    Safe to call from both sync and async contexts. When an event loop is
    already running in this thread, the coroutine is executed on a separate
    worker thread with its own loop — blocking the running loop's thread
    with ``future.result()`` (as ``run_coroutine_threadsafe`` would require
    here) would deadlock, since the coroutine could never be scheduled.
    With no running loop, ``asyncio.run`` is used directly.

    Args:
        response (str): The response from the LLM containing tool calls or
            natural language.

    Returns:
        str: The result of executing the tool calls with preserved
            formatting, or an error description.
    """
    try:
        try:
            running_loop = asyncio.get_running_loop()
        except RuntimeError:
            # No event loop running in this thread.
            running_loop = None

        if running_loop is not None:
            # Already inside a running loop: run the coroutine on its own
            # thread/loop so we can block here without deadlocking.
            logger.debug("Running MCP flow on a worker thread")
            from concurrent.futures import ThreadPoolExecutor

            executor = ThreadPoolExecutor(max_workers=1)
            try:
                future = executor.submit(
                    asyncio.run, self.amcp_execution_flow(response)
                )
                return future.result(timeout=30)  # avoid hanging forever
            finally:
                # Don't block on the worker if the result timed out.
                executor.shutdown(wait=False)

        # No loop running: create one for this call.
        logger.debug("Running MCP flow with asyncio.run")
        return asyncio.run(self.amcp_execution_flow(response))
    except Exception as e:
        error_msg = f"Error in MCP execution flow wrapper: {str(e)}"
        logger.error(error_msg)
        return error_msg

@ -4,7 +4,7 @@ import abc
import asyncio import asyncio
from contextlib import AbstractAsyncContextManager, AsyncExitStack from contextlib import AbstractAsyncContextManager, AsyncExitStack
from pathlib import Path from pathlib import Path
from typing import Any, Dict, List, Optional, Literal from typing import Any, Dict, List, Optional, Literal, Union
from typing_extensions import NotRequired, TypedDict from typing_extensions import NotRequired, TypedDict
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
@ -212,9 +212,11 @@ class MCPServerSse(_MCPServerWithClientSession):
async def call_tool_fast( async def call_tool_fast(
server: MCPServerSse, payload: Dict[str, Any] | str server: MCPServerSse, payload: Dict[str, Any] | str
) -> Any: ) -> Any:
"""Async function to call a tool on a server with proper cleanup."""
try: try:
await server.connect() await server.connect()
result = await server.call_tool(arguments=payload if isinstance(payload, dict) else None) arguments = payload if isinstance(payload, dict) else None
result = await server.call_tool(arguments=arguments)
return result return result
finally: finally:
await server.cleanup() await server.cleanup()
@ -223,6 +225,7 @@ async def call_tool_fast(
async def mcp_flow_get_tool_schema( async def mcp_flow_get_tool_schema(
params: MCPServerSseParams, params: MCPServerSseParams,
) -> Any: ) -> Any:
"""Async function to get tool schema from MCP server."""
async with MCPServerSse(params) as server: async with MCPServerSse(params) as server:
tools = await server.list_tools() tools = await server.list_tools()
return tools return tools
@ -232,6 +235,7 @@ async def mcp_flow(
params: MCPServerSseParams, params: MCPServerSseParams,
function_call: Dict[str, Any] | str, function_call: Dict[str, Any] | str,
) -> Any: ) -> Any:
"""Async function to call a tool with given parameters."""
async with MCPServerSse(params) as server: async with MCPServerSse(params) as server:
return await call_tool_fast(server, function_call) return await call_tool_fast(server, function_call)
@ -239,17 +243,78 @@ async def mcp_flow(
async def _call_one_server( async def _call_one_server(
params: MCPServerSseParams, payload: Dict[str, Any] | str params: MCPServerSseParams, payload: Dict[str, Any] | str
) -> Any: ) -> Any:
"""Helper function to call a single MCP server."""
server = MCPServerSse(params) server = MCPServerSse(params)
try: try:
await server.connect() await server.connect()
return await server.call_tool(arguments=payload if isinstance(payload, dict) else None) arguments = payload if isinstance(payload, dict) else None
return await server.call_tool(arguments=arguments)
finally: finally:
await server.cleanup() await server.cleanup()
async def abatch_mcp_flow(
    params: List[MCPServerSseParams], payload: Dict[str, Any] | str
) -> List[Any]:
    """Async function to execute a batch of MCP calls concurrently.

    Each server is called with the same payload. Per-server failures are
    converted to error strings so one bad server does not discard the
    results of the others (the previous bare ``gather`` aborted the whole
    batch on the first exception, despite the stated intent of returning
    partial results).

    Args:
        params (List[MCPServerSseParams]): List of MCP server configurations
        payload (Dict[str, Any] | str): The payload to send to each server

    Returns:
        List[Any]: One entry per server — the tool result, or an error
            string if that server's call failed.
    """
    if not params:
        logger.warning("No MCP servers provided for batch operation")
        return []
    # return_exceptions=True preserves partial results: a failing server
    # yields its exception object instead of cancelling the whole batch.
    outcomes = await asyncio.gather(
        *[_call_one_server(p, payload) for p in params],
        return_exceptions=True,
    )
    results: List[Any] = []
    for outcome in outcomes:
        if isinstance(outcome, BaseException):
            logger.error(f"Error in abatch_mcp_flow: {outcome}")
            results.append(f"Error in batch operation: {outcome}")
        else:
            results.append(outcome)
    return results
def batch_mcp_flow(
    params: List[MCPServerSseParams], payload: Dict[str, Any] | str
) -> List[Any]:
    """Sync wrapper for batch MCP operations.

    Safe to call from both sync and async contexts. When an event loop is
    already running in this thread, the batch coroutine is executed on a
    separate worker thread with its own loop — blocking this thread on a
    ``run_coroutine_threadsafe`` future would deadlock the running loop.
    With no running loop, ``asyncio.run`` is used directly.

    Args:
        params (List[MCPServerSseParams]): List of MCP server configurations
        payload (Dict[str, Any] | str): The payload to send to each server

    Returns:
        List[Any]: Results from all MCP servers, or a one-element list with
            an error string if the batch could not be run at all.
    """
    if not params:
        logger.warning("No MCP servers provided for batch operation")
        return []
    try:
        try:
            running_loop = asyncio.get_running_loop()
        except RuntimeError:
            # No event loop running in this thread.
            running_loop = None

        if running_loop is not None:
            # Already inside a running loop: run the batch on its own
            # thread/loop so we can block here without deadlocking.
            from concurrent.futures import ThreadPoolExecutor

            executor = ThreadPoolExecutor(max_workers=1)
            try:
                future = executor.submit(
                    asyncio.run, abatch_mcp_flow(params, payload)
                )
                return future.result(timeout=30)  # avoid hanging forever
            finally:
                # Don't block on the worker if the result timed out.
                executor.shutdown(wait=False)

        # No loop running: create one for this call.
        return asyncio.run(abatch_mcp_flow(params, payload))
    except Exception as e:
        logger.error(f"Error in batch_mcp_flow: {e}")
        return [f"Error in batch operation: {str(e)}"]
Loading…
Cancel
Save