fix: correct MCP server setup and enhance response handling logic

- Corrected initialization issues in `math_server.py` and `calc_server.py`
- Improved response formatting and delivery in `multi_server_test.py`
- Resolved issue where agent output showed raw stream wrapper instead of actual response
pull/819/head
Pavan Kumar 2 days ago committed by ascender1729
parent c75fa97cb7
commit 885a50c985

@ -29,5 +29,5 @@ def percentage(value: float, percent: float) -> float:
if __name__ == "__main__":
    # Entry point: start the financial-calculation MCP server over SSE.
    print("Starting Calculation Server on port 6275...")
    # Single LLM initialization with an explicit model and a low temperature
    # for deterministic financial answers. (The diff residue showed a second,
    # earlier init without model_name plus a duplicate mcp.run call with
    # host="0.0.0.0"; only the intended post-change lines are kept — mcp.run
    # blocks, so a second call would never execute anyway.)
    llm = LiteLLM(model_name="gpt-4", system_prompt="You are a financial calculation expert.", temperature=0.3)
    # NOTE(review): host is no longer pinned to 0.0.0.0 — confirm the default
    # bind address of mcp.run is acceptable for this deployment.
    mcp.run(transport="sse", port=6275)

@ -66,5 +66,5 @@ def divide(a: float, b: float) -> float:
if __name__ == "__main__":
    # Entry point: start the math MCP server over SSE.
    print("Starting Math Server on port 6274...")
    # Single LLM initialization pinning the model and temperature, matching
    # calc_server.py's configuration. (The diff residue showed an earlier
    # default-model init plus a duplicate mcp.run call with host="0.0.0.0";
    # only the intended post-change lines are kept — mcp.run blocks, so a
    # second call would never execute anyway.)
    llm = LiteLLM(model_name="gpt-4", temperature=0.3)
    # NOTE(review): host is no longer pinned to 0.0.0.0 — confirm the default
    # bind address of mcp.run is acceptable for this deployment.
    mcp.run(transport="sse", port=6274)

@ -89,10 +89,13 @@ def main():
print("-" * 50)
else:
response = super_agent.run(user_input)
if isinstance(response, str):
print("\nSuper Agent Response:")
print("-" * 50)
print(f"Response: {response}")
print(f"Calculation Result: {response}")
print("-" * 50)
else:
print("\nError: Unexpected response format")
except KeyboardInterrupt:
print("\nExiting gracefully...")

Loading…
Cancel
Save