diff --git a/examples/mcp_example/calc_server.py b/examples/mcp_example/calc_server.py
index 07d7391b..cfae9e01 100644
--- a/examples/mcp_example/calc_server.py
+++ b/examples/mcp_example/calc_server.py
@@ -29,5 +29,5 @@ def percentage(value: float, percent: float) -> float:
 
 if __name__ == "__main__":
     print("Starting Calculation Server on port 6275...")
-    llm = LiteLLM(system_prompt="You are a financial calculation expert.")
-    mcp.run(transport="sse", host="0.0.0.0", port=6275)
+    llm = LiteLLM(model_name="gpt-4", system_prompt="You are a financial calculation expert.", temperature=0.3)
+    mcp.run(transport="sse", port=6275)
diff --git a/examples/mcp_example/math_server.py b/examples/mcp_example/math_server.py
index e2b25156..bed976cb 100644
--- a/examples/mcp_example/math_server.py
+++ b/examples/mcp_example/math_server.py
@@ -66,5 +66,5 @@ def divide(a: float, b: float) -> float:
 
 if __name__ == "__main__":
     print("Starting Math Server on port 6274...")
-    llm = LiteLLM()  # This will use the default model
-    mcp.run(transport="sse", host="0.0.0.0", port=6274)
\ No newline at end of file
+    llm = LiteLLM(model_name="gpt-4", temperature=0.3)
+    mcp.run(transport="sse", port=6274)
\ No newline at end of file
diff --git a/examples/mcp_example/multi_server_test.py b/examples/mcp_example/multi_server_test.py
index 21c2a098..114522b6 100644
--- a/examples/mcp_example/multi_server_test.py
+++ b/examples/mcp_example/multi_server_test.py
@@ -89,10 +89,13 @@ def main():
                 print("-" * 50)
             else:
                 response = super_agent.run(user_input)
-                print("\nSuper Agent Response:")
-                print("-" * 50)
-                print(f"Response: {response}")
-                print("-" * 50)
+                if isinstance(response, str):
+                    print("\nSuper Agent Response:")
+                    print("-" * 50)
+                    print(f"Calculation Result: {response}")
+                    print("-" * 50)
+                else:
+                    print("\nError: Unexpected response format")
 
         except KeyboardInterrupt:
             print("\nExiting gracefully...")