chore: set model name to 'gpt-4o-mini' in server and test configs

- Updated `math_server.py` and `test_integration.py` to explicitly use 'gpt-4o-mini'
- Ensures consistent model configuration across test and runtime environments
pull/819/head
Pavan Kumar 3 days ago committed by ascender1729
parent 3fe1ec1581
commit d30ca92421

@@ -36,4 +36,6 @@ def calculate_percentage(part: float, whole: float) -> float:
if __name__ == "__main__":
    print("Starting Math Server on port 6274...")
    # Initialize LiteLLM with specific model
    llm = LiteLLM(model_name="gpt-4o-mini")
    mcp.run(transport="sse", host="0.0.0.0", port=6274)

@@ -21,7 +21,8 @@ def setup_agent(name: str, description: str, servers: list) -> Agent:
        system_prompt="You are a math assistant. Process mathematical operations using the provided MCP tools.",
        max_loops=1,
        mcp_servers=servers,
        streaming_on=False,
        model_name="gpt-4o-mini"  # Added model_name here
    )
def main():

Loading…
Cancel
Save