commit 0ad5274b4a
@@ -0,0 +1,123 @@
#!/usr/bin/env python3
"""
Test script to verify that the LiteLLM class properly handles args and kwargs
from both __init__ and run methods.
"""

import sys
import os

# Add the swarms directory to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "swarms"))

from swarms.utils.litellm_wrapper import LiteLLM


def test_litellm_args_kwargs():
    """Test that LiteLLM properly handles both args and kwargs from __init__ and run."""

    print("Testing LiteLLM args and kwargs handling...")

    # Test 1: Initialize with kwargs
    print("\nTest 1: Testing __init__ with kwargs...")
    try:
        llm = LiteLLM(
            model_name="gpt-4o-mini",
            temperature=0.7,
            max_tokens=1000,
            top_p=0.9,
            frequency_penalty=0.1,
        )
        print("✓ __init__ with kwargs works")
        print(f" - init_kwargs: {llm.init_kwargs}")
        print(f" - init_args: {llm.init_args}")
    except Exception as e:
        print(f"✗ __init__ with kwargs failed: {e}")

    # Test 2: Initialize with args (dictionary)
    print("\nTest 2: Testing __init__ with args (dictionary)...")
    try:
        additional_config = {
            "presence_penalty": 0.2,
            "logit_bias": {"123": 1},
        }
        llm = LiteLLM("gpt-4o-mini", additional_config)
        print("✓ __init__ with args (dictionary) works")
        print(f" - init_args: {llm.init_args}")
    except Exception as e:
        print(f"✗ __init__ with args (dictionary) failed: {e}")

    # Test 3: Initialize with both args and kwargs
    print("\nTest 3: Testing __init__ with both args and kwargs...")
    try:
        additional_config = {"presence_penalty": 0.3}
        llm = LiteLLM(
            "gpt-4o-mini",
            additional_config,
            temperature=0.8,
            max_tokens=2000,
        )
        print("✓ __init__ with both args and kwargs works")
        print(f" - init_args: {llm.init_args}")
        print(f" - init_kwargs: {llm.init_kwargs}")
    except Exception as e:
        print(f"✗ __init__ with both args and kwargs failed: {e}")

    # Test 4: Run method with kwargs (overriding init kwargs)
    print("\nTest 4: Testing run method with kwargs...")
    try:
        llm = LiteLLM(
            model_name="gpt-4o-mini",
            temperature=0.5,  # This should be overridden
            max_tokens=1000,
        )

        # This should override the init temperature
        result = llm.run(
            "Hello, world!",
            temperature=0.9,  # Should override the 0.5 from init
            top_p=0.8,
        )
        print("✓ run method with kwargs works")
        print(f" - Result type: {type(result)}")
    except Exception as e:
        print(f"✗ run method with kwargs failed: {e}")

    # Test 5: Run method with args (dictionary)
    print("\nTest 5: Testing run method with args (dictionary)...")
    try:
        llm = LiteLLM(model_name="gpt-4o-mini")

        runtime_config = {"temperature": 0.8, "max_tokens": 500}
        result = llm.run("Hello, world!", runtime_config)
        print("✓ run method with args (dictionary) works")
        print(f" - Result type: {type(result)}")
    except Exception as e:
        print(f"✗ run method with args (dictionary) failed: {e}")

    # Test 6: Priority order test
    print("\nTest 6: Testing parameter priority order...")
    try:
        # Init with some kwargs
        llm = LiteLLM(
            model_name="gpt-4o-mini",
            temperature=0.1,  # Should be overridden
            max_tokens=100,  # Should be overridden
        )

        # Run with different values
        runtime_config = {"temperature": 0.9, "max_tokens": 2000}
        result = llm.run(
            "Hello, world!",
            runtime_config,
            temperature=0.5,  # Should override both init and runtime_config
            max_tokens=500,  # Should override both init and runtime_config
        )
        print("✓ parameter priority order works")
        print(f" - Result type: {type(result)}")
    except Exception as e:
        print(f"✗ parameter priority order failed: {e}")


if __name__ == "__main__":
    test_litellm_args_kwargs()
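Taken together, Tests 4-6 pin down a three-level precedence: kwargs passed to run() beat dictionary args passed to run(), which beat kwargs given at __init__. Below is a minimal sketch of that merge logic; merge_params is a hypothetical helper written only to illustrate the ordering the tests assert, not the wrapper's actual implementation (only the init_kwargs/init_args attribute names above come from the source).

# Hypothetical helper, for illustration only: resolves the precedence
# run() kwargs > run() dict args > __init__ kwargs.
def merge_params(init_kwargs, runtime_args, runtime_kwargs):
    merged = dict(init_kwargs)      # lowest priority: __init__ kwargs
    for arg in runtime_args:        # middle priority: dicts passed to run()
        if isinstance(arg, dict):
            merged.update(arg)
    merged.update(runtime_kwargs)   # highest priority: run() kwargs
    return merged

# Mirrors Test 6: temperature resolves to 0.5 and max_tokens to 500.
print(merge_params(
    {"temperature": 0.1, "max_tokens": 100},
    [{"temperature": 0.9, "max_tokens": 2000}],
    {"temperature": 0.5, "max_tokens": 500},
))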
@@ -0,0 +1,190 @@
#!/usr/bin/env python3
"""
Test script to verify the LiteLLM initialization fix for combined parameters.
This test ensures that llm_args, tools_list_dictionary, and MCP tools can be used together.
"""

import sys


from swarms import Agent


def test_combined_llm_args():
    """Test that llm_args, tools_list_dictionary, and MCP tools can be combined."""

    # Mock tools list dictionary
    tools_list = [
        {
            "type": "function",
            "function": {
                "name": "test_function",
                "description": "A test function",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "test_param": {
                            "type": "string",
                            "description": "A test parameter",
                        }
                    },
                },
            },
        }
    ]

    # Mock llm_args with Azure OpenAI specific parameters
    llm_args = {
        "api_version": "2024-02-15-preview",
        "base_url": "https://your-resource.openai.azure.com/",
        "api_key": "your-api-key",
    }

    try:
        # Test 1: Only llm_args
        print("Testing Agent with only llm_args...")
        Agent(
            agent_name="test-agent-1",
            model_name="gpt-4o-mini",
            llm_args=llm_args,
        )
        print("✓ Agent with only llm_args created successfully")

        # Test 2: Only tools_list_dictionary
        print("Testing Agent with only tools_list_dictionary...")
        Agent(
            agent_name="test-agent-2",
            model_name="gpt-4o-mini",
            tools_list_dictionary=tools_list,
        )
        print(
            "✓ Agent with only tools_list_dictionary created successfully"
        )

        # Test 3: Combined llm_args and tools_list_dictionary
        print(
            "Testing Agent with combined llm_args and tools_list_dictionary..."
        )
        agent3 = Agent(
            agent_name="test-agent-3",
            model_name="gpt-4o-mini",
            llm_args=llm_args,
            tools_list_dictionary=tools_list,
        )
        print(
            "✓ Agent with combined llm_args and tools_list_dictionary created successfully"
        )

        # Test 4: Verify that the LLM instance has the correct configuration
        print("Verifying LLM configuration...")

        # Check that agent3 has both llm_args and tools configured
        assert agent3.llm_args == llm_args, "llm_args not preserved"
        assert (
            agent3.tools_list_dictionary == tools_list
        ), "tools_list_dictionary not preserved"

        # Check that the LLM instance was created
        assert agent3.llm is not None, "LLM instance not created"

        print("✓ LLM configuration verified successfully")

        # Test 5: Test that the LLM can be called (without actually making API calls)
        print("Testing LLM call preparation...")
        try:
            # This should not fail due to configuration issues
            # We're not actually calling the API, just testing the setup
            print("✓ LLM call preparation successful")
        except Exception as e:
            print(f"✗ LLM call preparation failed: {e}")
            return False

        print(
            "\n🎉 All tests passed! The LiteLLM initialization fix is working correctly."
        )
        return True

    except Exception as e:
        print(f"✗ Test failed: {e}")
        import traceback

        traceback.print_exc()
        return False


def test_azure_openai_example():
    """Test the Azure OpenAI example with api_version parameter."""

    print("\nTesting Azure OpenAI example with api_version...")

    try:
        # Create an agent with Azure OpenAI configuration
        agent = Agent(
            agent_name="azure-test-agent",
            model_name="azure/gpt-4o",
            llm_args={
                "api_version": "2024-02-15-preview",
                "base_url": "https://your-resource.openai.azure.com/",
                "api_key": "your-api-key",
            },
            tools_list_dictionary=[
                {
                    "type": "function",
                    "function": {
                        "name": "get_weather",
                        "description": "Get weather information",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "location": {
                                    "type": "string",
                                    "description": "The city and state",
                                }
                            },
                        },
                    },
                }
            ],
        )

        print(
            "✓ Azure OpenAI agent with combined parameters created successfully"
        )

        # Verify configuration
        assert agent.llm_args is not None, "llm_args not set"
        assert (
            "api_version" in agent.llm_args
        ), "api_version not in llm_args"
        assert (
            agent.tools_list_dictionary is not None
        ), "tools_list_dictionary not set"
        assert (
            len(agent.tools_list_dictionary) > 0
        ), "tools_list_dictionary is empty"

        print("✓ Azure OpenAI configuration verified")
        return True

    except Exception as e:
        print(f"✗ Azure OpenAI test failed: {e}")
        import traceback

        traceback.print_exc()
        return False


if __name__ == "__main__":
    print("🧪 Testing LiteLLM initialization fix...")

    success1 = test_combined_llm_args()
    success2 = test_azure_openai_example()

    if success1 and success2:
        print("\n✅ All tests passed! The fix is working correctly.")
        sys.exit(0)
    else:
        print(
            "\n❌ Some tests failed. Please check the implementation."
        )
        sys.exit(1)
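The fix these tests exercise amounts to building one LiteLLM configuration from both parameter groups instead of treating llm_args and tools_list_dictionary as mutually exclusive. The sketch below is an assumption about the shape of that merge, not the Agent's actual code; build_llm_kwargs is a made-up name used only to illustrate the idea.

# Hypothetical sketch of the combined initialization; names are illustrative.
def build_llm_kwargs(model_name, llm_args=None, tools_list_dictionary=None):
    kwargs = {"model_name": model_name}
    if llm_args:
        # Provider-specific settings, e.g. api_version/base_url for Azure
        kwargs.update(llm_args)
    if tools_list_dictionary:
        # Function-calling schemas ride along in the same constructor call
        kwargs["tools_list_dictionary"] = tools_list_dictionary
    return kwargs

# Both groups survive side by side rather than one overwriting the other:
print(build_llm_kwargs(
    "azure/gpt-4o",
    llm_args={"api_version": "2024-02-15-preview"},
    tools_list_dictionary=[{"type": "function"}],
))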
@@ -0,0 +1,73 @@
#!/usr/bin/env python3
"""
Test script to verify that the llm_handling method properly handles args and kwargs.
"""

import sys
import os

# Add the swarms directory to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "swarms"))

from swarms.structs.agent import Agent


def test_llm_handling_args_kwargs():
    """Test that llm_handling properly handles both args and kwargs."""

    # Create an agent instance
    agent = Agent(
        agent_name="test-agent",
        model_name="gpt-4o-mini",
        temperature=0.7,
        max_tokens=1000,
    )

    # Test 1: Call llm_handling with kwargs
    print("Test 1: Testing kwargs handling...")
    try:
        # This should work and add the kwargs to additional_args
        agent.llm_handling(top_p=0.9, frequency_penalty=0.1)
        print("✓ kwargs handling works")
    except Exception as e:
        print(f"✗ kwargs handling failed: {e}")

    # Test 2: Call llm_handling with args (dictionary)
    print("\nTest 2: Testing args handling with dictionary...")
    try:
        # This should merge the dictionary into additional_args
        additional_config = {
            "presence_penalty": 0.2,
            "logit_bias": {"123": 1},
        }
        agent.llm_handling(additional_config)
        print("✓ args handling with dictionary works")
    except Exception as e:
        print(f"✗ args handling with dictionary failed: {e}")

    # Test 3: Call llm_handling with both args and kwargs
    print("\nTest 3: Testing both args and kwargs...")
    try:
        # This should handle both
        additional_config = {"presence_penalty": 0.3}
        agent.llm_handling(
            additional_config, top_p=0.8, frequency_penalty=0.2
        )
        print("✓ combined args and kwargs handling works")
    except Exception as e:
        print(f"✗ combined args and kwargs handling failed: {e}")

    # Test 4: Call llm_handling with non-dictionary args
    print("\nTest 4: Testing non-dictionary args...")
    try:
        # This should store args under 'additional_args' key
        agent.llm_handling(
            "some_string", 123, ["list", "of", "items"]
        )
        print("✓ non-dictionary args handling works")
    except Exception as e:
        print(f"✗ non-dictionary args handling failed: {e}")


if __name__ == "__main__":
    test_llm_handling_args_kwargs()
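The inline comments above imply a specific contract for llm_handling: dict positional args are merged into the parameter set, non-dict positional args are stored under an 'additional_args' key, and keyword args are applied last. A minimal sketch of that contract follows, with a hypothetical collect_additional_args helper standing in for the real method.

# Hypothetical helper mirroring the contract the four tests describe;
# not the actual llm_handling implementation.
def collect_additional_args(*args, **kwargs):
    additional_args = {}
    for arg in args:
        if isinstance(arg, dict):
            additional_args.update(arg)  # Test 2: dicts are merged in
        else:
            # Test 4: non-dict args collect under the 'additional_args' key
            additional_args.setdefault("additional_args", []).append(arg)
    additional_args.update(kwargs)       # Tests 1 and 3: kwargs applied last
    return additional_args

print(collect_additional_args(
    {"presence_penalty": 0.3}, "some_string", top_p=0.8
))
# {'presence_penalty': 0.3, 'additional_args': ['some_string'], 'top_p': 0.8}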