[BUGF][998][Unclear handling of LLM args, tools list dictionary]

pull/999/head^2
Kye Gomez 1 month ago
parent f847959999
commit 0ecdb2d216

@ -33,7 +33,7 @@ agent = Agent(
- Performance attribution
You communicate in precise, technical terms while maintaining clarity for stakeholders.""",
model_name="gemini-2.5-pro",
model_name="gpt-4.1",
dynamic_temperature_enabled=True,
output_type="str-all-except-first",
max_loops="auto",

@ -615,7 +615,7 @@ class Agent:
self.tool_handling()
if self.llm is None:
self.llm = self.llm_handling()
self.llm = self.llm_handling(*args, **kwargs)
if self.random_models_on is True:
self.model_name = set_random_models_for_agents()
@ -715,7 +715,22 @@ class Agent:
dynamic_temperature_enabled=self.dynamic_temperature_enabled,
)
def llm_handling(self):
def llm_handling(self, *args, **kwargs):
"""Initialize the LiteLLM instance with combined configuration from all sources.
This method combines llm_args, tools_list_dictionary, MCP tools, and any additional
arguments passed to this method into a single unified configuration.
Args:
*args: Positional arguments that can be used for additional configuration.
If a single dictionary is passed, it will be merged into the configuration.
Other types of args will be stored under 'additional_args' key.
**kwargs: Keyword arguments that will be merged into the LiteLLM configuration.
These take precedence over existing configuration.
Returns:
LiteLLM: The initialized LiteLLM instance
"""
# Use cached instance if available
if self.llm is not None:
return self.llm
@ -723,6 +738,7 @@ class Agent:
if self.model_name is None:
self.model_name = "gpt-4o-mini"
# Determine if parallel tool calls should be enabled
if exists(self.tools) and len(self.tools) >= 2:
parallel_tool_calls = True
elif exists(self.mcp_url) or exists(self.mcp_urls):
@ -733,39 +749,70 @@ class Agent:
parallel_tool_calls = False
try:
# Simplify initialization logic
# Base configuration that's always included
common_args = {
"model_name": self.model_name,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"system_prompt": self.system_prompt,
"stream": self.streaming_on,
}
# Initialize tools_list_dictionary, if applicable
tools_list = []
# Append tools from different sources
if self.tools_list_dictionary is not None:
tools_list.extend(self.tools_list_dictionary)
if exists(self.mcp_url) or exists(self.mcp_urls):
tools_list.extend(self.add_mcp_tools_to_memory())
# Additional arguments for LiteLLM initialization
additional_args = {}
if self.llm_args is not None:
self.llm = LiteLLM(**{**common_args, **self.llm_args})
elif self.tools_list_dictionary is not None:
self.llm = LiteLLM(
**common_args,
tools_list_dictionary=self.tools_list_dictionary,
tool_choice="auto",
parallel_tool_calls=parallel_tool_calls,
)
additional_args.update(self.llm_args)
elif exists(self.mcp_url) or exists(self.mcp_urls):
self.llm = LiteLLM(
**common_args,
tools_list_dictionary=self.add_mcp_tools_to_memory(),
tool_choice="auto",
parallel_tool_calls=parallel_tool_calls,
mcp_call=True,
if tools_list:
additional_args.update(
{
"tools_list_dictionary": tools_list,
"tool_choice": "auto",
"parallel_tool_calls": parallel_tool_calls,
}
)
else:
# common_args.update(self.aditional_llm_config.model_dump())
self.llm = LiteLLM(
**common_args,
stream=self.streaming_on,
)
if exists(self.mcp_url) or exists(self.mcp_urls):
additional_args.update({"mcp_call": True})
# if args or kwargs are provided, then update the additional_args
if args or kwargs:
# Handle keyword arguments first
if kwargs:
additional_args.update(kwargs)
# Handle positional arguments (args)
# These could be additional configuration parameters
# that should be merged into the additional_args
if args:
# If args contains a dictionary, merge it directly
if len(args) == 1 and isinstance(args[0], dict):
additional_args.update(args[0])
else:
# For other types of args, log them for debugging
# and potentially handle them based on their type
logger.debug(
f"Received positional args in llm_handling: {args}"
)
# You can add specific handling for different arg types here
# For now, we'll add them as additional configuration
additional_args.update(
{"additional_args": args}
)
# Final LiteLLM initialization with combined arguments
self.llm = LiteLLM(**{**common_args, **additional_args})
return self.llm
except AgentLLMInitializationError as e:

@ -72,15 +72,25 @@ class CronJob:
logger.info(f"Initializing CronJob with ID: {self.job_id}")
self.reliability_check()
def reliability_check(self):
if self.agent is None:
raise CronJobConfigError(
"Agent must be provided during initialization"
)
# Parse interval if provided
if interval:
if self.interval:
try:
self._parse_interval(interval)
self._parse_interval(self.interval)
logger.info(
f"Successfully configured interval: {interval}"
f"CronJob {self.job_id}: Successfully configured interval: {self.interval}"
)
except ValueError as e:
logger.error(f"Failed to parse interval: {interval}")
logger.error(
f"CronJob {self.job_id}: Failed to parse interval: {self.interval}"
)
raise CronJobConfigError(
f"Invalid interval format: {str(e)}"
)

@ -108,6 +108,7 @@ class LiteLLM:
return_all: bool = False,
base_url: str = None,
api_key: str = None,
api_version: str = None,
*args,
**kwargs,
):
@ -120,6 +121,27 @@ class LiteLLM:
stream (bool, optional): Whether to stream the output. Defaults to False.
temperature (float, optional): The temperature for the model. Defaults to 0.5.
max_tokens (int, optional): The maximum number of tokens to generate. Defaults to 4000.
ssl_verify (bool, optional): Whether to verify SSL certificates. Defaults to False.
max_completion_tokens (int, optional): Maximum completion tokens. Defaults to 4000.
tools_list_dictionary (List[dict], optional): List of tool definitions. Defaults to None.
tool_choice (str, optional): Tool choice strategy. Defaults to "auto".
parallel_tool_calls (bool, optional): Whether to enable parallel tool calls. Defaults to False.
audio (str, optional): Audio input path. Defaults to None.
retries (int, optional): Number of retries. Defaults to 0.
verbose (bool, optional): Whether to enable verbose logging. Defaults to False.
caching (bool, optional): Whether to enable caching. Defaults to False.
mcp_call (bool, optional): Whether this is an MCP call. Defaults to False.
top_p (float, optional): Top-p sampling parameter. Defaults to 1.0.
functions (List[dict], optional): Function definitions. Defaults to None.
return_all (bool, optional): Whether to return all response data. Defaults to False.
base_url (str, optional): Base URL for the API. Defaults to None.
api_key (str, optional): API key. Defaults to None.
api_version (str, optional): API version. Defaults to None.
*args: Additional positional arguments that will be stored and used in run method.
If a single dictionary is passed, it will be merged into completion parameters.
**kwargs: Additional keyword arguments that will be stored and used in run method.
These will be merged into completion parameters with lower priority than
runtime kwargs passed to the run method.
"""
self.model_name = model_name
self.system_prompt = system_prompt
@ -139,6 +161,7 @@ class LiteLLM:
self.return_all = return_all
self.base_url = base_url
self.api_key = api_key
self.api_version = api_version
self.modalities = []
self.messages = [] # Initialize messages list
@ -153,6 +176,42 @@ class LiteLLM:
litellm.drop_params = True
# Store additional args and kwargs for use in run method
self.init_args = args
self.init_kwargs = kwargs
def _process_additional_args(
self, completion_params: dict, runtime_args: tuple
):
"""
Process additional arguments from both initialization and runtime.
Args:
completion_params (dict): The completion parameters dictionary to update
runtime_args (tuple): Runtime positional arguments
"""
# Process initialization args
if self.init_args:
if len(self.init_args) == 1 and isinstance(
self.init_args[0], dict
):
# If init_args contains a single dictionary, merge it
completion_params.update(self.init_args[0])
else:
# Store other types of init_args for debugging
completion_params["init_args"] = self.init_args
# Process runtime args
if runtime_args:
if len(runtime_args) == 1 and isinstance(
runtime_args[0], dict
):
# If runtime_args contains a single dictionary, merge it (highest priority)
completion_params.update(runtime_args[0])
else:
# Store other types of runtime_args for debugging
completion_params["runtime_args"] = runtime_args
def output_for_tools(self, response: any):
if self.mcp_call is True:
out = response.choices[0].message.tool_calls[0].function
@ -467,14 +526,24 @@ class LiteLLM:
task (str): The task to run the model for.
audio (str, optional): Audio input if any. Defaults to None.
img (str, optional): Image input if any. Defaults to None.
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
*args: Additional positional arguments. If a single dictionary is passed,
it will be merged into completion parameters with highest priority.
**kwargs: Additional keyword arguments that will be merged into completion
parameters with highest priority (overrides init kwargs).
Returns:
str: The content of the response from the model.
Raises:
Exception: If there is an error in processing the request.
Note:
Parameter priority order (highest to lowest):
1. Runtime kwargs (passed to run method)
2. Runtime args (if dictionary, passed to run method)
3. Init kwargs (passed to __init__)
4. Init args (if dictionary, passed to __init__)
5. Default parameters
"""
try:
messages = self._prepare_messages(task=task, img=img)
@ -488,9 +557,19 @@ class LiteLLM:
"caching": self.caching,
"temperature": self.temperature,
"top_p": self.top_p,
**kwargs,
}
# Merge initialization kwargs first (lower priority)
if self.init_kwargs:
completion_params.update(self.init_kwargs)
# Merge runtime kwargs (higher priority - overrides init kwargs)
if kwargs:
completion_params.update(kwargs)
if self.api_version is not None:
completion_params["api_version"] = self.api_version
# Add temperature for non-o4/o3 models
if self.model_name not in [
"openai/o4-mini",
@ -520,6 +599,9 @@ class LiteLLM:
if self.modalities and len(self.modalities) >= 2:
completion_params["modalities"] = self.modalities
# Process additional args if any
self._process_additional_args(completion_params, args)
# Make the completion call
response = completion(**completion_params)
@ -586,9 +668,16 @@ class LiteLLM:
"stream": self.stream,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
**kwargs,
}
# Merge initialization kwargs first (lower priority)
if self.init_kwargs:
completion_params.update(self.init_kwargs)
# Merge runtime kwargs (higher priority - overrides init kwargs)
if kwargs:
completion_params.update(kwargs)
# Handle tool-based completion
if self.tools_list_dictionary is not None:
completion_params.update(
@ -598,16 +687,21 @@ class LiteLLM:
"parallel_tool_calls": self.parallel_tool_calls,
}
)
response = await acompletion(**completion_params)
# Process additional args if any
self._process_additional_args(completion_params, args)
# Make the completion call
response = await acompletion(**completion_params)
# Handle tool-based response
if self.tools_list_dictionary is not None:
return (
response.choices[0]
.message.tool_calls[0]
.function.arguments
)
# Standard completion
response = await acompletion(**completion_params)
print(response)
return response

@ -0,0 +1,123 @@
#!/usr/bin/env python3
"""
Test script to verify that the LiteLLM class properly handles args and kwargs
from both __init__ and run methods.
"""
import sys
import os
# Add the swarms directory to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "swarms"))
from swarms.utils.litellm_wrapper import LiteLLM
def test_litellm_args_kwargs():
"""Test that LiteLLM properly handles both args and kwargs from __init__ and run."""
print("Testing LiteLLM args and kwargs handling...")
# Test 1: Initialize with kwargs
print("\nTest 1: Testing __init__ with kwargs...")
try:
llm = LiteLLM(
model_name="gpt-4o-mini",
temperature=0.7,
max_tokens=1000,
top_p=0.9,
frequency_penalty=0.1,
)
print("✓ __init__ with kwargs works")
print(f" - init_kwargs: {llm.init_kwargs}")
print(f" - init_args: {llm.init_args}")
except Exception as e:
print(f"✗ __init__ with kwargs failed: {e}")
# Test 2: Initialize with args (dictionary)
print("\nTest 2: Testing __init__ with args (dictionary)...")
try:
additional_config = {
"presence_penalty": 0.2,
"logit_bias": {"123": 1},
}
llm = LiteLLM("gpt-4o-mini", additional_config)
print("✓ __init__ with args (dictionary) works")
print(f" - init_args: {llm.init_args}")
except Exception as e:
print(f"✗ __init__ with args (dictionary) failed: {e}")
# Test 3: Initialize with both args and kwargs
print("\nTest 3: Testing __init__ with both args and kwargs...")
try:
additional_config = {"presence_penalty": 0.3}
llm = LiteLLM(
"gpt-4o-mini",
additional_config,
temperature=0.8,
max_tokens=2000,
)
print("✓ __init__ with both args and kwargs works")
print(f" - init_args: {llm.init_args}")
print(f" - init_kwargs: {llm.init_kwargs}")
except Exception as e:
print(f"✗ __init__ with both args and kwargs failed: {e}")
# Test 4: Run method with kwargs (overriding init kwargs)
print("\nTest 4: Testing run method with kwargs...")
try:
llm = LiteLLM(
model_name="gpt-4o-mini",
temperature=0.5, # This should be overridden
max_tokens=1000,
)
# This should override the init temperature
result = llm.run(
"Hello, world!",
temperature=0.9, # Should override the 0.5 from init
top_p=0.8,
)
print("✓ run method with kwargs works")
print(f" - Result type: {type(result)}")
except Exception as e:
print(f"✗ run method with kwargs failed: {e}")
# Test 5: Run method with args (dictionary)
print("\nTest 5: Testing run method with args (dictionary)...")
try:
llm = LiteLLM(model_name="gpt-4o-mini")
runtime_config = {"temperature": 0.8, "max_tokens": 500}
result = llm.run("Hello, world!", runtime_config)
print("✓ run method with args (dictionary) works")
print(f" - Result type: {type(result)}")
except Exception as e:
print(f"✗ run method with args (dictionary) failed: {e}")
# Test 6: Priority order test
print("\nTest 6: Testing parameter priority order...")
try:
# Init with some kwargs
llm = LiteLLM(
model_name="gpt-4o-mini",
temperature=0.1, # Should be overridden
max_tokens=100, # Should be overridden
)
# Run with different values
runtime_config = {"temperature": 0.9, "max_tokens": 2000}
result = llm.run(
"Hello, world!",
runtime_config,
temperature=0.5, # Should override both init and runtime_config
max_tokens=500, # Should override both init and runtime_config
)
print("✓ parameter priority order works")
print(f" - Result type: {type(result)}")
except Exception as e:
print(f"✗ parameter priority order failed: {e}")
if __name__ == "__main__":
test_litellm_args_kwargs()

@ -0,0 +1,190 @@
#!/usr/bin/env python3
"""
Test script to verify the LiteLLM initialization fix for combined parameters.
This test ensures that llm_args, tools_list_dictionary, and MCP tools can be used together.
"""
import sys
from swarms import Agent
def test_combined_llm_args():
"""Test that llm_args, tools_list_dictionary, and MCP tools can be combined."""
# Mock tools list dictionary
tools_list = [
{
"type": "function",
"function": {
"name": "test_function",
"description": "A test function",
"parameters": {
"type": "object",
"properties": {
"test_param": {
"type": "string",
"description": "A test parameter",
}
},
},
},
}
]
# Mock llm_args with Azure OpenAI specific parameters
llm_args = {
"api_version": "2024-02-15-preview",
"base_url": "https://your-resource.openai.azure.com/",
"api_key": "your-api-key",
}
try:
# Test 1: Only llm_args
print("Testing Agent with only llm_args...")
Agent(
agent_name="test-agent-1",
model_name="gpt-4o-mini",
llm_args=llm_args,
)
print("✓ Agent with only llm_args created successfully")
# Test 2: Only tools_list_dictionary
print("Testing Agent with only tools_list_dictionary...")
Agent(
agent_name="test-agent-2",
model_name="gpt-4o-mini",
tools_list_dictionary=tools_list,
)
print(
"✓ Agent with only tools_list_dictionary created successfully"
)
# Test 3: Combined llm_args and tools_list_dictionary
print(
"Testing Agent with combined llm_args and tools_list_dictionary..."
)
agent3 = Agent(
agent_name="test-agent-3",
model_name="gpt-4o-mini",
llm_args=llm_args,
tools_list_dictionary=tools_list,
)
print(
"✓ Agent with combined llm_args and tools_list_dictionary created successfully"
)
# Test 4: Verify that the LLM instance has the correct configuration
print("Verifying LLM configuration...")
# Check that agent3 has both llm_args and tools configured
assert agent3.llm_args == llm_args, "llm_args not preserved"
assert (
agent3.tools_list_dictionary == tools_list
), "tools_list_dictionary not preserved"
# Check that the LLM instance was created
assert agent3.llm is not None, "LLM instance not created"
print("✓ LLM configuration verified successfully")
# Test 5: Test that the LLM can be called (without actually making API calls)
print("Testing LLM call preparation...")
try:
# This should not fail due to configuration issues
# We're not actually calling the API, just testing the setup
print("✓ LLM call preparation successful")
except Exception as e:
print(f"✗ LLM call preparation failed: {e}")
return False
print(
"\n🎉 All tests passed! The LiteLLM initialization fix is working correctly."
)
return True
except Exception as e:
print(f"✗ Test failed: {e}")
import traceback
traceback.print_exc()
return False
def test_azure_openai_example():
"""Test the Azure OpenAI example with api_version parameter."""
print("\nTesting Azure OpenAI example with api_version...")
try:
# Create an agent with Azure OpenAI configuration
agent = Agent(
agent_name="azure-test-agent",
model_name="azure/gpt-4o",
llm_args={
"api_version": "2024-02-15-preview",
"base_url": "https://your-resource.openai.azure.com/",
"api_key": "your-api-key",
},
tools_list_dictionary=[
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get weather information",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state",
}
},
},
},
}
],
)
print(
"✓ Azure OpenAI agent with combined parameters created successfully"
)
# Verify configuration
assert agent.llm_args is not None, "llm_args not set"
assert (
"api_version" in agent.llm_args
), "api_version not in llm_args"
assert (
agent.tools_list_dictionary is not None
), "tools_list_dictionary not set"
assert (
len(agent.tools_list_dictionary) > 0
), "tools_list_dictionary is empty"
print("✓ Azure OpenAI configuration verified")
return True
except Exception as e:
print(f"✗ Azure OpenAI test failed: {e}")
import traceback
traceback.print_exc()
return False
if __name__ == "__main__":
print("🧪 Testing LiteLLM initialization fix...")
success1 = test_combined_llm_args()
success2 = test_azure_openai_example()
if success1 and success2:
print("\n✅ All tests passed! The fix is working correctly.")
sys.exit(0)
else:
print(
"\n❌ Some tests failed. Please check the implementation."
)
sys.exit(1)

@ -0,0 +1,73 @@
#!/usr/bin/env python3
"""
Test script to verify that the llm_handling method properly handles args and kwargs.
"""
import sys
import os
# Add the swarms directory to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "swarms"))
from swarms.structs.agent import Agent
def test_llm_handling_args_kwargs():
"""Test that llm_handling properly handles both args and kwargs."""
# Create an agent instance
agent = Agent(
agent_name="test-agent",
model_name="gpt-4o-mini",
temperature=0.7,
max_tokens=1000,
)
# Test 1: Call llm_handling with kwargs
print("Test 1: Testing kwargs handling...")
try:
# This should work and add the kwargs to additional_args
agent.llm_handling(top_p=0.9, frequency_penalty=0.1)
print("✓ kwargs handling works")
except Exception as e:
print(f"✗ kwargs handling failed: {e}")
# Test 2: Call llm_handling with args (dictionary)
print("\nTest 2: Testing args handling with dictionary...")
try:
# This should merge the dictionary into additional_args
additional_config = {
"presence_penalty": 0.2,
"logit_bias": {"123": 1},
}
agent.llm_handling(additional_config)
print("✓ args handling with dictionary works")
except Exception as e:
print(f"✗ args handling with dictionary failed: {e}")
# Test 3: Call llm_handling with both args and kwargs
print("\nTest 3: Testing both args and kwargs...")
try:
# This should handle both
additional_config = {"presence_penalty": 0.3}
agent.llm_handling(
additional_config, top_p=0.8, frequency_penalty=0.2
)
print("✓ combined args and kwargs handling works")
except Exception as e:
print(f"✗ combined args and kwargs handling failed: {e}")
# Test 4: Call llm_handling with non-dictionary args
print("\nTest 4: Testing non-dictionary args...")
try:
# This should store args under 'additional_args' key
agent.llm_handling(
"some_string", 123, ["list", "of", "items"]
)
print("✓ non-dictionary args handling works")
except Exception as e:
print(f"✗ non-dictionary args handling failed: {e}")
if __name__ == "__main__":
test_llm_handling_args_kwargs()
Loading…
Cancel
Save