Improved error handling

pull/938/head
harshalmore31 2 months ago
parent f73cdc3934
commit b873c35986

@@ -3099,8 +3099,9 @@ class BaseTool(BaseModel):
             if isinstance(args, str):
                 try:
                     args = json.loads(args)
-                except json.JSONDecodeError:
-                    args = {}
+                except json.JSONDecodeError as e:
+                    print(f"Warning: Failed to parse tool arguments for {tc.get('name')}: {e}")
+                    args = {"error": f"JSON parse failed: {e}", "raw": args}
             formatted_calls.append({
                 "type": "function",

@@ -687,6 +687,8 @@ class LiteLLM:
     def _process_anthropic_chunk(self, chunk, current_tool_call, tool_call_buffer, tool_calls_in_stream, print_on, verbose):
         """Process Anthropic-style streaming chunks."""
         import json
+        from loguru import logger
         chunk_type = getattr(chunk, 'type', None)
         full_text_response = ""
@@ -711,8 +713,11 @@ class LiteLLM:
                 tool_input = json.loads(tool_call_buffer)
                 current_tool_call["input"] = tool_input
                 tool_calls_in_stream.append(current_tool_call)
-            except json.JSONDecodeError:
-                logger.error(f"Failed to parse tool arguments: {tool_call_buffer}")
+            except json.JSONDecodeError as e:
+                logger.error(f"Failed to parse tool arguments: {tool_call_buffer}. Error: {e}")
+                # Store the raw buffer for debugging
+                current_tool_call["input"] = {"raw_buffer": tool_call_buffer, "error": str(e)}
+                tool_calls_in_stream.append(current_tool_call)
             current_tool_call = None
             tool_call_buffer = ""
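
Both Anthropic-path changes follow the same parse-or-preserve pattern: when the accumulated buffer is not valid JSON, the tool call is still appended, with the raw buffer and the error stored under `input` for debugging rather than dropped. A self-contained sketch of that pattern, stdlib only:

    import json

    def finalize_tool_call(call, buffer):
        # Decode the streamed argument buffer; on failure, preserve the
        # raw text and the error instead of losing the call entirely.
        try:
            call["input"] = json.loads(buffer)
        except json.JSONDecodeError as e:
            call["input"] = {"raw_buffer": buffer, "error": str(e)}
        return call

    finalize_tool_call({"name": "search"}, '{"query": "weath')  # truncated JSON is kept, not lost
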
@@ -776,7 +781,9 @@ class LiteLLM:
                     if verbose:
                         logger.info(f"Complete tool call for {tool_calls_in_stream[tool_index]['name']} with args: {args_dict}")
                 except json.JSONDecodeError:
-                    pass
+                    # Continue accumulating - JSON might be incomplete
+                    if verbose:
+                        logger.debug(f"Incomplete JSON for {tool_calls_in_stream[tool_index].get('name', 'unknown')}: {tool_calls_in_stream[tool_index]['arguments'][:100]}...")
         return full_text_response
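
In this handler a decode failure is expected rather than exceptional: argument deltas arrive in fragments, and the parse is retried on each partial buffer until the JSON completes, which is why the except branch only logs at debug level. A minimal sketch of that accumulation loop (the fragment list is a made-up example):

    import json

    def accumulate_arguments(fragments):
        buffer = ""
        for fragment in fragments:
            buffer += fragment
            try:
                return json.loads(buffer)  # succeeds once the JSON is complete
            except json.JSONDecodeError:
                continue  # incomplete so far; keep accumulating
        return None

    accumulate_arguments(['{"city": "Par', 'is"}'])  # -> {'city': 'Paris'}
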
@@ -808,6 +815,7 @@ class LiteLLM:
             print(f"{agent_name}: ", end="", flush=True)
         # Process streaming chunks in real-time
+        try:
             for chunk in stream:
                 if verbose:
                     logger.debug(f"Processing streaming chunk: {type(chunk)}")
@@ -825,6 +833,11 @@ class LiteLLM:
                 openai_text = self._process_openai_chunk(chunk, tool_calls_in_stream, print_on, verbose)
                 if openai_text:
                     full_text_response += openai_text
+        except Exception as e:
+            logger.error(f"Error processing streaming chunks: {e}")
+            if print_on:
+                print(f"\n[Streaming Error: {e}]")
+            return full_text_response, tool_calls_in_stream
         if print_on:
             print() # Newline after streaming text
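
Wrapping the chunk loop in try/except changes the failure contract: a mid-stream exception is logged and, when printing is on, announced inline, and the caller receives whatever text and tool calls were collected before the error instead of an unhandled exception. A sketch of that contract with a stand-in stream that fails partway through:

    def flaky_stream():
        yield "partial "
        raise RuntimeError("connection dropped")

    def consume(stream):
        # Mirrors the guarded loop above: return partial results on error.
        full_text, tool_calls = "", []
        try:
            for chunk in stream:
                full_text += chunk
        except Exception as e:
            print(f"\n[Streaming Error: {e}]")
        return full_text, tool_calls

    consume(flaky_stream())  # -> ('partial ', []) after printing the error notice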
