@@ -1144,7 +1144,6 @@ class Agent:
                 self.tool_call_completed = True
                 # Reset expecting_tool_call so subsequent requests can stream
                 self.expecting_tool_call = False
-
         # Handle MCP tools
         if (
             exists(self.mcp_url)
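
As a reading aid for the hunk above, here is a minimal sketch of the flag handshake it relies on. Only the attribute names (`tool_call_completed`, `expecting_tool_call`, `streaming_on`) appear in the diff; the class and method below are illustrative, not the library's implementation.

```python
# Minimal sketch of the flag handshake the hunk above relies on.
# Only the attribute names come from the diff; everything else is illustrative.
class AgentFlags:
    def __init__(self, streaming_on: bool = False):
        self.streaming_on = streaming_on
        self.expecting_tool_call = False   # set before a turn that must return a tool call
        self.tool_call_completed = False   # set once that tool call has been executed

    def on_tool_call_finished(self) -> None:
        self.tool_call_completed = True
        # Reset expecting_tool_call so subsequent requests can stream
        self.expecting_tool_call = False


flags = AgentFlags(streaming_on=True)
flags.expecting_tool_call = True           # a tool-call turn is pending: do not stream
flags.on_tool_call_finished()
assert flags.expecting_tool_call is False  # later turns may stream again
```
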
@@ -2558,7 +2557,6 @@ class Agent:
         try:
             # Decide whether streaming should be used for this call
             streaming_enabled = self.streaming_on and not getattr(self, "expecting_tool_call", False)
-
             # Set streaming parameter in LLM if streaming is enabled for this call
             if streaming_enabled and hasattr(self.llm, "stream"):
                 original_stream = self.llm.stream
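
A hedged sketch of the gating rule around this call site: stream only when `streaming_on` is set and the agent is not waiting for a structured tool call, with `getattr` covering instances created before the attribute existed. Toggling and then restoring `llm.stream` is an assumption suggested by the `original_stream` line but not shown in the hunk.

```python
from types import SimpleNamespace

def call_llm(agent, task: str):
    # Decide whether streaming should be used for this call: only when streaming
    # is enabled AND no structured tool call is pending for this turn.
    streaming_enabled = agent.streaming_on and not getattr(
        agent, "expecting_tool_call", False
    )
    if streaming_enabled and hasattr(agent.llm, "stream"):
        original_stream = agent.llm.stream
        agent.llm.stream = True                 # assumed toggle for this call
        try:
            return agent.llm.run(task)
        finally:
            agent.llm.stream = original_stream  # assumed restore afterwards
    return agent.llm.run(task)                  # tool-call turns stay non-streamed


agent = SimpleNamespace(
    streaming_on=True,
    expecting_tool_call=False,
    llm=SimpleNamespace(stream=False, run=lambda task: f"echo: {task}"),
)
print(call_llm(agent, "summarize the logs"))
```
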
@@ -2573,7 +2571,7 @@ class Agent:
                     task=task, *args, **kwargs
                 )

-                # If we get a streaming response, handle it with the streaming panel
+                # If we get a streaming response, handle it with the new streaming panel
                 if hasattr(
                     streaming_response, "__iter__"
                 ) and not isinstance(streaming_response, str):
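
The `__iter__` / `isinstance(str)` check above distinguishes a chunked streaming response from an already-complete string (strings are themselves iterable, hence the second test). A self-contained sketch of that detection and one way to drain the chunks:

```python
from typing import Iterable, Union

def collect_response(streaming_response: Union[str, Iterable[str]]) -> str:
    # A streaming response is anything iterable that is not a plain string.
    if hasattr(
        streaming_response, "__iter__"
    ) and not isinstance(streaming_response, str):
        chunks = []
        for chunk in streaming_response:   # e.g. feed each chunk to a live panel here
            chunks.append(chunk)
        return "".join(chunks)
    return streaming_response              # already a complete string


print(collect_response(iter(["Hel", "lo"])))  # chunked -> "Hello"
print(collect_response("Hello"))              # plain string passes through unchanged
```
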
@@ -3079,11 +3077,9 @@ class Agent:
             Focus on the key information and insights that would be most relevant to the user's original request.
             {self.run_task}
             If there are any errors or issues, highlight them prominently.
-
             Tool Output:
             {output}
             """
-
             # Stream the tool summary only if the agent is configured for streaming
             if self.streaming_on and self.print_on:
                 # Handle streaming response with streaming panel
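
A sketch of how the tool-summary prompt above might be assembled and routed. `run_task`, `{output}`, `streaming_on`, and `print_on` come from the diff; the prompt wording is paraphrased, and the `llm` stub plus the print-based panel are purely illustrative.

```python
from types import SimpleNamespace

def summarize_tool_output(agent, output: str) -> str:
    prompt = (
        "Focus on the key information and insights that would be most relevant "
        "to the user's original request.\n"
        f"{agent.run_task}\n"
        "If there are any errors or issues, highlight them prominently.\n"
        f"Tool Output:\n{output}"
    )
    # Stream the tool summary only if the agent is configured for streaming
    if agent.streaming_on and agent.print_on:
        # Handle the streamed chunks with a panel; stdout stands in for it here.
        chunks = []
        for chunk in agent.llm.stream_run(prompt):   # hypothetical streaming call
            print(chunk, end="", flush=True)
            chunks.append(chunk)
        print()
        return "".join(chunks)
    return agent.llm.run(prompt)                     # hypothetical blocking call


agent = SimpleNamespace(
    run_task="Check disk usage on the build host",
    streaming_on=True,
    print_on=True,
    llm=SimpleNamespace(
        stream_run=lambda p: iter(["The disk ", "is 80% full."]),
        run=lambda p: "The disk is 80% full.",
    ),
)
print(summarize_tool_output(agent, "/dev/sda1  80%  /"))
```
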