Refactor streaming response handling in the Formatter class; add a lock for concurrent Live panel management and improve error handling

pull/938/head
harshalmore31 3 weeks ago
parent 638d81c338
commit ccddb17ccc

@ -1144,7 +1144,6 @@ class Agent:
self.tool_call_completed = True self.tool_call_completed = True
# Reset expecting_tool_call so subsequent requests can stream # Reset expecting_tool_call so subsequent requests can stream
self.expecting_tool_call = False self.expecting_tool_call = False
# Handle MCP tools # Handle MCP tools
if ( if (
exists(self.mcp_url) exists(self.mcp_url)
@ -2558,7 +2557,6 @@ class Agent:
try: try:
# Decide whether streaming should be used for this call # Decide whether streaming should be used for this call
streaming_enabled = self.streaming_on and not getattr(self, "expecting_tool_call", False) streaming_enabled = self.streaming_on and not getattr(self, "expecting_tool_call", False)
# Set streaming parameter in LLM if streaming is enabled for this call # Set streaming parameter in LLM if streaming is enabled for this call
if streaming_enabled and hasattr(self.llm, "stream"): if streaming_enabled and hasattr(self.llm, "stream"):
original_stream = self.llm.stream original_stream = self.llm.stream
@ -2573,7 +2571,7 @@ class Agent:
task=task, *args, **kwargs task=task, *args, **kwargs
) )
# If we get a streaming response, handle it with the streaming panel # If we get a streaming response, handle it with the new streaming panel
if hasattr( if hasattr(
streaming_response, "__iter__" streaming_response, "__iter__"
) and not isinstance(streaming_response, str): ) and not isinstance(streaming_response, str):
@ -3079,11 +3077,9 @@ class Agent:
Focus on the key information and insights that would be most relevant to the user's original request. Focus on the key information and insights that would be most relevant to the user's original request.
{self.run_task} {self.run_task}
If there are any errors or issues, highlight them prominently. If there are any errors or issues, highlight them prominently.
Tool Output: Tool Output:
{output} {output}
""" """
# Stream the tool summary only if the agent is configured for streaming # Stream the tool summary only if the agent is configured for streaming
if self.streaming_on and self.print_on: if self.streaming_on and self.print_on:
# Handle streaming response with streaming panel # Handle streaming response with streaming panel

@ -9,6 +9,12 @@ from rich.progress import Progress, SpinnerColumn, TextColumn
from rich.table import Table from rich.table import Table
from rich.text import Text from rich.text import Text
# Global lock to ensure only a single Rich Live context is active at any moment.
# Rich's Live render is **not** thread-safe; concurrent Live contexts on the same
# console raise runtime errors. Using a module-level lock serialises access and
# prevents crashes when multiple agents stream simultaneously in different
# threads (e.g., in ConcurrentWorkflow).
live_render_lock = threading.Lock()
def choose_random_color(): def choose_random_color():
import random import random
@ -209,6 +215,10 @@ class Formatter:
complete_response = "" complete_response = ""
chunks_collected = [] chunks_collected = []
# Acquire the lock so that only one Live panel is active at a time.
# Other threads will wait here until the current streaming completes,
# avoiding Rich.Live concurrency errors.
with live_render_lock:
# TRUE streaming with Rich's automatic text wrapping # TRUE streaming with Rich's automatic text wrapping
with Live( with Live(
create_streaming_panel(streaming_text), create_streaming_panel(streaming_text),

Loading…
Cancel
Save