From 92a9eac8d834b8d4b74d360792aa7ca774cc6a50 Mon Sep 17 00:00:00 2001
From: harshalmore31
Date: Fri, 27 Jun 2025 03:43:33 +0530
Subject: [PATCH] Refactor streaming response handling to support print_on
 parameter for flexible output

---
 stream_example.py       |  1 +
 swarms/structs/agent.py | 61 ++++++++++++++++++++++++-----------------
 2 files changed, 37 insertions(+), 25 deletions(-)

diff --git a/stream_example.py b/stream_example.py
index 0c081700..a09a4260 100644
--- a/stream_example.py
+++ b/stream_example.py
@@ -6,6 +6,7 @@ agent = Agent(
     model_name="gpt-4o-mini",
     streaming_on=True,  # 🔥 This enables real streaming!
     max_loops=1,
+    print_on=True,  # By default it's False (raw streaming)!
 )
 
 # This will now stream in real-time with beautiful UI!
diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py
index 9503770d..9c33ceea 100644
--- a/swarms/structs/agent.py
+++ b/swarms/structs/agent.py
@@ -2488,25 +2488,38 @@ class Agent:
 
         # If we get a streaming response, handle it with the new streaming panel
         if hasattr(streaming_response, '__iter__') and not isinstance(streaming_response, str):
-            # Collect chunks for conversation saving
-            collected_chunks = []
-
-            def on_chunk_received(chunk: str):
-                """Callback to collect chunks as they arrive"""
-                collected_chunks.append(chunk)
-                # Optional: Save each chunk to conversation in real-time
-                # This creates a more detailed conversation history
-                if self.verbose:
-                    logger.debug(f"Streaming chunk received: {chunk[:50]}...")
-
-            # Use the streaming panel to display and collect the response
-            complete_response = formatter.print_streaming_panel(
-                streaming_response,
-                title=f"🤖 {self.agent_name} Streaming Response",
-                style="bold cyan",
-                collect_chunks=True,
-                on_chunk_callback=on_chunk_received
-            )
+            # Check print_on parameter for different streaming behaviors
+            if self.print_on is False:
+                # Show raw streaming text without formatting panels
+                chunks = []
+                print(f"\n{self.agent_name}: ", end="", flush=True)
+                for chunk in streaming_response:
+                    if hasattr(chunk, 'choices') and chunk.choices[0].delta.content:
+                        content = chunk.choices[0].delta.content
+                        print(content, end="", flush=True)  # Print raw streaming text
+                        chunks.append(content)
+                print()  # New line after streaming completes
+                complete_response = ''.join(chunks)
+            else:
+                # Collect chunks for conversation saving
+                collected_chunks = []
+
+                def on_chunk_received(chunk: str):
+                    """Callback to collect chunks as they arrive"""
+                    collected_chunks.append(chunk)
+                    # Optional: Save each chunk to conversation in real-time
+                    # This creates a more detailed conversation history
+                    if self.verbose:
+                        logger.debug(f"Streaming chunk received: {chunk[:50]}...")
+
+                # Use the streaming panel to display and collect the response
+                complete_response = formatter.print_streaming_panel(
+                    streaming_response,
+                    title=f"🤖 {self.agent_name} Streaming Response",
+                    style="bold cyan",
+                    collect_chunks=True,
+                    on_chunk_callback=on_chunk_received
+                )
 
             # Restore original stream setting
             self.llm.stream = original_stream
@@ -2744,12 +2757,10 @@ class Agent:
     def pretty_print(self, response: str, loop_count: int):
         if self.print_on is False:
             if self.streaming_on is True:
-                # self.stream_response(response)
-                formatter.print_panel_token_by_token(
-                    f"{self.agent_name}: {response}",
-                    title=f"Agent Name: {self.agent_name} [Max Loops: {loop_count}]",
-                )
-            elif self.print_on is True:
+                # Skip printing here since real streaming is handled in call_llm
+                # This avoids double printing when streaming_on=True
+                pass
+            elif self.no_print is True:
                 pass
             else:
                 # logger.info(f"Response: {response}")
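
For reviewers: a minimal usage sketch of the two streaming modes this patch
introduces, assuming the Agent constructor and run() entry point shown in
stream_example.py. The agent_name values and the task string are illustrative
placeholders, not part of the patch.

from swarms import Agent

# print_on=False: raw streaming. Per the new branch in call_llm, plain text
# chunks are printed directly to stdout, prefixed with the agent name.
raw_agent = Agent(
    agent_name="Raw-Streamer",  # hypothetical name for illustration
    model_name="gpt-4o-mini",
    streaming_on=True,
    print_on=False,
    max_loops=1,
)
raw_agent.run("Explain streaming responses in one paragraph.")  # illustrative task

# print_on=True: formatted streaming. call_llm routes chunks through
# formatter.print_streaming_panel and collects them via on_chunk_received.
panel_agent = Agent(
    agent_name="Panel-Streamer",  # hypothetical name for illustration
    model_name="gpt-4o-mini",
    streaming_on=True,
    print_on=True,
    max_loops=1,
)
panel_agent.run("Explain streaming responses in one paragraph.")

In both modes, call_llm joins the streamed chunks into complete_response, so
the value available for conversation saving is the same regardless of how the
chunks were displayed.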