From 20c3d8a19c22843c1d0ff25a09919a571f9fadf6 Mon Sep 17 00:00:00 2001 From: harshalmore31 Date: Mon, 30 Jun 2025 22:03:32 +0530 Subject: [PATCH 1/4] Refactor streaming response formatting and printing logic for improved clarity and functionality --- swarms/structs/agent.py | 27 ++++++++++++++------------- swarms/utils/formatter.py | 14 +++++++++----- 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 87eb131d..72cfdfc0 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -2557,7 +2557,7 @@ class Agent: complete_response = formatter.print_streaming_panel( streaming_response, title=f"🤖 Agent: {self.agent_name} Loops: {current_loop}", - style="bold cyan", + style=None, # Use random color like non-streaming approach collect_chunks=True, on_chunk_callback=on_chunk_received, ) @@ -2796,19 +2796,20 @@ class Agent: return self.role def pretty_print(self, response: str, loop_count: int): + if self.streaming_on is True: + # Skip printing here since real streaming is handled in call_llm + # This avoids double printing when streaming_on=True + return + if self.print_on is False: - if self.streaming_on is True: - # Skip printing here since real streaming is handled in call_llm - # This avoids double printing when streaming_on=True - pass - elif self.no_print is True: - pass - else: - # logger.info(f"Response: {response}") - formatter.print_panel( - f"{self.agent_name}: {response}", - f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]", - ) + # Show raw text without formatting panels + print(f"\n{self.agent_name}: {response}\n") + else: + # Use formatted panels (default behavior when print_on=True) + formatter.print_panel( + f"{self.agent_name}: {response}", + f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]", + ) def parse_llm_output(self, response: Any): """Parse and standardize the output from the LLM. diff --git a/swarms/utils/formatter.py b/swarms/utils/formatter.py index 5a4b8c2e..e10afc7f 100644 --- a/swarms/utils/formatter.py +++ b/swarms/utils/formatter.py @@ -156,7 +156,7 @@ class Formatter: self, streaming_response, title: str = "🤖 Agent Streaming Response", - style: str = choose_random_color(), + style: str = None, collect_chunks: bool = False, on_chunk_callback: Optional[Callable] = None, ) -> str: @@ -167,13 +167,17 @@ class Formatter: Args: streaming_response: The streaming response generator from LiteLLM. title (str): Title of the panel. - style (str): Style for the panel border. + style (str): Style for the panel border (if None, will use random color). collect_chunks (bool): Whether to collect individual chunks for conversation saving. on_chunk_callback (Optional[Callable]): Callback function to call for each chunk. Returns: str: The complete accumulated response text. 
""" + # Get random color similar to non-streaming approach + random_color = choose_random_color() + panel_style = f"bold {random_color}" if style is None else style + text_style = random_color def create_streaming_panel(text_obj, is_complete=False): """Create panel with proper text wrapping using Rich's built-in capabilities""" @@ -190,7 +194,7 @@ class Formatter: panel = Panel( display_text, title=panel_title, - border_style=style, + border_style=panel_style, padding=(1, 2), width=self.console.size.width, # Rich handles wrapping automatically ) @@ -214,9 +218,9 @@ class Formatter: and part.choices and part.choices[0].delta.content ): - # Add ONLY the new chunk to the Text object + # Add ONLY the new chunk to the Text object with random color style chunk = part.choices[0].delta.content - streaming_text.append(chunk, style="white") + streaming_text.append(chunk, style=text_style) complete_response += chunk # Collect chunks if requested From 50a69950f260e86c7de106df66f34aeed5e72204 Mon Sep 17 00:00:00 2001 From: harshalmore31 Date: Mon, 30 Jun 2025 23:28:05 +0530 Subject: [PATCH 2/4] Refactor agent streaming behavior and update formatter panel title color --- stream_example.py | 10 +++++----- swarms/structs/agent.py | 15 +++------------ swarms/utils/formatter.py | 2 +- 3 files changed, 9 insertions(+), 18 deletions(-) diff --git a/stream_example.py b/stream_example.py index 3978fcaa..22e21d9f 100644 --- a/stream_example.py +++ b/stream_example.py @@ -3,10 +3,10 @@ from swarms import Agent # Enable real-time streaming agent = Agent( agent_name="StoryAgent", - model_name="gpt-4o-mini", - streaming_on=True, # 🔥 This enables real streaming! - max_loops=1, - print_on=False, # By Default its False, raw streaming !! + model_name="gpt-4o-mini", # 🔥 This enables real streaming! 
+ max_loops=4, + streaming_on=True, + print_on=True, output_type="all", ) @@ -14,4 +14,4 @@ agent = Agent( response = agent.run( "Tell me a detailed story about Humanity colonizing the stars" ) -print(response) +# print(response) diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 72cfdfc0..9fce6eea 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -2518,13 +2518,8 @@ class Agent: ) and not isinstance(streaming_response, str): # Check print_on parameter for different streaming behaviors if self.print_on is False: - # Show raw streaming text without formatting panels + # Silent streaming - no printing, just collect chunks chunks = [] - print( - f"\n{self.agent_name}: ", - end="", - flush=True, - ) for chunk in streaming_response: if ( hasattr(chunk, "choices") @@ -2533,11 +2528,7 @@ class Agent: content = chunk.choices[ 0 ].delta.content - print( - content, end="", flush=True - ) # Print raw streaming text chunks.append(content) - print() # New line after streaming completes complete_response = "".join(chunks) else: # Collect chunks for conversation saving @@ -2802,8 +2793,8 @@ class Agent: return if self.print_on is False: - # Show raw text without formatting panels - print(f"\n{self.agent_name}: {response}\n") + # Silent mode - no printing at all + return else: # Use formatted panels (default behavior when print_on=True) formatter.print_panel( diff --git a/swarms/utils/formatter.py b/swarms/utils/formatter.py index e10afc7f..b8f9d963 100644 --- a/swarms/utils/formatter.py +++ b/swarms/utils/formatter.py @@ -181,7 +181,7 @@ class Formatter: def create_streaming_panel(text_obj, is_complete=False): """Create panel with proper text wrapping using Rich's built-in capabilities""" - panel_title = f"[bold cyan]{title}[/bold cyan]" + panel_title = f"[white]{title}[/white]" if is_complete: panel_title += " [bold green]✅[/bold green]" From d0efa7ab966e6fff3664d4be9cd49a6edded86e1 Mon Sep 17 00:00:00 2001 From: harshalmore31 Date: Tue, 1 Jul 2025 00:02:14 +0530 Subject: [PATCH 3/4] Refactor agent initialization parameters for improved clarity and functionality --- stream_example.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/stream_example.py b/stream_example.py index 22e21d9f..a09a4260 100644 --- a/stream_example.py +++ b/stream_example.py @@ -3,15 +3,11 @@ from swarms import Agent # Enable real-time streaming agent = Agent( agent_name="StoryAgent", - model_name="gpt-4o-mini", # 🔥 This enables real streaming! - max_loops=4, - streaming_on=True, - print_on=True, - output_type="all", + model_name="gpt-4o-mini", + streaming_on=True, # 🔥 This enables real streaming! + max_loops=1, + print_on=True, # By Default its False, raw streaming !! ) # This will now stream in real-time with beautiful UI! 
-response = agent.run( - "Tell me a detailed story about Humanity colonizing the stars" -) -# print(response) +response = agent.run("Tell me a detailed story...") \ No newline at end of file From 3b37542f77b420b7300d2be457bad8ef4c457db9 Mon Sep 17 00:00:00 2001 From: harshalmore31 Date: Tue, 1 Jul 2025 00:03:56 +0530 Subject: [PATCH 4/4] Update agent parameters for improved streaming functionality and response clarity --- stream_example.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/stream_example.py b/stream_example.py index a09a4260..82ff97b6 100644 --- a/stream_example.py +++ b/stream_example.py @@ -3,11 +3,15 @@ from swarms import Agent # Enable real-time streaming agent = Agent( agent_name="StoryAgent", - model_name="gpt-4o-mini", + model_name="gpt-4o-mini", streaming_on=True, # 🔥 This enables real streaming! max_loops=1, - print_on=True, # By Default its False, raw streaming !! + print_on=False, + output_type="all", ) # This will now stream in real-time with beautiful UI! -response = agent.run("Tell me a detailed story...") \ No newline at end of file +response = agent.run( + "Tell me a detailed story about Humanity colonizing the stars" +) +print(response) \ No newline at end of file
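
For reference, below is a minimal usage sketch of the two streaming modes this series leaves in place. It is a sketch only: it assumes the swarms Agent constructor and the streaming_on / print_on / output_type flags behave as shown in the diffs above, and that credentials for an OpenAI-compatible "gpt-4o-mini" endpoint are configured in the environment.

from swarms import Agent

# Mode 1: live streaming with the formatted Rich panel (print_on=True).
# Per the formatter.py changes above, chunks are rendered inside
# print_streaming_panel with a randomly chosen border/text color.
panel_agent = Agent(
    agent_name="StoryAgent",
    model_name="gpt-4o-mini",
    streaming_on=True,
    print_on=True,
    max_loops=1,
)
panel_agent.run(
    "Tell me a detailed story about Humanity colonizing the stars"
)

# Mode 2: silent streaming (print_on=False). Per the agent.py changes above,
# chunks are collected but nothing is printed, so the caller decides how to
# display the final text.
silent_agent = Agent(
    agent_name="StoryAgent",
    model_name="gpt-4o-mini",
    streaming_on=True,
    print_on=False,
    output_type="all",
    max_loops=1,
)
response = silent_agent.run(
    "Tell me a detailed story about Humanity colonizing the stars"
)
print(response)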