pull/938/head
harshalmore31 2 months ago
parent f3ed5b9055
commit 616cbe3293

@@ -1134,6 +1134,7 @@ class Agent:
                 success = False
                 while attempt < self.retry_attempts and not success:
                     try:
                         if img is not None:
                             response = self.call_llm(
                                 task=task_prompt,
@@ -1203,8 +1204,12 @@ class Agent:
                                 f"LLM returned None response in loop {loop_count}, skipping MCP tool handling"
                             )
-                        success = True
+                        # self.sentiment_and_evaluator(response)
+                        success = True  # Mark as successful to exit the retry loop
                     except Exception as e:
                         if self.autosave is True:
                             log_agent_data(self.to_dict())
                             self.save()
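
For context, the sketch below is a minimal, self-contained rendering of the retry pattern the two hunks above touch. It is not the actual Agent implementation: call_llm is a stub standing in for the real LLM call, and the autosave branch is reduced to a log message. The point of the change is that success is set to True only after call_llm returns without raising, which is what lets the while loop stop retrying:

    import logging

    logger = logging.getLogger(__name__)

    def run_with_retries(call_llm, task_prompt, retry_attempts=3, img=None):
        """Minimal sketch of a retry loop around an LLM call; not the real Agent code."""
        attempt = 0
        success = False
        response = None
        while attempt < retry_attempts and not success:
            try:
                # Forward the image only when one was supplied.
                if img is not None:
                    response = call_llm(task=task_prompt, img=img)
                else:
                    response = call_llm(task=task_prompt)
                success = True  # Mark as successful to exit the retry loop
            except Exception as e:
                # The real class autosaves agent state here before the next attempt.
                logger.error(f"Attempt {attempt + 1} failed: {e}")
                attempt += 1
        return response

    print(run_with_retries(lambda task, img=None: f"echo: {task}", "hello"))

With a stub that never raises, the first attempt succeeds and the loop exits immediately; if every attempt raises, the function simply returns None after retry_attempts tries.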
@@ -2226,11 +2231,11 @@ class Agent:
         self, response: str, delay: float = 0.001
     ) -> None:
         """
-        Streams the response token by token using centralized wrapper logic.
+        Streams the response token by token.
 
         Args:
             response (str): The response text to be streamed.
-            delay (float, optional): Delay in seconds between printing each token. Default is 0.001 seconds.
+            delay (float, optional): Delay in seconds between printing each token. Default is 0.1 seconds.
 
         Raises:
             ValueError: If the response is not provided.
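
The docstring in this hunk describes token-by-token streaming paced by delay. A rough standalone sketch of that behavior follows; the whitespace tokenization and direct stdout writes are assumptions made for illustration (the real method delegates to the agent's output handling), and the sketch uses the 0.001-second default from the signature shown above rather than the 0.1 seconds mentioned in the updated docstring text:

    import sys
    import time

    def stream_response(response: str, delay: float = 0.001) -> None:
        """Print a response token by token, sleeping `delay` seconds between tokens."""
        if not response:
            raise ValueError("Response must be provided.")
        for token in response.split():
            sys.stdout.write(token + " ")
            sys.stdout.flush()
            time.sleep(delay)
        sys.stdout.write("\n")

    stream_response("Streaming output one token at a time.", delay=0.001)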
