Proper implementation of llm_output_parser

- implements the decision making when a 'choices'-type output is received.
pull/611/head
Sambhav Dixit 3 months ago committed by GitHub
parent e06e898486
commit a299a5854e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -1858,20 +1858,21 @@ class Agent:
return response return response
def llm_output_parser(self, response: Any) -> str:
    """Parse the raw LLM response into a plain string.

    Args:
        response: The raw response from the LLM. May be a dict (an
            OpenAI-style payload containing a ``choices`` list, or any
            other mapping), a plain str, or any other type.

    Returns:
        str: The first choice's message content for a ``choices``-style
        dict, the JSON serialization for any other dict, the string
        itself for str input, or ``str(response)`` for anything else.
        Never raises: any parsing error is logged and ``str(response)``
        is returned as a fallback.
    """
    try:
        if isinstance(response, dict):
            if 'choices' in response:
                # OpenAI-style payload: extract the first choice's text.
                return response['choices'][0]['message']['content']
            # Arbitrary dict: serialize so callers always get a string.
            return json.dumps(response)
        if isinstance(response, str):
            return response
        # Any other type: coerce to its string representation.
        return str(response)
    except Exception as e:
        # Malformed 'choices' structures (missing keys/indices) land
        # here; log with lazy %-formatting and fall back rather than
        # propagate, so callers always receive a string.
        logger.error("Error parsing LLM output: %s", e)
        return str(response)  # Return string representation as fallback
def log_step_metadata( def log_step_metadata(
self, loop: int, task: str, response: str self, loop: int, task: str, response: str

Loading…
Cancel
Save