Merge pull request #611 from sambhavnoobcoder/Longterm-Memory-Access-Issue

Ensure tool execution regardless of long-term memory usage
pull/617/head
Kye Gomez 3 months ago committed by GitHub
commit 46c206c817
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -799,51 +799,61 @@ class Agent:
while attempt < self.retry_attempts and not success:
try:
if self.long_term_memory is not None:
logger.info(
"Querying long term memory..."
)
logger.info("Querying long term memory...")
self.memory_query(task_prompt)
# Generate response using LLM
response_args = (
(task_prompt, *args) if img is None else (task_prompt, img, *args)
)
response = self.call_llm(*response_args, **kwargs)
# Check if response is a dictionary and has 'choices' key
if isinstance(response, dict) and 'choices' in response:
response = response['choices'][0]['message']['content']
elif isinstance(response, str):
# If response is already a string, use it as is
pass
else:
response_args = (
(task_prompt, *args)
if img is None
else (task_prompt, img, *args)
)
response = self.call_llm(
*response_args, **kwargs
)
# Log the step metadata
logged = self.log_step_metadata(
loop_count, task_prompt, response
)
logger.info(logged)
# Convert to a str if the response is not a str
response = self.llm_output_parser(
response
raise ValueError(
f"Unexpected response format: {type(response)}"
)
# Print
if self.streaming_on is True:
self.stream_response(response)
else:
print(response)
# Add the response to the memory
self.short_memory.add(
role=self.agent_name, content=response
)
# Add to all responses
all_responses.append(response)
# TODO: Implement reliability check
# Check and execute tools
if self.tools is not None:
print(f"self.tools is not None: {response}")
self.parse_and_execute_tools(response)
# Log the step metadata
logged = self.log_step_metadata(
loop_count,
task_prompt,
response
)
logger.info(logged)
# Convert to a str if the response is not a str
response = self.llm_output_parser(response)
# Print
if self.streaming_on is True:
self.stream_response(response)
else:
print(response)
# Add the response to the memory
self.short_memory.add(
role=self.agent_name,
content=response
)
# Add to all responses
all_responses.append(response)
# TODO: Implement reliability check
if self.tools is not None:
# self.parse_function_call_and_execute(response)
self.parse_and_execute_tools(response)
# if self.code_interpreter is True:
# # Parse the code and execute
# logger.info("Parsing code and executing...")
@ -1846,20 +1856,21 @@ class Agent:
return response
def llm_output_parser(self, response: Any) -> str:
    """
    Parses the response from the LLM (Large Language Model) and returns it as a string.

    Args:
        response (Any): The response from the LLM.

    Returns:
        str: The parsed response as a string.
    """
    # Bug fix: the original used `response is not str`, which is an identity
    # comparison against the `str` type object and is True for every value
    # except the type `str` itself. Use isinstance for a real type check.
    if not isinstance(response, str):
        response = str(response)
    return response
def llm_output_parser(self, response):
    """Normalize an LLM response into a plain string.

    OpenAI-style dicts with a ``choices`` key yield the first choice's
    message content; other dicts are JSON-serialized; strings pass
    through untouched; any other type is coerced with ``str()``. On any
    parsing error the string representation of the raw response is
    returned as a fallback.
    """
    try:
        # Guard clauses, cheapest checks first.
        if isinstance(response, str):
            return response
        if not isinstance(response, dict):
            return str(response)  # Convert any other type to string
        if 'choices' not in response:
            return json.dumps(response)  # Convert dict to string
        return response['choices'][0]['message']['content']
    except Exception as e:
        logger.error(f"Error parsing LLM output: {e}")
        return str(response)  # Return string representation as fallback
def log_step_metadata(
self, loop: int, task: str, response: str

Loading…
Cancel
Save