parent d23a728ac2
commit fdff36f715
@@ -1,109 +0,0 @@
import faiss
from langchain.chat_models import ChatOpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain_experimental.autonomous_agents import AutoGPT

from swarms.tools.autogpt import (
    DuckDuckGoSearchRun,
    FileChatMessageHistory,
    ReadFileTool,
    WebpageQATool,
    WriteFileTool,
    process_csv,
    # web_search,
    query_website_tool,
)
from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator

ROOT_DIR = "./data/"


class AutoBot:
    @log_decorator
    @error_decorator
    @timing_decorator
    def __init__(
        self,
        model_name="gpt-4",
        openai_api_key=None,
        ai_name="Autobot Swarm Worker",
        ai_role="Worker in a swarm",
        # embedding_size=None,
        # k=None,
        temperature=0.5
    ):
        self.openai_api_key = openai_api_key
        self.temperature = temperature

        try:
            self.llm = ChatOpenAI(model_name=model_name,
                                  openai_api_key=self.openai_api_key,
                                  temperature=self.temperature)
        except Exception as error:
            raise RuntimeError(f"Error Initializing ChatOpenAI: {error}")

        self.ai_name = ai_name
        self.ai_role = ai_role

        # self.embedding_size = embedding_size
        # # self.k = k

        self.setup_tools()
        self.setup_memory()
        self.setup_agent()

    @log_decorator
    @error_decorator
    @timing_decorator
    def setup_tools(self):
        self.tools = [
            WriteFileTool(root_dir=ROOT_DIR),
            ReadFileTool(root_dir=ROOT_DIR),
            process_csv,
            query_website_tool,
        ]

    def setup_memory(self):
        try:
            embeddings_model = OpenAIEmbeddings(openai_api_key=self.openai_api_key)
            embedding_size = 1536
            index = faiss.IndexFlatL2(embedding_size)
            self.vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
        except Exception as error:
raise RuntimeError(f"Error setting up memory perhaps try try tuning the embedding size: {error}")


    def setup_agent(self):
        try:
            self.agent = AutoGPT.from_llm_and_tools(
                ai_name=self.ai_name,
                ai_role=self.ai_role,
                tools=self.tools,
                llm=self.llm,
                memory=self.vectorstore.as_retriever(search_kwargs={"k": 8}),
            )

        except Exception as error:
            raise RuntimeError(f"Error setting up agent: {error}")

    @log_decorator
    @error_decorator
    @timing_decorator
    def run(self, task):
        try:
            result = self.agent.run([task])
            return result
        except Exception as error:
            raise RuntimeError(f"Error while running agent: {error}")

    @log_decorator
    @error_decorator
    @timing_decorator
    def __call__(self, task):
        try:
            results = self.agent.run([task])
            return results
        except Exception as error:
            raise RuntimeError(f"Error while running agent: {error}")
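For orientation, a minimal usage sketch of the AutoBot worker removed above (not part of the diff; it assumes the class is importable, the swarms tool imports resolve, and a valid OpenAI API key is available):

# Hedged example: AutoBot is the class defined in the deleted file above.
bot = AutoBot(
    model_name="gpt-4",
    openai_api_key="sk-...",  # assumed placeholder key
    ai_name="Autobot Swarm Worker",
    ai_role="Worker in a swarm",
    temperature=0.5,
)

# __init__ wires up file tools, a FAISS-backed vector memory, and an AutoGPT
# agent, so a single call runs the task end to end.
result = bot("Read the files in ./data/ and write a short summary file")
print(result)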
@@ -1,268 +0,0 @@
import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

from pydantic import BaseModel, Field

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_experimental.generative_agents.memory import GenerativeAgentMemory

from swarms.utils.schema.base import BaseLanguageModel

# TODO: Add ability to integrate with tools
# TODO: Replace with f strings and all PromptTemplate

class WorkerCharacterAgent(BaseModel):
    """A character with memory and innate characteristics."""

    name: str
    """The character's name."""

    age: Optional[int] = None
    """The optional age of the character."""
    traits: str = "N/A"
    """Permanent traits to ascribe to the character."""
    status: str
    """The character's current status."""
    memory: GenerativeAgentMemory
    """The memory object that combines relevance, recency, and 'importance'."""
    llm: BaseLanguageModel
    """The underlying language model."""
    verbose: bool = False
    summary: str = ""  #: :meta private:
    """Stateful self-summary generated via reflection on the character's memory."""

    summary_refresh_seconds: int = 3600  #: :meta private:
    """How frequently to re-generate the summary."""

    last_refreshed: datetime = Field(default_factory=datetime.now)  # : :meta private:
    """The last time the character's summary was regenerated."""

    daily_summaries: List[str] = Field(default_factory=list)  # : :meta private:
    """Summary of the events in the plan that the agent took."""

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    # LLM-related methods
    @staticmethod
    def _parse_list(text: str) -> List[str]:
        """Parse a newline-separated string into a list of strings."""
        lines = re.split(r"\n", text.strip())
        return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]

    def chain(self, prompt: PromptTemplate) -> LLMChain:
        return LLMChain(
            llm=self.llm, prompt=prompt, verbose=self.verbose, memory=self.memory
        )

    def _get_entity_from_observation(self, observation: str) -> str:
        prompt = PromptTemplate.from_template(
            "What is the observed entity in the following observation? {observation}"
            + "\nEntity="
        )
        return self.chain(prompt).run(observation=observation).strip()

    def _get_entity_action(self, observation: str, entity_name: str) -> str:
        prompt = PromptTemplate.from_template(
            "What is the {entity} doing in the following observation? {observation}"
            + "\nThe {entity} is"
        )
        return (
            self.chain(prompt).run(entity=entity_name, observation=observation).strip()
        )

    # TODO: Replace with f strings and all PromptTemplate
    def summarize_related_memories(self, observation: str) -> str:
        """Summarize memories that are most relevant to an observation."""
        prompt = PromptTemplate.from_template(
            """
{q1}?
Context from memory:
{relevant_memories}
Relevant context:
"""
        )
        entity_name = self._get_entity_from_observation(observation)
        entity_action = self._get_entity_action(observation, entity_name)
        q1 = f"What is the relationship between {self.name} and {entity_name}"
        q2 = f"{entity_name} is {entity_action}"
        return self.chain(prompt=prompt).run(q1=q1, queries=[q1, q2]).strip()

    def _generate_reaction(
        self, observation: str, suffix: str, now: Optional[datetime] = None
    ) -> str:
        """React to a given observation or dialogue act."""
        prompt = PromptTemplate.from_template(
            "{agent_summary_description}"
            + "\nIt is {current_time}."
            + "\n{agent_name}'s status: {agent_status}"
            + "\nSummary of relevant context from {agent_name}'s memory:"
            + "\n{relevant_memories}"
            + "\nMost recent observations: {most_recent_memories}"
            + "\nObservation: {observation}"
            + "\n\n"
            + suffix
        )
        agent_summary_description = self.get_summary(now=now)
        relevant_memories_str = self.summarize_related_memories(observation)
        current_time_str = (
            datetime.now().strftime("%B %d, %Y, %I:%M %p")
            if now is None
            else now.strftime("%B %d, %Y, %I:%M %p")
        )
        kwargs: Dict[str, Any] = dict(
            agent_summary_description=agent_summary_description,
            current_time=current_time_str,
            relevant_memories=relevant_memories_str,
            agent_name=self.name,
            observation=observation,
            agent_status=self.status,
        )
        consumed_tokens = self.llm.get_num_tokens(
            prompt.format(most_recent_memories="", **kwargs)
        )
        kwargs[self.memory.most_recent_memories_token_key] = consumed_tokens
        return self.chain(prompt=prompt).run(**kwargs).strip()

    def _clean_response(self, text: str) -> str:
        return re.sub(f"^{self.name} ", "", text.strip()).strip()

    def generate_reaction(
        self, observation: str, now: Optional[datetime] = None
    ) -> Tuple[bool, str]:
        """React to a given observation."""
        call_to_action_template = (
            "Should {agent_name} react to the observation, and if so,"
            + " what would be an appropriate reaction? Respond in one line."
            + ' If the action is to engage in dialogue, write:\nSAY: "what to say"'
            + "\notherwise, write:\nREACT: {agent_name}'s reaction (if anything)."
            + "\nEither do nothing, react, or say something but not both.\n\n"
        )
        full_result = self._generate_reaction(
            observation, call_to_action_template, now=now
        )
        result = full_result.strip().split("\n")[0]
        # AAA
        self.memory.save_context(
            {},
            {
                self.memory.add_memory_key: f"{self.name} observed "
                f"{observation} and reacted by {result}",
                self.memory.now_key: now,
            },
        )
        if "REACT:" in result:
            reaction = self._clean_response(result.split("REACT:")[-1])
            return False, f"{self.name} {reaction}"
        if "SAY:" in result:
            said_value = self._clean_response(result.split("SAY:")[-1])
            return True, f"{self.name} said {said_value}"
        else:
            return False, result

    def generate_dialogue_response(
        self, observation: str, now: Optional[datetime] = None
    ) -> Tuple[bool, str]:
        """React to a given observation."""
        call_to_action_template = (
            "What would {agent_name} say? To end the conversation, write:"
            ' GOODBYE: "what to say". Otherwise to continue the conversation,'
            ' write: SAY: "what to say next"\n\n'
        )
        full_result = self._generate_reaction(
            observation, call_to_action_template, now=now
        )
        result = full_result.strip().split("\n")[0]
        if "GOODBYE:" in result:
            farewell = self._clean_response(result.split("GOODBYE:")[-1])
            self.memory.save_context(
                {},
                {
                    self.memory.add_memory_key: f"{self.name} observed "
                    f"{observation} and said {farewell}",
                    self.memory.now_key: now,
                },
            )
            return False, f"{self.name} said {farewell}"
        if "SAY:" in result:
            response_text = self._clean_response(result.split("SAY:")[-1])
            self.memory.save_context(
                {},
                {
                    self.memory.add_memory_key: f"{self.name} observed "
                    f"{observation} and said {response_text}",
                    self.memory.now_key: now,
                },
            )
            return True, f"{self.name} said {response_text}"
        else:
            return False, result

    ######################################################
    # Agent's stateful summary methods.                  #
    # Each dialog or response prompt includes a header   #
    # summarizing the agent's self-description. This is  #
    # updated periodically through probing its memories  #
    ######################################################
    def _compute_agent_summary(self) -> str:
        """Compute a summary of the agent's core characteristics from memory."""
        prompt = PromptTemplate.from_template(
            "How would you summarize {name}'s core characteristics given the"
            + " following statements:\n"
            + "{relevant_memories}"
            + "Do not embellish."
            + "\n\nSummary: "
        )
        # The agent seeks to think about their core characteristics.
        return (
            self.chain(prompt)
            .run(name=self.name, queries=[f"{self.name}'s core characteristics"])
            .strip()
        )

    def get_summary(
        self, force_refresh: bool = False, now: Optional[datetime] = None
    ) -> str:
        """Return a descriptive summary of the agent."""
        current_time = datetime.now() if now is None else now
        since_refresh = (current_time - self.last_refreshed).seconds
        if (
            not self.summary
            or since_refresh >= self.summary_refresh_seconds
            or force_refresh
        ):
            self.summary = self._compute_agent_summary()
            self.last_refreshed = current_time
        age = self.age if self.age is not None else "N/A"
        return (
            f"Name: {self.name} (age: {age})"
            + f"\nInnate traits: {self.traits}"
            + f"\n{self.summary}"
        )

    def get_full_header(
        self, force_refresh: bool = False, now: Optional[datetime] = None
    ) -> str:
        """Return a full header of the agent's status, summary, and current time."""
        now = datetime.now() if now is None else now
        summary = self.get_summary(force_refresh=force_refresh, now=now)
        current_time_str = now.strftime("%B %d, %Y, %I:%M %p")
        return (
            f"{summary}\nIt is {current_time_str}.\n{self.name}'s status: {self.status}"
        )
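For orientation, a sketch of how the WorkerCharacterAgent removed above was meant to be driven (not part of the diff; the llm and retriever wiring is an assumption based on the langchain stack imported above, and it assumes swarms' BaseLanguageModel is the langchain one):

# Hedged example: WorkerCharacterAgent is the class defined in the deleted file above.
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(model_name="gpt-4", temperature=0.5)
# `retriever` is assumed to be a TimeWeightedVectorStoreRetriever built as in the other files.
memory = GenerativeAgentMemory(llm=llm, memory_retriever=retriever)

worker = WorkerCharacterAgent(
    name="Tommie",
    age=25,
    traits="curious, methodical",
    status="waiting for a task",
    memory=memory,
    llm=llm,
)

# generate_reaction returns (spoke, text): the prompt asks the model to reply
# with either 'REACT: ...' or 'SAY: "..."', and only the first line of the
# reply is parsed for those markers.
spoke, reply = worker.generate_reaction("A user asked for help cleaning a CSV file.")
print(spoke, reply)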
@@ -1,304 +0,0 @@
import logging
import re
from datetime import datetime
from typing import Any, Dict, List, Optional

from langchain import LLMChain
from langchain.base_language import BaseLanguageModel

############
from langchain.prompts import PromptTemplate
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.schema import BaseMemory, Document
from langchain.utils import mock_now

logger = logging.getLogger(__name__)


#######################


class WorkerSims(BaseMemory):
    llm: BaseLanguageModel
    """The core language model."""

    memory_retriever: TimeWeightedVectorStoreRetriever
    """The retriever to fetch related memories."""
    verbose: bool = False

    reflection_threshold: Optional[float] = None
    """When aggregate_importance exceeds reflection_threshold, stop to reflect."""

    current_plan: List[str] = []
    """The current plan of the agent."""

    # A weight of 0.15 makes this less important than it
    # would be otherwise, relative to salience and time
    importance_weight: float = 0.15
    """How much weight to assign the memory importance."""

    aggregate_importance: float = 0.0  # : :meta private:
    """Track the sum of the 'importance' of recent memories.

    Triggers reflection when it reaches reflection_threshold."""

    max_tokens_limit: int = 1200  # : :meta private:
    # input keys
    queries_key: str = "queries"
    most_recent_memories_token_key: str = "recent_memories_token"
    add_memory_key: str = "add_memory"
    # output keys
    relevant_memories_key: str = "relevant_memories"
    relevant_memories_simple_key: str = "relevant_memories_simple"
    most_recent_memories_key: str = "most_recent_memories"
    now_key: str = "now"
    reflecting: bool = False

    def chain(self, prompt: PromptTemplate) -> LLMChain:
        return LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)

    @staticmethod
    def _parse_list(text: str) -> List[str]:
        """Parse a newline-separated string into a list of strings."""
        lines = re.split(r"\n", text.strip())
        lines = [line for line in lines if line.strip()]  # remove empty lines
        return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]

    def _get_topics_of_reflection(self, last_k: int = 50) -> List[str]:
        """Return the 3 most salient high-level questions about recent observations."""
        prompt = PromptTemplate.from_template(
            "{observations}\n\n"
            "Given only the information above, what are the 3 most salient "
            "high-level questions we can answer about the subjects in the statements?\n"
            "Provide each question on a new line."
        )
        observations = self.memory_retriever.memory_stream[-last_k:]
        observation_str = "\n".join(
            [self._format_memory_detail(o) for o in observations]
        )
        result = self.chain(prompt).run(observations=observation_str)
        return self._parse_list(result)

    def _get_insights_on_topic(
        self, topic: str, now: Optional[datetime] = None
    ) -> List[str]:
        """Generate 'insights' on a topic of reflection, based on pertinent memories."""
        prompt = PromptTemplate.from_template(
            "Statements relevant to: '{topic}'\n"
            "---\n"
            "{related_statements}\n"
            "---\n"
            "What 5 high-level novel insights can you infer from the above statements "
            "that are relevant for answering the following question?\n"
            "Do not include any insights that are not relevant to the question.\n"
            "Do not repeat any insights that have already been made.\n\n"
            "Question: {topic}\n\n"
            "(example format: insight (because of 1, 5, 3))\n"
        )

        related_memories = self.fetch_memories(topic, now=now)
        related_statements = "\n".join(
            [
                self._format_memory_detail(memory, prefix=f"{i+1}. ")
                for i, memory in enumerate(related_memories)
            ]
        )
        result = self.chain(prompt).run(
            topic=topic, related_statements=related_statements
        )
        # TODO: Parse the connections between memories and insights
        return self._parse_list(result)

    def pause_to_reflect(self, now: Optional[datetime] = None) -> List[str]:
        """Reflect on recent observations and generate 'insights'."""
        if self.verbose:
            logger.info("Character is reflecting")
        new_insights = []
        topics = self._get_topics_of_reflection()
        for topic in topics:
            insights = self._get_insights_on_topic(topic, now=now)
            for insight in insights:
                self.add_memory(insight, now=now)
            new_insights.extend(insights)
        return new_insights

    def _score_memory_importance(self, memory_content: str) -> float:
        """Score the absolute importance of the given memory."""
        prompt = PromptTemplate.from_template(
            "On the scale of 1 to 10, where 1 is purely mundane"
            + " (e.g., brushing teeth, making bed) and 10 is"
            + " extremely poignant (e.g., a break up, college"
            + " acceptance), rate the likely poignancy of the"
            + " following piece of memory. Respond with a single integer."
            + "\nMemory: {memory_content}"
            + "\nRating: "
        )
        score = self.chain(prompt).run(memory_content=memory_content).strip()
        if self.verbose:
            logger.info(f"Importance score: {score}")
        match = re.search(r"^\D*(\d+)", score)
        if match:
            return (float(match.group(1)) / 10) * self.importance_weight
        else:
            return 0.0

    def _score_memories_importance(self, memory_content: str) -> List[float]:
        """Score the absolute importance of the given memories."""
        prompt = PromptTemplate.from_template(
            "On the scale of 1 to 10, where 1 is purely mundane"
            + " (e.g., brushing teeth, making bed) and 10 is"
            + " extremely poignant (e.g., a break up, college"
            + " acceptance), rate the likely poignancy of the"
            + " following piece of memory. Always answer with only a list of numbers."
            + " If just given one memory still respond in a list."
            + " Memories are separated by semicolons (;)"
            + "\nMemories: {memory_content}"
            + "\nRating: "
        )
        scores = self.chain(prompt).run(memory_content=memory_content).strip()

        if self.verbose:
            logger.info(f"Importance scores: {scores}")

        # Split into list of strings and convert to floats
        scores_list = [float(x) for x in scores.split(";")]

        return scores_list

    def add_memories(
        self, memory_content: str, now: Optional[datetime] = None
    ) -> List[str]:
"""Add an observations or memories to the agent's memory."""
|
|
||||||
        importance_scores = self._score_memories_importance(memory_content)

        self.aggregate_importance += max(importance_scores)
        memory_list = memory_content.split(";")
        documents = []

        for i in range(len(memory_list)):
            documents.append(
                Document(
                    page_content=memory_list[i],
                    metadata={"importance": importance_scores[i]},
                )
            )

        result = self.memory_retriever.add_documents(documents, current_time=now)

        # After an agent has processed a certain amount of memories (as measured by
        # aggregate importance), it is time to reflect on recent events to add
        # more synthesized memories to the agent's memory stream.
        if (
            self.reflection_threshold is not None
            and self.aggregate_importance > self.reflection_threshold
            and not self.reflecting
        ):
            self.reflecting = True
            self.pause_to_reflect(now=now)
            # Hack to clear the importance from reflection
            self.aggregate_importance = 0.0
            self.reflecting = False
        return result

    def add_memory(
        self, memory_content: str, now: Optional[datetime] = None
    ) -> List[str]:
        """Add an observation or memory to the agent's memory."""
        importance_score = self._score_memory_importance(memory_content)
        self.aggregate_importance += importance_score
        document = Document(
            page_content=memory_content, metadata={"importance": importance_score}
        )
        result = self.memory_retriever.add_documents([document], current_time=now)

        # After an agent has processed a certain amount of memories (as measured by
        # aggregate importance), it is time to reflect on recent events to add
        # more synthesized memories to the agent's memory stream.
        if (
            self.reflection_threshold is not None
            and self.aggregate_importance > self.reflection_threshold
            and not self.reflecting
        ):
            self.reflecting = True
            self.pause_to_reflect(now=now)
            # Hack to clear the importance from reflection
            self.aggregate_importance = 0.0
            self.reflecting = False
        return result

    def fetch_memories(
        self, observation: str, now: Optional[datetime] = None
    ) -> List[Document]:
        """Fetch related memories."""
        if now is not None:
            with mock_now(now):
                return self.memory_retriever.get_relevant_documents(observation)
        else:
            return self.memory_retriever.get_relevant_documents(observation)

    def format_memories_detail(self, relevant_memories: List[Document]) -> str:
        content = []
        for mem in relevant_memories:
            content.append(self._format_memory_detail(mem, prefix="- "))
        return "\n".join([f"{mem}" for mem in content])

    def _format_memory_detail(self, memory: Document, prefix: str = "") -> str:
        created_time = memory.metadata["created_at"].strftime("%B %d, %Y, %I:%M %p")
        return f"{prefix}[{created_time}] {memory.page_content.strip()}"

    def format_memories_simple(self, relevant_memories: List[Document]) -> str:
        return "; ".join([f"{mem.page_content}" for mem in relevant_memories])

    def _get_memories_until_limit(self, consumed_tokens: int) -> str:
        """Reduce the number of tokens in the documents."""
        result = []
        for doc in self.memory_retriever.memory_stream[::-1]:
            if consumed_tokens >= self.max_tokens_limit:
                break
            consumed_tokens += self.llm.get_num_tokens(doc.page_content)
            if consumed_tokens < self.max_tokens_limit:
                result.append(doc)
        return self.format_memories_simple(result)

    @property
    def memory_variables(self) -> List[str]:
        """Input keys this memory class will load dynamically."""
        return []

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return key-value pairs given the text input to the chain."""
        queries = inputs.get(self.queries_key)
        now = inputs.get(self.now_key)
        if queries is not None:
            relevant_memories = [
                mem for query in queries for mem in self.fetch_memories(query, now=now)
            ]
            return {
                self.relevant_memories_key: self.format_memories_detail(
                    relevant_memories
                ),
                self.relevant_memories_simple_key: self.format_memories_simple(
                    relevant_memories
                ),
            }

        most_recent_memories_token = inputs.get(self.most_recent_memories_token_key)
        if most_recent_memories_token is not None:
            return {
                self.most_recent_memories_key: self._get_memories_until_limit(
                    most_recent_memories_token
                )
            }
        return {}

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> None:
        """Save the context of this model run to memory."""
        # TODO: fix the save memory key
        mem = outputs.get(self.add_memory_key)
        now = outputs.get(self.now_key)
        if mem:
            self.add_memory(mem, now=now)

    def clear(self) -> None:
        """Clear memory contents."""
        # TODO
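A rough usage sketch of the WorkerSims memory removed above (not part of the diff; the FAISS and retriever wiring is an assumption that mirrors the setup used by AutoBot in the first deleted file):

# Hedged example: WorkerSims is the class defined in the deleted file above.
import faiss
from langchain.chat_models import ChatOpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)  # 1536 matches the OpenAI embedding size used above
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})
retriever = TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, k=15)

memory = WorkerSims(
    llm=ChatOpenAI(temperature=0.0),
    memory_retriever=retriever,
    reflection_threshold=8.0,  # reflect once accumulated importance passes this
    verbose=True,
)

# add_memory scores the observation 1-10 via the LLM, stores it with an
# "importance" weight, and calls pause_to_reflect() once the running
# aggregate_importance crosses reflection_threshold.
memory.add_memory("The swarm finished indexing the ./data directory")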