@@ -1,6 +1,7 @@
# from swarms import Swarms, swarm
from swarms.swarms import Swarms, swarm
from swarms.agents import worker_node
from swarms.agents.workers.worker_ultra_node import WorkerUltraNode, WorkerUltra
from swarms.agents.workers.worker_agent_ultra import worker_ultra_node
from swarms.agents.workers.WorkerNode import WorkerNode, worker_node
from swarms.workers import worker_node
from swarms.workers.worker_ultra_node import WorkerUltraNode, WorkerUltra
from swarms.workers.worker_agent_ultra import worker_ultra_node
from swarms.workers.WorkerNode import WorkerNode, worker_node
from swarms.boss.boss_node import BossNode
@@ -0,0 +1,340 @@
"""Chain that just formats a prompt and calls an LLM."""
from __future__ import annotations

import warnings
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union

from pydantic import Extra, Field

from langchain.callbacks.manager import (
    AsyncCallbackManager,
    AsyncCallbackManagerForChainRun,
    CallbackManager,
    CallbackManagerForChainRun,
    Callbacks,
)
from langchain.chains.base import Chain
from langchain.input import get_colored_text
from langchain.load.dump import dumpd
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import (
    BaseLLMOutputParser,
    BasePromptTemplate,
    LLMResult,
    NoOpOutputParser,
    PromptValue,
)
from langchain.schema.language_model import BaseLanguageModel


class LLMChain(Chain):
    """Chain to run queries against LLMs.

    Example:
        .. code-block:: python

            from langchain import LLMChain, OpenAI, PromptTemplate
            prompt_template = "Tell me a {adjective} joke"
            prompt = PromptTemplate(
                input_variables=["adjective"], template=prompt_template
            )
            llm = LLMChain(llm=OpenAI(), prompt=prompt)
    """

    @property
    def lc_serializable(self) -> bool:
        return True

    prompt: BasePromptTemplate
    """Prompt object to use."""
    llm: BaseLanguageModel
    """Language model to call."""
    output_key: str = "text"  #: :meta private:
    output_parser: BaseLLMOutputParser = Field(default_factory=NoOpOutputParser)
    """Output parser to use.
    Defaults to one that takes the most likely string but does not change it
    otherwise."""
    return_final_only: bool = True
    """Whether to return only the final parsed result. Defaults to True.
    If false, will return a bunch of extra information about the generation."""
    llm_kwargs: dict = Field(default_factory=dict)

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the prompt expects.

        :meta private:
        """
        return self.prompt.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Will always return text key.

        :meta private:
        """
        if self.return_final_only:
            return [self.output_key]
        else:
            return [self.output_key, "full_generation"]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        response = self.generate([inputs], run_manager=run_manager)
        return self.create_outputs(response)[0]

    def generate(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> LLMResult:
        """Generate LLM result from inputs."""
        prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)
        return self.llm.generate_prompt(
            prompts,
            stop,
            callbacks=run_manager.get_child() if run_manager else None,
            **self.llm_kwargs,
        )

    async def agenerate(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> LLMResult:
        """Generate LLM result from inputs."""
        prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager)
        return await self.llm.agenerate_prompt(
            prompts,
            stop,
            callbacks=run_manager.get_child() if run_manager else None,
            **self.llm_kwargs,
        )

    def prep_prompts(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
        if "stop" in input_list[0]:
            stop = input_list[0]["stop"]
        prompts = []
        for inputs in input_list:
            selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
            prompt = self.prompt.format_prompt(**selected_inputs)
            _colored_text = get_colored_text(prompt.to_string(), "green")
            _text = "Prompt after formatting:\n" + _colored_text
            if run_manager:
                run_manager.on_text(_text, end="\n", verbose=self.verbose)
            if "stop" in inputs and inputs["stop"] != stop:
                raise ValueError(
                    "If `stop` is present in any inputs, should be present in all."
                )
            prompts.append(prompt)
        return prompts, stop

    async def aprep_prompts(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
        if "stop" in input_list[0]:
            stop = input_list[0]["stop"]
        prompts = []
        for inputs in input_list:
            selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
            prompt = self.prompt.format_prompt(**selected_inputs)
            _colored_text = get_colored_text(prompt.to_string(), "green")
            _text = "Prompt after formatting:\n" + _colored_text
            if run_manager:
                await run_manager.on_text(_text, end="\n", verbose=self.verbose)
            if "stop" in inputs and inputs["stop"] != stop:
                raise ValueError(
                    "If `stop` is present in any inputs, should be present in all."
                )
            prompts.append(prompt)
        return prompts, stop

    def apply(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> List[Dict[str, str]]:
        """Utilize the LLM generate method for speed gains."""
        callback_manager = CallbackManager.configure(
            callbacks, self.callbacks, self.verbose
        )
        run_manager = callback_manager.on_chain_start(
            dumpd(self),
            {"input_list": input_list},
        )
        try:
            response = self.generate(input_list, run_manager=run_manager)
        except (KeyboardInterrupt, Exception) as e:
            run_manager.on_chain_error(e)
            raise e
        outputs = self.create_outputs(response)
        run_manager.on_chain_end({"outputs": outputs})
        return outputs

    async def aapply(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> List[Dict[str, str]]:
        """Utilize the LLM generate method for speed gains."""
        callback_manager = AsyncCallbackManager.configure(
            callbacks, self.callbacks, self.verbose
        )
        run_manager = await callback_manager.on_chain_start(
            dumpd(self),
            {"input_list": input_list},
        )
        try:
            response = await self.agenerate(input_list, run_manager=run_manager)
        except (KeyboardInterrupt, Exception) as e:
            await run_manager.on_chain_error(e)
            raise e
        outputs = self.create_outputs(response)
        await run_manager.on_chain_end({"outputs": outputs})
        return outputs

    @property
    def _run_output_key(self) -> str:
        return self.output_key

    def create_outputs(self, llm_result: LLMResult) -> List[Dict[str, Any]]:
        """Create outputs from response."""
        result = [
            # Get the text of the top generated string.
            {
                self.output_key: self.output_parser.parse_result(generation),
                "full_generation": generation,
            }
            for generation in llm_result.generations
        ]
        if self.return_final_only:
            result = [{self.output_key: r[self.output_key]} for r in result]
        return result

    async def _acall(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        response = await self.agenerate([inputs], run_manager=run_manager)
        return self.create_outputs(response)[0]

    def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
        """Format prompt with kwargs and pass to LLM.

        Args:
            callbacks: Callbacks to pass to LLMChain
            **kwargs: Keys to pass to prompt template.

        Returns:
            Completion from LLM.

        Example:
            .. code-block:: python

                completion = llm.predict(adjective="funny")
        """
        return self(kwargs, callbacks=callbacks)[self.output_key]

    async def apredict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
        """Format prompt with kwargs and pass to LLM.

        Args:
            callbacks: Callbacks to pass to LLMChain
            **kwargs: Keys to pass to prompt template.

        Returns:
            Completion from LLM.

        Example:
            .. code-block:: python

                completion = llm.predict(adjective="funny")
        """
        return (await self.acall(kwargs, callbacks=callbacks))[self.output_key]

    def predict_and_parse(
        self, callbacks: Callbacks = None, **kwargs: Any
    ) -> Union[str, List[str], Dict[str, Any]]:
        """Call predict and then parse the results."""
        warnings.warn(
            "The predict_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = self.predict(callbacks=callbacks, **kwargs)
        if self.prompt.output_parser is not None:
            return self.prompt.output_parser.parse(result)
        else:
            return result

    async def apredict_and_parse(
        self, callbacks: Callbacks = None, **kwargs: Any
    ) -> Union[str, List[str], Dict[str, str]]:
        """Call apredict and then parse the results."""
        warnings.warn(
            "The apredict_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = await self.apredict(callbacks=callbacks, **kwargs)
        if self.prompt.output_parser is not None:
            return self.prompt.output_parser.parse(result)
        else:
            return result

    def apply_and_parse(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        """Call apply and then parse the results."""
        warnings.warn(
            "The apply_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = self.apply(input_list, callbacks=callbacks)
        return self._parse_generation(result)

    def _parse_generation(
        self, generation: List[Dict[str, str]]
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        if self.prompt.output_parser is not None:
            return [
                self.prompt.output_parser.parse(res[self.output_key])
                for res in generation
            ]
        else:
            return generation

    async def aapply_and_parse(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        """Call apply and then parse the results."""
        warnings.warn(
            "The aapply_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = await self.aapply(input_list, callbacks=callbacks)
        return self._parse_generation(result)

    @property
    def _chain_type(self) -> str:
        return "llm_chain"

    @classmethod
    def from_string(cls, llm: BaseLanguageModel, template: str) -> LLMChain:
        """Create LLMChain from LLM and template."""
        prompt_template = PromptTemplate.from_template(template)
        return cls(llm=llm, prompt=prompt_template)
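For reference, a minimal usage sketch of the LLMChain class added above, adapted from its own docstring example. It is illustrative only and assumes the langchain and openai packages are installed and an OPENAI_API_KEY is set in the environment.

from langchain import LLMChain, OpenAI, PromptTemplate

# Single-variable prompt, wrapped in the chain defined above.
prompt = PromptTemplate(
    input_variables=["adjective"], template="Tell me a {adjective} joke"
)
chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)

# predict() formats the prompt from keyword arguments and returns the completion text.
print(chain.predict(adjective="funny"))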
@@ -1,99 +0,0 @@
from langchain import OpenAI, LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory


def initialize_chain(instructions, memory=None):
    if memory is None:
        memory = ConversationBufferWindowMemory()
        memory.ai_prefix = "Assistant"

    template = f"""
    Instructions: {instructions}
    {{{memory.memory_key}}}
    Human: {{human_input}}
    Assistant:"""

    prompt = PromptTemplate(
        input_variables=["history", "human_input"], template=template
    )

    chain = LLMChain(
        llm=OpenAI(temperature=0),
        prompt=prompt,
        verbose=True,
        memory=ConversationBufferWindowMemory(),
    )
    return chain


def initialize_meta_chain():
    meta_template = """
    Assistant has just had the below interactions with a User. Assistant followed their "Instructions" closely. Your job is to critique the Assistant's performance and then revise the Instructions so that Assistant would quickly and correctly respond in the future.

    ####

    {chat_history}

    ####

    Please reflect on these interactions.

    You should first critique Assistant's performance. What could Assistant have done better? What should the Assistant remember about this user? Are there things this user always wants? Indicate this with "Critique: ...".

    You should next revise the Instructions so that Assistant would quickly and correctly respond in the future. Assistant's goal is to satisfy the user in as few interactions as possible. Assistant will only see the new Instructions, not the interaction history, so anything important must be summarized in the Instructions. Don't forget any important details in the current Instructions! Indicate the new Instructions by "Instructions: ...".
    """

    meta_prompt = PromptTemplate(
        input_variables=["chat_history"], template=meta_template
    )

    meta_chain = LLMChain(
        llm=OpenAI(temperature=0),
        prompt=meta_prompt,
        verbose=True,
    )
    return meta_chain


def get_chat_history(chain_memory):
    memory_key = chain_memory.memory_key
    chat_history = chain_memory.load_memory_variables(memory_key)[memory_key]
    return chat_history


def get_new_instructions(meta_output):
    delimiter = "Instructions: "
    new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter) :]
    return new_instructions


def meta_agent(task, max_iters=3, max_meta_iters=5):
    failed_phrase = "task failed"
    success_phrase = "task succeeded"
    key_phrases = [success_phrase, failed_phrase]

    instructions = "None"
    for i in range(max_meta_iters):
        print(f"[Episode {i+1}/{max_meta_iters}]")
        chain = initialize_chain(instructions, memory=None)
        output = chain.predict(human_input=task)
        for j in range(max_iters):
            print(f"(Step {j+1}/{max_iters})")
            print(f"Assistant: {output}")
            print(f"Human: ")
            human_input = input()
            if any(phrase in human_input.lower() for phrase in key_phrases):
                break
            output = chain.predict(human_input=human_input)
        if success_phrase in human_input.lower():
            print(f"You succeeded! Thanks for playing!")
            return
        meta_chain = initialize_meta_chain()
        meta_output = meta_chain.predict(chat_history=get_chat_history(chain.memory))
        print(f"Feedback: {meta_output}")
        instructions = get_new_instructions(meta_output)
        print(f"New Instructions: {instructions}")
        print("\n" + "#" * 80 + "\n")
    print(f"You failed! Thanks for playing!")


task = "Provide a systematic argument for why we should always eat pasta with olives."
meta_agent(task)
@@ -1 +0,0 @@
from swarms.agents.workers.multi_modal_workers.omni_agent.omni_chat import chat_huggingface
@@ -0,0 +1,268 @@
import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

from pydantic import BaseModel, Field

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory

from swarms.utils.schema.base import BaseLanguageModel

# TODO: Add ability to integrate with tools
# TODO: Replace PromptTemplate usage with f-strings throughout


class WorkerCharacterAgent(BaseModel):
    """A character with memory and innate characteristics."""

    name: str
    """The character's name."""

    age: Optional[int] = None
    """The optional age of the character."""
    traits: str = "N/A"
    """Permanent traits to ascribe to the character."""
    status: str
    """The traits of the character you wish not to change."""
    memory: GenerativeAgentMemory
    """The memory object that combines relevance, recency, and 'importance'."""
    llm: BaseLanguageModel
    """The underlying language model."""
    verbose: bool = False
    summary: str = ""  #: :meta private:
    """Stateful self-summary generated via reflection on the character's memory."""

    summary_refresh_seconds: int = 3600  #: :meta private:
    """How frequently to re-generate the summary."""

    last_refreshed: datetime = Field(default_factory=datetime.now)  #: :meta private:
    """The last time the character's summary was regenerated."""

    daily_summaries: List[str] = Field(default_factory=list)  #: :meta private:
    """Summary of the events in the plan that the agent took."""

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    # LLM-related methods
    @staticmethod
    def _parse_list(text: str) -> List[str]:
        """Parse a newline-separated string into a list of strings."""
        lines = re.split(r"\n", text.strip())
        return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]

    def chain(self, prompt: PromptTemplate) -> LLMChain:
        """Build an LLMChain over the agent's LLM and memory for the given prompt."""
        return LLMChain(
            llm=self.llm, prompt=prompt, verbose=self.verbose, memory=self.memory
        )

    def _get_entity_from_observation(self, observation: str) -> str:
        """Extract the entity being observed."""
        prompt = PromptTemplate.from_template(
            "What is the observed entity in the following observation? {observation}"
            + "\nEntity="
        )
        return self.chain(prompt).run(observation=observation).strip()

    def _get_entity_action(self, observation: str, entity_name: str) -> str:
        """Describe what the observed entity is doing."""
        prompt = PromptTemplate.from_template(
            "What is the {entity} doing in the following observation? {observation}"
            + "\nThe {entity} is"
        )
        return (
            self.chain(prompt).run(entity=entity_name, observation=observation).strip()
        )

    def summarize_related_memories(self, observation: str) -> str:
        """Summarize memories that are most relevant to an observation."""
        prompt = PromptTemplate.from_template(
            """
            {q1}?
            Context from memory:
            {relevant_memories}
            Relevant context:
            """
        )
        entity_name = self._get_entity_from_observation(observation)
        entity_action = self._get_entity_action(observation, entity_name)
        q1 = f"What is the relationship between {self.name} and {entity_name}"
        q2 = f"{entity_name} is {entity_action}"
        return self.chain(prompt=prompt).run(q1=q1, queries=[q1, q2]).strip()

    def _generate_reaction(
        self, observation: str, suffix: str, now: Optional[datetime] = None
    ) -> str:
        """React to a given observation or dialogue act."""
        prompt = PromptTemplate.from_template(
            "{agent_summary_description}"
            + "\nIt is {current_time}."
            + "\n{agent_name}'s status: {agent_status}"
            + "\nSummary of relevant context from {agent_name}'s memory:"
            + "\n{relevant_memories}"
            + "\nMost recent observations: {most_recent_memories}"
            + "\nObservation: {observation}"
            + "\n\n"
            + suffix
        )
        agent_summary_description = self.get_summary(now=now)
        relevant_memories_str = self.summarize_related_memories(observation)
        current_time_str = (
            datetime.now().strftime("%B %d, %Y, %I:%M %p")
            if now is None
            else now.strftime("%B %d, %Y, %I:%M %p")
        )
        kwargs: Dict[str, Any] = dict(
            agent_summary_description=agent_summary_description,
            current_time=current_time_str,
            relevant_memories=relevant_memories_str,
            agent_name=self.name,
            observation=observation,
            agent_status=self.status,
        )
        consumed_tokens = self.llm.get_num_tokens(
            prompt.format(most_recent_memories="", **kwargs)
        )
        kwargs[self.memory.most_recent_memories_token_key] = consumed_tokens
        return self.chain(prompt=prompt).run(**kwargs).strip()

    def _clean_response(self, text: str) -> str:
        return re.sub(f"^{self.name} ", "", text.strip()).strip()

    def generate_reaction(
        self, observation: str, now: Optional[datetime] = None
    ) -> Tuple[bool, str]:
        """React to a given observation."""
        call_to_action_template = (
            "Should {agent_name} react to the observation, and if so,"
            + " what would be an appropriate reaction? Respond in one line."
            + ' If the action is to engage in dialogue, write:\nSAY: "what to say"'
            + "\notherwise, write:\nREACT: {agent_name}'s reaction (if anything)."
            + "\nEither do nothing, react, or say something but not both.\n\n"
        )
        full_result = self._generate_reaction(
            observation, call_to_action_template, now=now
        )
        result = full_result.strip().split("\n")[0]
        self.memory.save_context(
            {},
            {
                self.memory.add_memory_key: f"{self.name} observed "
                f"{observation} and reacted by {result}",
                self.memory.now_key: now,
            },
        )
        if "REACT:" in result:
            reaction = self._clean_response(result.split("REACT:")[-1])
            return False, f"{self.name} {reaction}"
        if "SAY:" in result:
            said_value = self._clean_response(result.split("SAY:")[-1])
            return True, f"{self.name} said {said_value}"
        else:
            return False, result

    def generate_dialogue_response(
        self, observation: str, now: Optional[datetime] = None
    ) -> Tuple[bool, str]:
        """Generate a dialogue response to a given observation."""
        call_to_action_template = (
            "What would {agent_name} say? To end the conversation, write:"
            ' GOODBYE: "what to say". Otherwise to continue the conversation,'
            ' write: SAY: "what to say next"\n\n'
        )
        full_result = self._generate_reaction(
            observation, call_to_action_template, now=now
        )
        result = full_result.strip().split("\n")[0]
        if "GOODBYE:" in result:
            farewell = self._clean_response(result.split("GOODBYE:")[-1])
            self.memory.save_context(
                {},
                {
                    self.memory.add_memory_key: f"{self.name} observed "
                    f"{observation} and said {farewell}",
                    self.memory.now_key: now,
                },
            )
            return False, f"{self.name} said {farewell}"
        if "SAY:" in result:
            response_text = self._clean_response(result.split("SAY:")[-1])
            self.memory.save_context(
                {},
                {
                    self.memory.add_memory_key: f"{self.name} observed "
                    f"{observation} and said {response_text}",
                    self.memory.now_key: now,
                },
            )
            return True, f"{self.name} said {response_text}"
        else:
            return False, result

    ######################################################
    # Agent stateful summary methods.                    #
    # Each dialog or response prompt includes a header   #
    # summarizing the agent's self-description. This is  #
    # updated periodically through probing its memories. #
    ######################################################
    def _compute_agent_summary(self) -> str:
        """Compute a summary of the agent's core characteristics."""
        prompt = PromptTemplate.from_template(
            "How would you summarize {name}'s core characteristics given the"
            + " following statements:\n"
            + "{relevant_memories}"
            + "Do not embellish."
            + "\n\nSummary: "
        )
        # The agent seeks to think about their core characteristics.
        return (
            self.chain(prompt)
            .run(name=self.name, queries=[f"{self.name}'s core characteristics"])
            .strip()
        )

    def get_summary(
        self, force_refresh: bool = False, now: Optional[datetime] = None
    ) -> str:
        """Return a descriptive summary of the agent."""
        current_time = datetime.now() if now is None else now
        since_refresh = (current_time - self.last_refreshed).seconds
        if (
            not self.summary
            or since_refresh >= self.summary_refresh_seconds
            or force_refresh
        ):
            self.summary = self._compute_agent_summary()
            self.last_refreshed = current_time
        age = self.age if self.age is not None else "N/A"
        return (
            f"Name: {self.name} (age: {age})"
            + f"\nInnate traits: {self.traits}"
            + f"\n{self.summary}"
        )

    def get_full_header(
        self, force_refresh: bool = False, now: Optional[datetime] = None
    ) -> str:
        """Return a full header of the agent's status, summary, and current time."""
        now = datetime.now() if now is None else now
        summary = self.get_summary(force_refresh=force_refresh, now=now)
        current_time_str = now.strftime("%B %d, %Y, %I:%M %p")
        return (
            f"{summary}\nIt is {current_time_str}.\n{self.name}'s status: {self.status}"
        )
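For orientation, a rough usage sketch of the agent defined above, modeled on LangChain's generative-agents tutorial. It is illustrative only: it assumes an OpenAI API key, the faiss-cpu package, and that the BaseLanguageModel re-exported from swarms.utils.schema.base accepts LangChain chat models; the character details are made up.

import math
from datetime import datetime

import faiss
from langchain.chat_models import ChatOpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.vectorstores import FAISS

# Time-weighted retriever over an empty FAISS index, mirroring the
# LangChain generative-agents tutorial setup.
embeddings = OpenAIEmbeddings()
vectorstore = FAISS(
    embeddings.embed_query,
    faiss.IndexFlatL2(1536),
    InMemoryDocstore({}),
    {},
    relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2),
)
retriever = TimeWeightedVectorStoreRetriever(
    vectorstore=vectorstore, other_score_keys=["importance"], k=15
)

llm = ChatOpenAI(max_tokens=1500)  # assumes OPENAI_API_KEY is set
memory = GenerativeAgentMemory(llm=llm, memory_retriever=retriever, verbose=False)

# Hypothetical character; swap in whatever traits and status your worker needs.
tommie = WorkerCharacterAgent(
    name="Tommie",
    age=25,
    traits="anxious, likes design, talkative",
    status="looking for a job",
    memory=memory,
    llm=llm,
)

print(tommie.get_summary(force_refresh=True))
print(tommie.generate_reaction("Tommie sees a new coffee machine", now=datetime.now()))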