[FEAT][agent_id][FEAT][dynamic_prompt_setup][FEAT][_history]

pull/207/head^2
Kye 1 year ago
parent 5489dc576a
commit a4c6f87a20

@@ -5,6 +5,7 @@ import logging
 import random
 import re
 import time
+import uuid
 from typing import Any, Callable, Dict, List, Optional, Tuple
 
 from termcolor import colored
@@ -69,6 +70,11 @@ def parse_done_token(response: str) -> bool:
     return "<DONE>" in response
 
 
+def agent_id():
+    """Generate an agent id"""
+    return str(uuid.uuid4())
+
+
 class Agent:
     """
     Agent is the structure that provides autonomy to any llm in a reliable and effective fashion.
@@ -94,52 +100,47 @@ class Agent:
         **kwargs (Any): Any additional keyword arguments
 
     Methods:
-        run: Run the autonomous agent loop
-        run_concurrent: Run the autonomous agent loop concurrently
-        bulk_run: Run the autonomous agent loop in bulk
-        save: Save the agent history to a file
-        load: Load the agent history from a file
-        validate_response: Validate the response based on certain criteria
-        print_history_and_memory: Print the history and memory of the agent
-        step: Execute a single step in the agent interaction
-        graceful_shutdown: Gracefully shutdown the system saving the state
-        run_with_timeout: Run the loop but stop if it takes longer than the timeout
-        analyze_feedback: Analyze the feedback for issues
-        undo_last: Response the last response and return the previous state
-        add_response_filter: Add a response filter to filter out certain words from the response
-        apply_reponse_filters: Apply the response filters to the response
-        filtered_run: Filter the response
-        interactive_run: Interactive run mode
-        streamed_generation: Stream the generation of the response
-        get_llm_params: Extracts and returns the parameters of the llm object for serialization.
-        agent_history_prompt: Generate the agent history prompt
-        add_task_to_memory: Add the task to the memory
-        add_message_to_memory: Add the message to the memory
-        add_message_to_memory_and_truncate: Add the message to the memory and truncate
-        print_dashboard: Print dashboard
-        activate_autonomous_agent: Print the autonomous agent activation message
-        _check_stopping_condition: Check if the stopping condition is met
-        format_prompt: Format the prompt
-        get_llm_init_params: Get the llm init params
-        provide_feedback: Allow users to provide feedback on the responses
-        truncate_history: Take the history and truncate it to fit into the model context length
-        agent_history_prompt: Generate the agent history prompt
-        extract_tool_commands: Extract the tool commands from the text
-        parse_and_execute_tools: Parse and execute the tools
-        execute_tools: Execute the tool with the provided parameters
-        construct_dynamic_prompt: Construct the dynamic prompt
-        get_tool_description: Get the tool description
-        find_tool_by_name: Find a tool by name
-        parse_tool_command: Parse the text for tool usage
-        dynamic_temperature: Dynamically change the temperature
-        _run: Generate a result using the provided keyword args.
-        from_llm_and_template: Create AgentStream from LLM and a string template.
-        from_llm_and_template_file: Create AgentStream from LLM and a template file.
-        save_state: Save the state of the agent
-        load_state: Load the state of the agent
-        run_async: Run the agent asynchronously
-        arun: Run the agent asynchronously
-        run_code: Run the code in the response
+        run(task: str, **kwargs: Any): Run the agent on a task
+        run_concurrent(tasks: List[str], **kwargs: Any): Run the agent on a list of tasks concurrently
+        bulk_run(inputs: List[Dict[str, Any]]): Run the agent on a list of inputs
+        from_llm_and_template(llm: Any, template: str): Create AgentStream from LLM and a string template.
+        from_llm_and_template_file(llm: Any, template_file: str): Create AgentStream from LLM and a template file.
+        save(file_path): Save the agent history to a file
+        load(file_path): Load the agent history from a file
+        validate_response(response: str): Validate the response based on certain criteria
+        print_history_and_memory(): Print the entire history and memory of the agent
+        step(task: str, **kwargs): Executes a single step in the agent interaction, generating a response from the language model based on the given input text.
+        graceful_shutdown(): Gracefully shutdown the system saving the state
+        run_with_timeout(task: str, timeout: int): Run the loop but stop if it takes longer than the timeout
+        analyze_feedback(): Analyze the feedback for issues
+        undo_last(): Response the last response and return the previous state
+        add_response_filter(filter_word: str): Add a response filter to filter out certain words from the response
+        apply_reponse_filters(response: str): Apply the response filters to the response
+        filtered_run(task: str): Filtered run
+        interactive_run(max_loops: int): Interactive run mode
+        streamed_generation(prompt: str): Stream the generation of the response
+        get_llm_params(): Extracts and returns the parameters of the llm object for serialization.
+        save_state(file_path: str): Saves the current state of the agent to a JSON file, including the llm parameters.
+        load_state(file_path: str): Loads the state of the agent from a json file and restores the configuration and memory.
+        retry_on_failure(function, retries: int = 3, retry_delay: int = 1): Retry wrapper for LLM calls.
+        run_code(response: str): Run the code in the response
+        construct_dynamic_prompt(): Construct the dynamic prompt
+        extract_tool_commands(text: str): Extract the tool commands from the text
+        parse_and_execute_tools(response: str): Parse and execute the tools
+        execute_tools(tool_name, params): Execute the tool with the provided params
+        truncate_history(): Take the history and truncate it to fit into the model context length
+        add_task_to_memory(task: str): Add the task to the memory
+        add_message_to_memory(message: str): Add the message to the memory
+        add_message_to_memory_and_truncate(message: str): Add the message to the memory and truncate
+        print_dashboard(task: str): Print dashboard
+        activate_autonomous_agent(): Print the autonomous agent activation message
+        dynamic_temperature(): Dynamically change the temperature
+        _check_stopping_condition(response: str): Check if the stopping condition is met
+        format_prompt(template, **kwargs: Any): Format the template with the provided kwargs using f-string interpolation.
+        get_llm_init_params(): Get LLM init params
+        get_tool_description(): Get the tool description
+        find_tool_by_name(name: str): Find a tool by name
 
     Example:
     >>> from swarms.models import OpenAIChat
@@ -159,7 +160,8 @@ class Agent:
     def __init__(
         self,
-        llm: Any,
+        id: str = agent_id,
+        llm: Any = None,
         template: Optional[str] = None,
         max_loops=5,
         stopping_condition: Optional[Callable[[str], bool]] = None,
@@ -191,6 +193,7 @@ class Agent:
         *args,
         **kwargs: Any,
     ):
+        self.id = id
         self.llm = llm
         self.template = template
         self.max_loops = max_loops
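As a quick, illustrative sketch of the new id wiring (not part of the diff; the swarms.structs.agent import path is an assumption based on the repo layout), an Agent can be handed an explicit UUID at construction time:

    from swarms.models import OpenAIChat                 # shown in the class docstring example
    from swarms.structs.agent import Agent, agent_id     # assumed import path

    llm = OpenAIChat(openai_api_key="sk-...", temperature=0.5)

    # Passing agent_id() explicitly guarantees a fresh UUID4 string per instance;
    # note the diff's default is the agent_id function object itself (no call),
    # so calling it here makes the intent explicit.
    agent = Agent(id=agent_id(), llm=llm, max_loops=3)
    print(agent.id)  # e.g. 'c1f9d1a2-7b3e-4f7a-9c2d-0e5b6a8d4f10' (illustrative)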
@@ -403,6 +406,7 @@ class Agent:
                 ----------------------------------------
                 Agent Configuration:
+                    Agent ID: {self.id}
                     Name: {self.agent_name}
                     Description: {self.agent_description}
                     Standard Operating Procedure: {self.sop}
@@ -452,6 +456,45 @@ class Agent:
             )
             print(error)
 
+    def loop_count_print(self, loop_count, max_loops):
+        """loop_count_print summary
+
+        Args:
+            loop_count (_type_): _description_
+            max_loops (_type_): _description_
+        """
+        print(
+            colored(f"\nLoop {loop_count} of {max_loops}", "cyan")
+        )
+        print("\n")
+
+    def _history(self, user_name: str, task: str) -> str:
+        """Generate the history for the history prompt
+
+        Args:
+            user_name (str): _description_
+            task (str): _description_
+
+        Returns:
+            str: _description_
+        """
+        history = [f"{user_name}: {task}"]
+        return history
+
+    def _dynamic_prompt_setup(self, dynamic_prompt: str, task: str) -> str:
+        """_dynamic_prompt_setup summary
+
+        Args:
+            dynamic_prompt (str): _description_
+            task (str): _description_
+
+        Returns:
+            str: _description_
+        """
+        dynamic_prompt = dynamic_prompt or self.construct_dynamic_prompt()
+        combined_prompt = f"{dynamic_prompt}\n{task}"
+        return combined_prompt
+
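A rough, standalone sketch of how the three new helpers behave (illustrative only; the bodies are copied from the diff into a dummy class so the snippet runs on its own):

    from termcolor import colored

    class _Demo:
        def construct_dynamic_prompt(self):
            return "You are a helpful autonomous agent."  # placeholder SOP text

        def loop_count_print(self, loop_count, max_loops):
            print(colored(f"\nLoop {loop_count} of {max_loops}", "cyan"))
            print("\n")

        def _history(self, user_name, task):
            # Annotated as -> str in the diff, but it actually returns a one-element list
            return [f"{user_name}: {task}"]

        def _dynamic_prompt_setup(self, dynamic_prompt, task):
            dynamic_prompt = dynamic_prompt or self.construct_dynamic_prompt()
            return f"{dynamic_prompt}\n{task}"

    d = _Demo()
    d.loop_count_print(1, 5)                              # prints "Loop 1 of 5" in cyan
    print(d._history("Human", "Summarize the report"))    # ['Human: Summarize the report']
    print(d._dynamic_prompt_setup(None, "Summarize the report"))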
     def run(self, task: Optional[str], img: Optional[str] = None, **kwargs):
         """
         Run the autonomous agent loop
@@ -468,14 +511,11 @@ class Agent:
         """
         try:
-            # dynamic_prompt = self.construct_dynamic_prompt()
-            # combined_prompt = f"{dynamic_prompt}\n{task}"
-
             # Activate Autonomous agent message
             self.activate_autonomous_agent()
 
             response = task  # or combined_prompt
-            history = [f"{self.user_name}: {task}"]
+            history = self._history(self.user_name, task)
 
             # If dashboard = True then print the dashboard
             if self.dashboard:
@@ -487,9 +527,7 @@ class Agent:
             while self.max_loops == "auto" or loop_count < self.max_loops:
                 # Loop count
                 loop_count += 1
-                print(
-                    colored(f"\nLoop {loop_count} of {self.max_loops}", "blue")
-                )
+                self.loop_count_print(loop_count, self.max_loops)
                 print("\n")
 
                 # Check to see if stopping token is in the output to stop the loop
@@ -1129,14 +1167,14 @@ class Agent:
         """
 
-    def self_healing(self, **kwargs):
-        """
-        Self healing by debugging errors and refactoring its own code
-        Args:
-            **kwargs (Any): Any additional keyword arguments
-        """
-        pass
+    # def self_healing(self, **kwargs):
+    #     """
+    #     Self healing by debugging errors and refactoring its own code
+    #     Args:
+    #         **kwargs (Any): Any additional keyword arguments
+    #     """
+    #     pass
 
     # def refactor_code(
     #     self,
@@ -1161,29 +1199,29 @@ class Agent:
     #     # Sort the changes in reverse line order
     #     # explanations.sort(key=lambda x: x["line", reverse=True])
 
-    # # def error_prompt_inject(
-    # #     self,
-    # #     file_path: str,
-    # #     args: List,
-    # #     error: str,
-    # # ):
-    # #     with open(file_path, "r") as f:
-    # #         file_lines = f.readlines()
-    # #     file_with_lines = []
-    # #     for i, line in enumerate(file_lines):
-    # #         file_with_lines.append(str(i + 1) + "" + line)
-    # #     file_with_lines = "".join(file_with_lines)
-    # #     prompt = f"""
-    # #         Here is the script that needs fixing:\n\n
-    # #         {file_with_lines}\n\n
-    # #         Here are the arguments it was provided:\n\n
-    # #         {args}\n\n
-    # #         Here is the error message:\n\n
-    # #         {error}\n
-    # #         "Please provide your suggested changes, and remember to stick to the "
-    # #         exact format as described above.
-    # #     """
-    # #     # Print(prompt)
+    # def error_prompt_inject(
+    #     self,
+    #     file_path: str,
+    #     args: List,
+    #     error: str,
+    # ):
+    #     with open(file_path, "r") as f:
+    #         file_lines = f.readlines()
+    #     file_with_lines = []
+    #     for i, line in enumerate(file_lines):
+    #         file_with_lines.append(str(i + 1) + "" + line)
+    #     file_with_lines = "".join(file_with_lines)
+    #     prompt = f"""
+    #         Here is the script that needs fixing:\n\n
+    #         {file_with_lines}\n\n
+    #         Here are the arguments it was provided:\n\n
+    #         {args}\n\n
+    #         Here is the error message:\n\n
+    #         {error}\n
+    #         "Please provide your suggested changes, and remember to stick to the "
+    #         exact format as described above.
+    #     """
+    #     # print(prompt)
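For readers skimming the still-commented error_prompt_inject block, here is a hypothetical standalone version of the prompt it would assemble (the helper name build_error_prompt is invented for this sketch; the original's empty-string separator between line number and source line is kept as-is):

    from typing import List

    def build_error_prompt(file_path: str, args: List, error: str) -> str:
        """Hypothetical standalone rendering of the commented-out error_prompt_inject."""
        with open(file_path, "r") as f:
            file_lines = f.readlines()

        # Prefix each line with its 1-based number; "" as the separator means the
        # number and the source text run together, exactly as in the commented code.
        file_with_lines = "".join(
            str(i + 1) + "" + line for i, line in enumerate(file_lines)
        )

        return (
            "Here is the script that needs fixing:\n\n"
            f"{file_with_lines}\n\n"
            "Here are the arguments it was provided:\n\n"
            f"{args}\n\n"
            "Here is the error message:\n\n"
            f"{error}\n"
            "Please provide your suggested changes, and remember to stick to the "
            "exact format as described above."
        )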
