diff --git a/README.md b/README.md
index 4f0cd695..49ca3770 100644
--- a/README.md
+++ b/README.md
@@ -28,7 +28,7 @@ Run example in Colab:
     >>> schema_generator = SchemaGenerator()
+    >>> schema_generator.add_constraint("No user assistance")
+    >>> schema_generator.add_resource("Internet access for searches and information gathering.")
+    >>> schema_generator.add_performance_evaluation("Continuously review and analyze your actions to ensure you are performing to the best of your abilities.")
+    >>> prompt_string = schema_generator.generate_prompt_string()
+    >>> print(prompt_string)
+    """
+
+    def __init__(self) -> None:
+        """Initialize the SchemaGenerator object.
+
+        Starts with empty lists of constraints, commands, resources,
+        and performance evaluations.
+        """
+        self.constraints: List[str] = []
+        self.commands: List[BaseTool] = []
+        self.resources: List[str] = []
+        self.performance_evaluation: List[str] = []
+        self.response_format = {
+            "thoughts": {
+                "text": "thought",
+                "reasoning": "reasoning",
+                "plan": (
+                    "- short bulleted\n- list that conveys\n-"
+                    " long-term plan"
+                ),
+                "criticism": "constructive self-criticism",
+                "speak": "thoughts summary to say to user",
+            },
+            "command": {
+                "name": "command name",
+                "args": {"arg name": "value"},
+            },
+        }
+
+    def add_constraint(self, constraint: str) -> None:
+        """
+        Add a constraint to the constraints list.
+
+        Args:
+            constraint (str): The constraint to be added.
+        """
+        self.constraints.append(constraint)
+
+    def add_tool(self, tool: BaseTool) -> None:
+        """
+        Add a tool to the commands list.
+
+        Args:
+            tool (BaseTool): The tool to be added.
+        """
+        self.commands.append(tool)
+
+    def _generate_command_string(self, tool: BaseTool) -> str:
+        """Return a one-line description of a tool and its args schema."""
+        output = f"{tool.name}: {tool.description}"
+        output += f", args json schema: {json.dumps(tool.args)}"
+        return output
+
+    def add_resource(self, resource: str) -> None:
+        """
+        Add a resource to the resources list.
+
+        Args:
+            resource (str): The resource to be added.
+        """
+        self.resources.append(resource)
+
+    def add_performance_evaluation(self, evaluation: str) -> None:
+        """
+        Add a performance evaluation item to the performance_evaluation list.
+
+        Args:
+            evaluation (str): The evaluation item to be added.
+        """
+        self.performance_evaluation.append(evaluation)
+
+    def _generate_numbered_list(
+        self, items: list, item_type: str = "list"
+    ) -> str:
+        """
+        Generate a numbered list from given items based on the item_type.
+
+        Args:
+            items (list): A list of items to be numbered.
+            item_type (str, optional): The type of items in the list.
+                Defaults to 'list'.
+
+        Returns:
+            str: The formatted numbered list.
+        """
+        if item_type == "command":
+            command_strings = [
+                f"{i + 1}. {self._generate_command_string(item)}"
+                for i, item in enumerate(items)
+            ]
+            finish_description = (
+                "use this to signal that you have finished all your"
+                " objectives"
+            )
+            finish_args = (
+                '"response": "final response to let '
+                'people know you have finished your objectives"'
+            )
+            finish_string = (
+                f"{len(items) + 1}. {FINISH_NAME}: "
+                f"{finish_description}, args: {finish_args}"
+            )
+            return "\n".join(command_strings + [finish_string])
+        else:
+            return "\n".join(
+                f"{i + 1}. {item}" for i, item in enumerate(items)
+            )
+
+    def generate_prompt_string(self) -> str:
+        """Generate a prompt string.
+
+        Returns:
+            str: The generated prompt string.
+ """ + formatted_response_format = json.dumps( + self.response_format, indent=4 + ) + prompt_string = ( + f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\nCommands:\n{self._generate_numbered_list(self.commands, item_type='command')}\n\nResources:\n{self._generate_numbered_list(self.resources)}\n\nPerformance" + f" Evaluation:\n{self._generate_numbered_list(self.performance_evaluation)}\n\nYou" + " should only respond in JSON format as described below" + " \nResponse Format:" + f" \n{formatted_response_format} \nEnsure the response" + " can be parsed by Python json.loads" + ) + + return prompt_string + + +def get_prompt(tools: List[BaseTool]) -> str: + """Generates a prompt string. + + It includes various constraints, commands, resources, and performance evaluations. + + Returns: + str: The generated prompt string. + """ + + # Initialize the SchemaGenerator object + schema_generator = SchemaGenerator() + + # Add constraints to the SchemaGenerator object + schema_generator.add_constraint( + "~4000 word limit for short term memory. " + "Your short term memory is short, " + "so immediately save important information to files." + ) + schema_generator.add_constraint( + "If you are unsure how you previously did something " + "or want to recall past events, " + "thinking about similar events will help you remember." + ) + schema_generator.add_constraint("No user assistance") + schema_generator.add_constraint( + "Exclusively use the commands listed in double quotes e.g." + ' "command name"' + ) + + # Add commands to the SchemaGenerator object + for tool in tools: + schema_generator.add_tool(tool) + + # Add resources to the SchemaGenerator object + schema_generator.add_resource( + "Internet access for searches and information gathering." + ) + schema_generator.add_resource("Long Term memory management.") + schema_generator.add_resource( + "GPT-3.5 powered Agents for delegation of simple tasks." + ) + schema_generator.add_resource("File output.") + + # Add performance evaluations to the SchemaGenerator object + schema_generator.add_performance_evaluation( + "Continuously review and analyze your actions " + "to ensure you are performing to the best of your abilities." + ) + schema_generator.add_performance_evaluation( + "Constructively self-criticize your big-picture behavior" + " constantly." + ) + schema_generator.add_performance_evaluation( + "Reflect on past decisions and strategies to refine your" + " approach." + ) + schema_generator.add_performance_evaluation( + "Every command has a cost, so be smart and efficient. " + "Aim to complete tasks in the least number of steps." 
diff --git a/swarms/prompts/worker_prompt.py b/swarms/prompts/worker_prompt.py
index 03b8ce06..5ba0b5e5 100644
--- a/swarms/prompts/worker_prompt.py
+++ b/swarms/prompts/worker_prompt.py
@@ -57,4 +57,4 @@ def worker_agent_system(name: str, memory: str = None):
     [{memory}]
 
     Human: Determine which next command to use, and respond using the format specified above:
-    """.format(name=name, memory=memory)
\ No newline at end of file
+    """.format(name=name, memory=memory)
diff --git a/swarms/tools/__init__.py b/swarms/tools/__init__.py
index 877fb0de..c36c9608 100644
--- a/swarms/tools/__init__.py
+++ b/swarms/tools/__init__.py
@@ -7,7 +7,13 @@ from swarms.tools.tool_utils import (
     execute_tools,
 )
 from swarms.tools.tool import BaseTool, Tool, StructuredTool, tool
-
+from swarms.tools.exec_tool import (
+    AgentAction,
+    BaseAgentOutputParser,
+    preprocess_json_input,
+    AgentOutputParser,
+    execute_tool_by_name,
+)
 
 __all__ = [
     "scrape_tool_func_docs",
@@ -20,4 +26,9 @@ __all__ = [
     "Tool",
     "StructuredTool",
     "tool",
+    "AgentAction",
+    "BaseAgentOutputParser",
+    "preprocess_json_input",
+    "AgentOutputParser",
+    "execute_tool_by_name",
 ]
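With these re-exports in place, downstream code can import the parsing helpers from the package root instead of reaching into `swarms.tools.exec_tool` (added below). A quick smoke test of the new surface, assuming this branch of `swarms` is installed:

```python
from swarms.tools import (
    AgentAction,
    AgentOutputParser,
    preprocess_json_input,
)

# A well-formed command payload parses into a (name, args) NamedTuple.
action = AgentOutputParser().parse(
    '{"command": {"name": "search", "args": {"query": "swarms"}}}'
)
assert action == AgentAction(name="search", args={"query": "swarms"})

# preprocess_json_input doubles lone backslashes so json.loads can cope,
# while leaving valid JSON escapes and already-doubled ones intact.
assert preprocess_json_input(r"\d") == r"\\d"
assert preprocess_json_input(r"\n") == r"\n"
assert preprocess_json_input(r"\\d") == r"\\d"
```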
+ """ + output_parser = AgentOutputParser() + # Get command name and arguments + action = output_parser.parse(text) + tools = {t.name: t for t in tools} + if action.name == stop_token: + return action.args["response"] + if action.name in tools: + tool = tools[action.name] + try: + observation = tool.run(action.args) + except ValidationError as e: + observation = ( + f"Validation Error in args: {str(e)}, args:" + f" {action.args}" + ) + except Exception as e: + observation = ( + f"Error: {str(e)}, {type(e).__name__}, args:" + f" {action.args}" + ) + result = f"Command {tool.name} returned: {observation}" + elif action.name == "ERROR": + result = f"Error: {action.args}. " + else: + result = ( + f"Unknown command '{action.name}'. " + "Please refer to the 'COMMANDS' list for available " + "commands and only respond in the specified JSON format." + ) + + return result