@@ -1,4 +1,5 @@
 from typing import Any, List, Optional, Sequence, Tuple
+import logging
 
 from langchain.agents.agent import Agent
 from langchain.callbacks.base import BaseCallbackManager
@@ -22,6 +23,7 @@ from langchain.tools.base import BaseTool
 
 from swarms.prompts.prompts import EVAL_TOOL_RESPONSE
 
+logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
 
 class ConversationalChatAgent(Agent):
     """An agent designed to hold a conversation in addition to using tools."""
@@ -51,6 +53,17 @@ class ConversationalChatAgent(Agent):
         output_parser: BaseOutputParser,
         input_variables: Optional[List[str]] = None,
     ) -> BasePromptTemplate:
+        if not isinstance(tools, Sequence):
+            raise TypeError("Tools must be a sequence")
+        if not isinstance(system_message, str):
+            raise TypeError("System message must be a string")
+        if not isinstance(human_message, str):
+            raise TypeError("Human message must be a string")
+        if not isinstance(output_parser, BaseOutputParser):
+            raise TypeError("Output parser must be an instance of BaseOutputParser")
+        if input_variables and not isinstance(input_variables, list):
+            raise TypeError("Input variables must be a list")
+
         tool_strings = "\n".join(
             [f"> {tool.name}: {tool.description}" for tool in tools]
         )
@@ -75,7 +88,8 @@ class ConversationalChatAgent(Agent):
         try:
             response = self.output_parser.parse(llm_output)
             return response["action"], response["action_input"]
-        except Exception:
+        except Exception as e:
+            logging.error(f"Error while extracting tool and input: {str(e)}")
             raise ValueError(f"Could not parse LLM output: {llm_output}")
 
     def _construct_scratchpad(
@@ -118,9 +132,13 @@ class ConversationalChatAgent(Agent):
             callback_manager=callback_manager,
         )
         tool_names = [tool.name for tool in tools]
-        return cls(
-            llm_chain=llm_chain,
-            allowed_tools=tool_names,
-            output_parser=output_parser,
-            **kwargs,
-        )
+        try:
+            return cls(
+                llm_chain=llm_chain,
+                allowed_tools=tool_names,
+                output_parser=output_parser,
+                **kwargs,
+            )
+        except Exception as e:
+            logging.error(f"Error while creating agent from LLM and tools: {str(e)}")
+            raise e
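
Usage note (not part of the diff): a minimal sketch of how the validation added to create_prompt is expected to behave. The import path swarms.agents.chat and the argument values below are assumptions for illustration only.

# usage_sketch.py -- hypothetical example; the module path is an assumption
from swarms.agents.chat import ConversationalChatAgent  # assumed location of the patched class

try:
    # With the new type checks, an invalid `tools` argument fails fast with a
    # clear TypeError instead of a confusing error deeper in prompt construction.
    ConversationalChatAgent.create_prompt(
        tools=123,                        # not a Sequence -> "Tools must be a sequence"
        system_message="system prompt",
        human_message="human prompt",
        output_parser=None,
    )
except TypeError as err:
    print(f"create_prompt rejected bad input: {err}")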