Tools fix: parse docs, inject tool docs into prompts, attempt to execute tools, and display markdown

pull/158/head^2
Kye 1 year ago
parent fa52e09414
commit 4ae59df890

@@ -42,10 +42,8 @@ api_key = ""

 # Initialize the language model, this model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC
 llm = OpenAIChat(
-    # model_name="gpt-4"
     openai_api_key=api_key,
     temperature=0.5,
-    # max_tokens=100,
 )

 ## Initialize the workflow
@@ -53,24 +51,10 @@ flow = Flow(
     llm=llm,
     max_loops=2,
     dashboard=True,
-    # stopping_condition=None,  # You can define a stopping condition as needed.
-    # loop_interval=1,
-    # retry_attempts=3,
-    # retry_interval=1,
-    # interactive=False,  # Set to 'True' for interactive mode.
-    # dynamic_temperature=False,  # Set to 'True' for dynamic temperature handling.
 )

-# out = flow.load_state("flow_state.json")
-# temp = flow.dynamic_temperature()
-# filter = flow.add_response_filter("Trump")
 out = flow.run("Generate a 10,000 word blog on health and wellness.")
-# out = flow.validate_response(out)
-# out = flow.analyze_feedback(out)
-# out = flow.print_history_and_memory()
-# # out = flow.save_state("flow_state.json")
-# print(out)
 ```

@@ -1,37 +1,15 @@
 from swarms.models import OpenAIChat
 from swarms.structs import Flow

-# Initialize the language model, this model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC
+# Initialize the language model
 llm = OpenAIChat(
-    # model_name="gpt-4"
-    # openai_api_key=api_key,
     temperature=0.5,
-    # max_tokens=100,
 )

 ## Initialize the workflow
-flow = Flow(
-    llm=llm,
-    max_loops=2,
-    dashboard=True,
-    # tools=[search_api]
-    # stopping_condition=None,  # You can define a stopping condition as needed.
-    # loop_interval=1,
-    # retry_attempts=3,
-    # retry_interval=1,
-    # interactive=False,  # Set to 'True' for interactive mode.
-    # dynamic_temperature=False,  # Set to 'True' for dynamic temperature handling.
-)
-
-# out = flow.load_state("flow_state.json")
-# temp = flow.dynamic_temperature()
-# filter = flow.add_response_filter("Trump")
-out = flow.run(
-    "Generate a 10,000 word blog on mental clarity and the benefits of meditation."
-)
-# out = flow.validate_response(out)
-# out = flow.analyze_feedback(out)
-# out = flow.print_history_and_memory()
-# # out = flow.save_state("flow_state.json")
-# print(out)
+flow = Flow(llm=llm, max_loops=1, dashboard=True)
+
+# Run the workflow on a task
+out = flow.run("Generate a 10,000 word blog on health and wellness.")

@@ -11,6 +11,7 @@ from termcolor import colored
 from swarms.utils.code_interpreter import SubprocessCodeInterpreter
 from swarms.utils.parse_code import extract_code_in_backticks_in_string
+from swarms.tools.tool import BaseTool

 # System prompt
 FLOW_SYSTEM_PROMPT = f"""
@@ -25,7 +26,6 @@ to aid in these complex tasks. Your responses should be coherent, contextually r
 """

 # Prompts
 DYNAMIC_STOP_PROMPT = """
@@ -36,7 +36,6 @@ This will enable you to leave the autonomous loop.
 """

 # Make it able to handle multi input tools
 DYNAMICAL_TOOL_USAGE = """
 You have access to the following tools:
@@ -53,6 +52,11 @@ commands: {
             "tool1": "inputs",
             "tool1": "inputs"
         }
+        "tool3": "tool_name",
+        "params": {
+            "tool1": "inputs",
+            "tool1": "inputs"
+        }
     }
 }
@@ -60,6 +64,29 @@ commands: {

 {tools}
 """

+SCENARIOS = """
+commands: {
+    "tools": {
+        "tool1": "tool_name",
+        "params": {
+            "tool1": "inputs",
+            "tool1": "inputs"
+        }
+        "tool2": "tool_name",
+        "params": {
+            "tool1": "inputs",
+            "tool1": "inputs"
+        }
+        "tool3": "tool_name",
+        "params": {
+            "tool1": "inputs",
+            "tool1": "inputs"
+        }
+    }
+}
+"""

 def autonomous_agent_prompt(
     tools_prompt: str = DYNAMICAL_TOOL_USAGE,
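For illustration only, here is a minimal sketch (with made-up tool names and a plain stand-in object rather than the real `BaseTool`) of how the per-tool names and descriptions would be joined and substituted into the `{tools}` placeholder that `DYNAMICAL_TOOL_USAGE` ends with:

```
from dataclasses import dataclass


@dataclass
class StubTool:
    # Stand-in exposing the two attributes Flow reads from each tool.
    name: str
    description: str


tools = [
    StubTool("search_api", "Search the web and return the top results"),
    StubTool("calculator", "Evaluate a basic arithmetic expression"),
]

# Mirrors get_tool_description: one "name: description" line per tool.
tools_description = "\n".join(f"{t.name}: {t.description}" for t in tools)

# The prompt template keeps a {tools} placeholder, so .format() injects the block.
prompt = "You have access to the following tools:\n{tools}".format(tools=tools_description)
print(prompt)
```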
@@ -198,7 +225,7 @@ class Flow:
     def __init__(
         self,
         llm: Any,
-        template: str,
+        template: Optional[str] = None,
         max_loops=5,
         stopping_condition: Optional[Callable[[str], bool]] = None,
         loop_interval: int = 1,
@ -212,7 +239,7 @@ class Flow:
agent_name: str = " Autonomous Agent XYZ1B", agent_name: str = " Autonomous Agent XYZ1B",
agent_description: str = None, agent_description: str = None,
system_prompt: str = FLOW_SYSTEM_PROMPT, system_prompt: str = FLOW_SYSTEM_PROMPT,
# tools: List[Any] = None, tools: List[BaseTool] = None,
dynamic_temperature: bool = False, dynamic_temperature: bool = False,
sop: str = None, sop: str = None,
saved_state_path: Optional[str] = "flow_state.json", saved_state_path: Optional[str] = "flow_state.json",
@@ -246,7 +273,7 @@ class Flow:
         # The max_loops will be set dynamically if the dynamic_loop
         if self.dynamic_loops:
             self.max_loops = "auto"
-        # self.tools = tools or []
+        self.tools = tools or []
         self.system_prompt = system_prompt
         self.agent_name = agent_name
         self.agent_description = agent_description
@@ -310,68 +337,81 @@ class Flow:
     #     # Parse the text for tool usage
     #     pass

-    # def get_tool_description(self):
-    #     """Get the tool description"""
-    #     tool_descriptions = []
-    #     for tool in self.tools:
-    #         description = f"{tool.name}: {tool.description}"
-    #         tool_descriptions.append(description)
-    #     return "\n".join(tool_descriptions)
-
-    # def find_tool_by_name(self, name: str):
-    #     """Find a tool by name"""
-    #     for tool in self.tools:
-    #         if tool.name == name:
-    #             return tool
-    #     return None
-
-    # def construct_dynamic_prompt(self):
-    #     """Construct the dynamic prompt"""
-    #     tools_description = self.get_tool_description()
-    #     return DYNAMICAL_TOOL_USAGE.format(tools=tools_description)
-
-    # def extract_tool_commands(self, text: str):
-    #     """
-    #     Extract the tool commands from the text
-    #     Example:
-    #     ```json
-    #     {
-    #         "tool": "tool_name",
-    #         "params": {
-    #             "tool1": "inputs",
-    #             "param2": "value2"
-    #         }
-    #     }
-    #     ```
-    #     """
-    #     # Regex to find JSON like strings
-    #     pattern = r"```json(.+?)```"
-    #     matches = re.findall(pattern, text, re.DOTALL)
-    #     json_commands = []
-    #     for match in matches:
-    #         try:
-    #             json_commands = json.loads(match)
-    #             json_commands.append(json_commands)
-    #         except Exception as error:
-    #             print(f"Error parsing JSON command: {error}")
-
-    # def parse_and_execute_tools(self, response):
-    #     """Parse and execute the tools"""
-    #     json_commands = self.extract_tool_commands(response)
-    #     for command in json_commands:
-    #         tool_name = command.get("tool")
-    #         params = command.get("parmas", {})
-    #         self.execute_tool(tool_name, params)
-
-    # def execute_tools(self, tool_name, params):
-    #     """Execute the tool with the provided params"""
-    #     tool = self.tool_find_by_name(tool_name)
-    #     if tool:
-    #         # Execute the tool with the provided parameters
-    #         tool_result = tool.run(**params)
-    #         print(tool_result)
+    def get_tool_description(self):
+        """Get the tool description"""
+        if self.tools:
+            try:
+                tool_descriptions = []
+                for tool in self.tools:
+                    description = f"{tool.name}: {tool.description}"
+                    tool_descriptions.append(description)
+                return "\n".join(tool_descriptions)
+            except Exception as error:
+                print(
+                    f"Error getting tool description: {error} try adding a description to the tool or removing the tool"
+                )
+        else:
+            return "No tools available"
+
+    def find_tool_by_name(self, name: str):
+        """Find a tool by name"""
+        for tool in self.tools:
+            if tool.name == name:
+                return tool
+        return None
+
+    def construct_dynamic_prompt(self):
+        """Construct the dynamic prompt"""
+        tools_description = self.get_tool_description()
+
+        tool_prompt = self.tools_prompt_prep(tools_description, SCENARIOS)
+
+        return tool_prompt
+
+        # return DYNAMICAL_TOOL_USAGE.format(tools=tools_description)
+
+    def extract_tool_commands(self, text: str):
+        """
+        Extract the tool commands from the text
+        Example:
+        ```json
+        {
+            "tool": "tool_name",
+            "params": {
+                "tool1": "inputs",
+                "param2": "value2"
+            }
+        }
+        ```
+        """
+        # Regex to find JSON-like strings between ```json fences
+        pattern = r"```json(.+?)```"
+        matches = re.findall(pattern, text, re.DOTALL)
+        json_commands = []
+        for match in matches:
+            try:
+                json_commands.append(json.loads(match))
+            except Exception as error:
+                print(f"Error parsing JSON command: {error}")
+        return json_commands
+
+    def parse_and_execute_tools(self, response: str):
+        """Parse and execute the tools"""
+        json_commands = self.extract_tool_commands(response)
+        for command in json_commands:
+            tool_name = command.get("tool")
+            params = command.get("params", {})
+            self.execute_tools(tool_name, params)
+
+    def execute_tools(self, tool_name, params):
+        """Execute the tool with the provided params"""
+        tool = self.find_tool_by_name(tool_name)
+        if tool:
+            # Execute the tool with the provided parameters
+            tool_result = tool.run(**params)
+            print(tool_result)

     def truncate_history(self):
         """
@@ -483,12 +523,12 @@ class Flow:
                 self.print_dashboard(task)

             loop_count = 0
-            # for i in range(self.max_loops):
             while self.max_loops == "auto" or loop_count < self.max_loops:
                 loop_count += 1
                 print(colored(f"\nLoop {loop_count} of {self.max_loops}", "blue"))
                 print("\n")

+                # Check to see if stopping token is in the output to stop the loop
                 if self.stopping_token:
                     if self._check_stopping_condition(response) or parse_done_token(
                         response
@@ -510,111 +550,22 @@ class Flow:
                             **kwargs,
                         )

+                        # If code interpreter is enabled then run the code
                         if self.code_interpreter:
                             self.run_code(response)

-                        # If there are any tools then parse and execute them
-                        # if self.tools:
-                        #     self.parse_and_execute_tools(response)
-
-                        if self.interactive:
-                            print(f"AI: {response}")
-                            history.append(f"AI: {response}")
-                            response = input("You: ")
-                            history.append(f"Human: {response}")
-                        else:
-                            print(f"AI: {response}")
-                            history.append(f"AI: {response}")
-                            # print(response)
-                        break
-                    except Exception as e:
-                        logging.error(f"Error generating response: {e}")
-                        attempt += 1
-                        time.sleep(self.retry_interval)
-                history.append(response)
-                time.sleep(self.loop_interval)
-            self.memory.append(history)
-            if self.autosave:
-                save_path = self.saved_state_path or "flow_state.json"
-                print(colored(f"Autosaving flow state to {save_path}", "green"))
-                self.save_state(save_path)
-            if self.return_history:
-                return response, history
-            return response
-        except Exception as error:
-            print(f"Error running flow: {error}")
-            raise
-
-    def __call__(self, task: str, **kwargs):
-        """
-        Run the autonomous agent loop
-
-        Args:
-            task (str): The initial task to run
-
-        Flow:
-        1. Generate a response
-        2. Check stopping condition
-        3. If stopping condition is met, stop
-        4. If stopping condition is not met, generate a response
-        5. Repeat until stopping condition is met or max_loops is reached
-        """
-        try:
-            # dynamic_prompt = self.construct_dynamic_prompt()
-            # combined_prompt = f"{dynamic_prompt}\n{task}"
-
-            # Activate Autonomous agent message
-            self.activate_autonomous_agent()
-
-            response = task  # or combined_prompt
-            history = [f"{self.user_name}: {task}"]
-
-            # If dashboard = True then print the dashboard
-            if self.dashboard:
-                self.print_dashboard(task)
-
-            loop_count = 0
-            # for i in range(self.max_loops):
-            while self.max_loops == "auto" or loop_count < self.max_loops:
-                loop_count += 1
-                print(colored(f"\nLoop {loop_count} of {self.max_loops}", "blue"))
-                print("\n")
-
-                if self.stopping_token:
-                    if self._check_stopping_condition(response) or parse_done_token(
-                        response
-                    ):
-                        break
-
-                # Adjust temperature, comment if no work
-                if self.dynamic_temperature:
-                    self.dynamic_temperature()
-
-                # Preparing the prompt
-                task = self.agent_history_prompt(FLOW_SYSTEM_PROMPT, response)
-
-                attempt = 0
-                while attempt < self.retry_attempts:
-                    try:
-                        response = self.llm(
-                            task,
-                            **kwargs,
-                        )
-                        if self.code_interpreter:
-                            self.run_code(response)
                         # If there are any tools then parse and execute them
-                        # if self.tools:
-                        #     self.parse_and_execute_tools(response)
+                        if self.tools:
+                            self.parse_and_execute_tools(response)

+                        # If interactive mode is enabled then print the response and get user input
                         if self.interactive:
                             print(f"AI: {response}")
                             history.append(f"AI: {response}")
                             response = input("You: ")
                             history.append(f"Human: {response}")

+                        # If interactive mode is not enabled then print the response
                         else:
                             print(f"AI: {response}")
                             history.append(f"AI: {response}")
@@ -624,15 +575,20 @@ class Flow:
                         logging.error(f"Error generating response: {e}")
                         attempt += 1
                         time.sleep(self.retry_interval)

+                # Add the response to the history
                 history.append(response)
                 time.sleep(self.loop_interval)

+            # Add the history to the memory
             self.memory.append(history)

+            # If autosave is enabled then save the state
             if self.autosave:
                 save_path = self.saved_state_path or "flow_state.json"
                 print(colored(f"Autosaving flow state to {save_path}", "green"))
                 self.save_state(save_path)

+            # If return history is enabled then return the response and history
             if self.return_history:
                 return response, history
@@ -1113,7 +1069,7 @@ class Flow:
         run_code = self.code_executor.run(parsed_code)
         return run_code

-    def tool_prompt_prep(self, api_docs: str = None, required_api: str = None):
+    def tools_prompt_prep(self, docs: str = None, scenarios: str = None):
         """
         Prepare the tool prompt
         """
@@ -1160,19 +1116,14 @@ class Flow:
        response.

        Deliver your response in this format:
-       - Scenario 1: <Scenario1>
-       - Scenario 2: <Scenario2>
-       - Scenario 3: <Scenario3>
+       {scenarios}

        # APIs
-       {api_docs}
+       {docs}

        # Response
-       Required API: {required_api}
-       Scenarios with >=5 API calls:
-       - Scenario 1: <Scenario1>
        """

     def self_healing(self, **kwargs):
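As a rough sketch (placeholder docs and scenario text, not taken from the repo), the renamed `tools_prompt_prep` now fills `{docs}` and `{scenarios}` placeholders instead of asking for a single required API:

```
# Stand-alone rendering of the template fragment shown above; the real method
# wraps these placeholders in a much longer instruction block.
TOOLS_PROMPT = """
Deliver your response in this format:
{scenarios}

# APIs
{docs}

# Response
"""

docs = "search_api(query: str) -> list[str]: returns web search results"  # placeholder docs
scenarios = "- Scenario 1: call search_api at least 5 times to cover the topic"  # placeholder scenario

print(TOOLS_PROMPT.format(docs=docs, scenarios=scenarios))
```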

@@ -6,7 +6,9 @@ from typing import Callable, List, Dict, Any, Sequence

 class Task:
-    def __init__(self, id: str, task: str, flows: Sequence[Flow], dependencies: List[str] = []):
+    def __init__(
+        self, id: str, task: str, flows: Sequence[Flow], dependencies: List[str] = []
+    ):
         self.id = id
         self.task = task
         self.flows = flows
@@ -62,8 +64,15 @@ flow4 = Flow(llm, max_loops=1)

 # Create tasks with their respective Flows and task strings
 task1 = Task("task1", "Generate a summary on Quantum field theory", [flow1])
-task2 = Task("task2", "Elaborate on the summary of topic X", [flow2, flow3], dependencies=["task1"])
-task3 = Task("task3", "Generate conclusions for topic X", [flow4], dependencies=["task1"])
+task2 = Task(
+    "task2",
+    "Elaborate on the summary of topic X",
+    [flow2, flow3],
+    dependencies=["task1"],
+)
+task3 = Task(
+    "task3", "Generate conclusions for topic X", [flow4], dependencies=["task1"]
+)

 # Create a workflow and add tasks
 workflow = Workflow()
@@ -76,4 +85,4 @@ workflow.run()

 # Get results
 results = workflow.get_results()
 print(results)

@@ -260,10 +260,6 @@ class SequentialWorkflow:
                 --------------------------------
                 Metadata:
                 kwargs: {kwargs}
                 """,
                 "cyan",
                 attrs=["bold", "underline"],

@@ -1,4 +1,4 @@
-from swarms.utils.display_markdown import display_markdown_message
+from swarms.utils.markdown_message import display_markdown_message
 from swarms.utils.futures import execute_futures_dict
 from swarms.utils.code_interpreter import SubprocessCodeInterpreter
 from swarms.utils.parse_code import extract_code_in_backticks_in_string
