tools fix: parse tool docs, inject tool docs into prompts, attempt to execute tools, and display markdown

pull/158/head^2
Kye 1 year ago
parent fa52e09414
commit 4ae59df890

@ -42,10 +42,8 @@ api_key = ""
# Initialize the language model; it can be swapped for Anthropic, Hugging Face models (e.g. Mistral), etc.
llm = OpenAIChat(
# model_name="gpt-4"
openai_api_key=api_key,
temperature=0.5,
# max_tokens=100,
)
## Initialize the workflow
@ -53,24 +51,10 @@ flow = Flow(
llm=llm,
max_loops=2,
dashboard=True,
# stopping_condition=None, # You can define a stopping condition as needed.
# loop_interval=1,
# retry_attempts=3,
# retry_interval=1,
# interactive=False, # Set to 'True' for interactive mode.
# dynamic_temperature=False, # Set to 'True' for dynamic temperature handling.
)
# out = flow.load_state("flow_state.json")
# temp = flow.dynamic_temperature()
# filter = flow.add_response_filter("Trump")
out = flow.run("Generate a 10,000 word blog on health and wellness.")
# out = flow.validate_response(out)
# out = flow.analyze_feedback(out)
# out = flow.print_history_and_memory()
# # out = flow.save_state("flow_state.json")
# print(out)
```
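
The state-handling calls in the example above are left commented out; the sketch below shows how the autosaved `flow_state.json` referenced there could be written and read back. It continues the example (reusing `llm`) and assumes `load_state` restores the flow in place, which the snippet only shows as a commented call.

```python
# Persist the loop history after a run, then restore it before a later run.
flow.save_state("flow_state.json")

# Later (or in a new process): rebuild the flow and rehydrate it from disk.
flow = Flow(llm=llm, max_loops=2, dashboard=True)
flow.load_state("flow_state.json")
out = flow.run("Continue the blog with a section on sleep hygiene.")  # placeholder task
```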

@ -1,37 +1,15 @@
from swarms.models import OpenAIChat
from swarms.structs import Flow
# Initialize the language model, this model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC
# Initialize the language model
llm = OpenAIChat(
# model_name="gpt-4"
# openai_api_key=api_key,
temperature=0.5,
# max_tokens=100,
)
## Initialize the workflow
flow = Flow(
llm=llm,
max_loops=2,
dashboard=True,
# tools=[search_api]
# stopping_condition=None, # You can define a stopping condition as needed.
# loop_interval=1,
# retry_attempts=3,
# retry_interval=1,
# interactive=False, # Set to 'True' for interactive mode.
# dynamic_temperature=False, # Set to 'True' for dynamic temperature handling.
)
flow = Flow(llm=llm, max_loops=1, dashboard=True)
# Run the workflow on a task
out = flow.run("Generate a 10,000 word blog on health and wellness.")
# out = flow.load_state("flow_state.json")
# temp = flow.dynamic_temperature()
# filter = flow.add_response_filter("Trump")
out = flow.run(
"Generate a 10,000 word blog on mental clarity and the benefits of meditation."
)
# out = flow.validate_response(out)
# out = flow.analyze_feedback(out)
# out = flow.print_history_and_memory()
# # out = flow.save_state("flow_state.json")
# print(out)

@ -11,6 +11,7 @@ from termcolor import colored
from swarms.utils.code_interpreter import SubprocessCodeInterpreter
from swarms.utils.parse_code import extract_code_in_backticks_in_string
from swarms.tools.tool import BaseTool
# System prompt
FLOW_SYSTEM_PROMPT = f"""
@ -25,7 +26,6 @@ to aid in these complex tasks. Your responses should be coherent, contextually r
"""
# Prompts
DYNAMIC_STOP_PROMPT = """
@ -36,7 +36,6 @@ This will enable you to leave the autonomous loop.
"""
# Make it able to handle multi input tools
DYNAMICAL_TOOL_USAGE = """
You have access to the following tools:
@ -53,6 +52,11 @@ commands: {
"tool1": "inputs",
"tool1": "inputs"
}
"tool3: "tool_name",
"params": {
"tool1": "inputs",
"tool1": "inputs"
}
}
}
@ -60,6 +64,29 @@ commands: {
{tools}
"""
SCENARIOS = """
commands: {
"tools": {
tool1: "tool_name",
"params": {
"tool1": "inputs",
"tool1": "inputs"
}
"tool2: "tool_name",
"params": {
"tool1": "inputs",
"tool1": "inputs"
}
"tool3: "tool_name",
"params": {
"tool1": "inputs",
"tool1": "inputs"
}
}
}
"""
def autonomous_agent_prompt(
tools_prompt: str = DYNAMICAL_TOOL_USAGE,
@ -198,7 +225,7 @@ class Flow:
def __init__(
self,
llm: Any,
template: str,
template: Optional[str] = None,
max_loops=5,
stopping_condition: Optional[Callable[[str], bool]] = None,
loop_interval: int = 1,
@ -212,7 +239,7 @@ class Flow:
agent_name: str = " Autonomous Agent XYZ1B",
agent_description: str = None,
system_prompt: str = FLOW_SYSTEM_PROMPT,
# tools: List[Any] = None,
tools: List[BaseTool] = None,
dynamic_temperature: bool = False,
sop: str = None,
saved_state_path: Optional[str] = "flow_state.json",
@ -246,7 +273,7 @@ class Flow:
# The max_loops will be set dynamically if the dynamic_loop
if self.dynamic_loops:
self.max_loops = "auto"
# self.tools = tools or []
self.tools = tools or []
self.system_prompt = system_prompt
self.agent_name = agent_name
self.agent_description = agent_description
@ -310,68 +337,81 @@ class Flow:
# # Parse the text for tool usage
# pass
# def get_tool_description(self):
# """Get the tool description"""
# tool_descriptions = []
# for tool in self.tools:
# description = f"{tool.name}: {tool.description}"
# tool_descriptions.append(description)
# return "\n".join(tool_descriptions)
# def find_tool_by_name(self, name: str):
# """Find a tool by name"""
# for tool in self.tools:
# if tool.name == name:
# return tool
# return None
# def construct_dynamic_prompt(self):
# """Construct the dynamic prompt"""
# tools_description = self.get_tool_description()
# return DYNAMICAL_TOOL_USAGE.format(tools=tools_description)
# def extract_tool_commands(self, text: str):
# """
# Extract the tool commands from the text
# Example:
# ```json
# {
# "tool": "tool_name",
# "params": {
# "tool1": "inputs",
# "param2": "value2"
# }
# }
# ```
def get_tool_description(self):
    """Get the tool description"""
    if self.tools:
        try:
            tool_descriptions = []
            for tool in self.tools:
                description = f"{tool.name}: {tool.description}"
                tool_descriptions.append(description)
            return "\n".join(tool_descriptions)
        except Exception as error:
            print(
                f"Error getting tool description: {error}."
                " Try adding a description to the tool or removing the tool."
            )
    else:
        return "No tools available"
# """
# # Regex to find JSON like strings
# pattern = r"```json(.+?)```"
# matches = re.findall(pattern, text, re.DOTALL)
# json_commands = []
# for match in matches:
# try:
# json_commands = json.loads(match)
# json_commands.append(json_commands)
# except Exception as error:
# print(f"Error parsing JSON command: {error}")
# def parse_and_execute_tools(self, response):
# """Parse and execute the tools"""
# json_commands = self.extract_tool_commands(response)
# for command in json_commands:
# tool_name = command.get("tool")
# params = command.get("parmas", {})
# self.execute_tool(tool_name, params)
# def execute_tools(self, tool_name, params):
# """Execute the tool with the provided params"""
# tool = self.tool_find_by_name(tool_name)
# if tool:
# # Execute the tool with the provided parameters
# tool_result = tool.run(**params)
# print(tool_result)
def find_tool_by_name(self, name: str):
    """Find a tool by name"""
    for tool in self.tools:
        if tool.name == name:
            return tool
    return None
def construct_dynamic_prompt(self):
    """Construct the dynamic prompt"""
    tools_description = self.get_tool_description()
    tool_prompt = self.tools_prompt_prep(tools_description, SCENARIOS)
    return tool_prompt
    # return DYNAMICAL_TOOL_USAGE.format(tools=tools_description)
def extract_tool_commands(self, text: str):
    """
    Extract the tool commands from the text
    Example:
    ```json
    {
        "tool": "tool_name",
        "params": {
            "param1": "value1",
            "param2": "value2"
        }
    }
    ```
    """
    # Regex to find JSON-like strings inside fenced json blocks
    pattern = r"```json(.+?)```"
    matches = re.findall(pattern, text, re.DOTALL)
    json_commands = []
    for match in matches:
        try:
            json_command = json.loads(match)
            json_commands.append(json_command)
        except Exception as error:
            print(f"Error parsing JSON command: {error}")
    return json_commands
def parse_and_execute_tools(self, response: str):
    """Parse and execute the tools"""
    json_commands = self.extract_tool_commands(response)
    for command in json_commands:
        tool_name = command.get("tool")
        params = command.get("params", {})
        self.execute_tools(tool_name, params)
def execute_tools(self, tool_name, params):
    """Execute the tool with the provided params"""
    tool = self.find_tool_by_name(tool_name)
    if tool:
        # Execute the tool with the provided parameters
        tool_result = tool.run(**params)
        print(tool_result)
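
A compact sketch of the dispatch path these three methods implement: take a parsed command, look the tool up by name, and call `run(**params)`. The stub tool and the command dict are placeholders; a real tool would be a `BaseTool` with a matching `run` signature.

```python
class SearchAPIStub:
    name = "search_api"
    description = "Search the web and return the top results"

    def run(self, query: str = "") -> str:
        # A real tool would perform the search; this stub just echoes the query.
        return f"results for: {query}"

tools = [SearchAPIStub()]

# An already-parsed command, as extract_tool_commands would return it.
command = {"tool": "search_api", "params": {"query": "meditation"}}

# find_tool_by_name + execute_tools in miniature.
tool = next((t for t in tools if t.name == command.get("tool")), None)
if tool:
    print(tool.run(**command.get("params", {})))
```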
def truncate_history(self):
"""
@ -483,12 +523,12 @@ class Flow:
self.print_dashboard(task)
loop_count = 0
# for i in range(self.max_loops):
while self.max_loops == "auto" or loop_count < self.max_loops:
loop_count += 1
print(colored(f"\nLoop {loop_count} of {self.max_loops}", "blue"))
print("\n")
# Check to see if stopping token is in the output to stop the loop
if self.stopping_token:
if self._check_stopping_condition(response) or parse_done_token(
response
@ -510,111 +550,22 @@ class Flow:
**kwargs,
)
# If code interpreter is enabled then run the code
if self.code_interpreter:
self.run_code(response)
# If there are any tools then parse and execute them
# if self.tools:
# self.parse_and_execute_tools(response)
if self.interactive:
print(f"AI: {response}")
history.append(f"AI: {response}")
response = input("You: ")
history.append(f"Human: {response}")
else:
print(f"AI: {response}")
history.append(f"AI: {response}")
# print(response)
break
except Exception as e:
logging.error(f"Error generating response: {e}")
attempt += 1
time.sleep(self.retry_interval)
history.append(response)
time.sleep(self.loop_interval)
self.memory.append(history)
if self.autosave:
save_path = self.saved_state_path or "flow_state.json"
print(colored(f"Autosaving flow state to {save_path}", "green"))
self.save_state(save_path)
if self.return_history:
return response, history
return response
except Exception as error:
print(f"Error running flow: {error}")
raise
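
A short usage sketch for the `return_history` branch above: when it is enabled, `run` hands back both the final response and the per-loop history. Treating `return_history` as a constructor flag is an assumption; only the `self.return_history` check is visible in this hunk.

```python
from swarms.models import OpenAIChat
from swarms.structs import Flow

llm = OpenAIChat(temperature=0.5)  # assumes the API key is read from the environment

# return_history as an __init__ kwarg is an assumption (see note above).
flow = Flow(llm=llm, max_loops=2, return_history=True)
response, history = flow.run("Summarize the benefits of daily meditation.")
print(history[-1])  # the last turn recorded during the loop
```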
def __call__(self, task: str, **kwargs):
"""
Run the autonomous agent loop
Args:
task (str): The initial task to run
Flow:
1. Generate a response
2. Check stopping condition
3. If stopping condition is met, stop
4. If stopping condition is not met, generate a response
5. Repeat until stopping condition is met or max_loops is reached
"""
try:
# dynamic_prompt = self.construct_dynamic_prompt()
# combined_prompt = f"{dynamic_prompt}\n{task}"
# Activate Autonomous agent message
self.activate_autonomous_agent()
response = task # or combined_prompt
history = [f"{self.user_name}: {task}"]
# If dashboard = True then print the dashboard
if self.dashboard:
self.print_dashboard(task)
loop_count = 0
# for i in range(self.max_loops):
while self.max_loops == "auto" or loop_count < self.max_loops:
loop_count += 1
print(colored(f"\nLoop {loop_count} of {self.max_loops}", "blue"))
print("\n")
if self.stopping_token:
if self._check_stopping_condition(response) or parse_done_token(
response
):
break
# Adjust the temperature dynamically; comment this out if it causes issues
if self.dynamic_temperature:
self.dynamic_temperature()
# Preparing the prompt
task = self.agent_history_prompt(FLOW_SYSTEM_PROMPT, response)
attempt = 0
while attempt < self.retry_attempts:
try:
response = self.llm(
task,
**kwargs,
)
if self.code_interpreter:
self.run_code(response)
# If there are any tools then parse and execute them
# if self.tools:
# self.parse_and_execute_tools(response)
if self.tools:
self.parse_and_execute_tools(response)
# If interactive mode is enabled then print the response and get user input
if self.interactive:
print(f"AI: {response}")
history.append(f"AI: {response}")
response = input("You: ")
history.append(f"Human: {response}")
# If interactive mode is not enabled then print the response
else:
print(f"AI: {response}")
history.append(f"AI: {response}")
@ -624,15 +575,20 @@ class Flow:
logging.error(f"Error generating response: {e}")
attempt += 1
time.sleep(self.retry_interval)
# Add the response to the history
history.append(response)
time.sleep(self.loop_interval)
# Add the history to the memory
self.memory.append(history)
# If autosave is enabled then save the state
if self.autosave:
save_path = self.saved_state_path or "flow_state.json"
print(colored(f"Autosaving flow state to {save_path}", "green"))
self.save_state(save_path)
# If return history is enabled then return the response and history
if self.return_history:
return response, history
@ -1113,7 +1069,7 @@ class Flow:
run_code = self.code_executor.run(parsed_code)
return run_code
def tool_prompt_prep(self, api_docs: str = None, required_api: str = None):
def tools_prompt_prep(self, docs: str = None, scenarios: str = None):
"""
Prepare the tool prompt
"""
@ -1160,19 +1116,14 @@ class Flow:
response.
Deliver your response in this format:
- Scenario 1: <Scenario1>
- Scenario 2: <Scenario2>
- Scenario 3: <Scenario3>
{scenarios}
# APIs
{api_docs}
{docs}
# Response
Required API: {required_api}
Scenarios with >=5 API calls:
- Scenario 1: <Scenario1>
"""
def self_healing(self, **kwargs):

@ -6,7 +6,9 @@ from typing import Callable, List, Dict, Any, Sequence
class Task:
def __init__(self, id: str, task: str, flows: Sequence[Flow], dependencies: List[str] = []):
def __init__(
self, id: str, task: str, flows: Sequence[Flow], dependencies: List[str] = []
):
self.id = id
self.task = task
self.flows = flows
@ -62,8 +64,15 @@ flow4 = Flow(llm, max_loops=1)
# Create tasks with their respective Flows and task strings
task1 = Task("task1", "Generate a summary on Quantum field theory", [flow1])
task2 = Task("task2", "Elaborate on the summary of topic X", [flow2, flow3], dependencies=["task1"])
task3 = Task("task3", "Generate conclusions for topic X", [flow4], dependencies=["task1"])
task2 = Task(
"task2",
"Elaborate on the summary of topic X",
[flow2, flow3],
dependencies=["task1"],
)
task3 = Task(
"task3", "Generate conclusions for topic X", [flow4], dependencies=["task1"]
)
# Create a workflow and add tasks
workflow = Workflow()
@ -76,4 +85,4 @@ workflow.run()
# Get results
results = workflow.get_results()
print(results)
print(results)

@ -260,10 +260,6 @@ class SequentialWorkflow:
--------------------------------
Metadata:
kwargs: {kwargs}
""",
"cyan",
attrs=["bold", "underline"],

@ -1,4 +1,4 @@
from swarms.utils.display_markdown import display_markdown_message
from swarms.utils.markdown_message import display_markdown_message
from swarms.utils.futures import execute_futures_dict
from swarms.utils.code_interpreter import SubprocessCodeInterpreter
from swarms.utils.parse_code import extract_code_in_backticks_in_string
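
The markdown helper now lives in `swarms.utils.markdown_message`. A one-line usage sketch, assuming `display_markdown_message` accepts a single markdown string; its signature is not visible in this diff.

```python
from swarms.utils.markdown_message import display_markdown_message

# Assumption: the helper takes one markdown-formatted string.
display_markdown_message("**Flow finished** - state autosaved to flow_state.json")
```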
