feat: Initial tool flow integration

pull/85/head
Zack 2 years ago
parent cc3e48b800
commit 199c332b28

@@ -1,5 +1,6 @@
 from swarms.models import OpenAIChat
 from swarms.structs import Flow
+from swarms.tools.interpreter_tool import compile

 api_key = ""
@@ -11,21 +12,21 @@ llm = OpenAIChat(
 )

 # Initialize the flow
-flow = Flow(llm=llm, max_loops=5, dashboard=True,)
-flow = Flow(
-    llm=llm,
-    max_loops=5,
-    dashboard=True,
-    # stopping_condition=None,  # You can define a stopping condition as needed.
-    # loop_interval=1,
-    # retry_attempts=3,
-    # retry_interval=1,
-    # interactive=False,  # Set to 'True' for interactive mode.
-    # dynamic_temperature=False,  # Set to 'True' for dynamic temperature handling.
-)
+flow = Flow(llm=llm, max_loops=5, dashboard=True, tools=[compile])
+# flow = Flow(
+#     llm=llm,
+#     max_loops=5,
+#     dashboard=True,
+#     # stopping_condition=None,  # You can define a stopping condition as needed.
+#     # loop_interval=1,
+#     # retry_attempts=3,
+#     # retry_interval=1,
+#     # interactive=False,  # Set to 'True' for interactive mode.
+#     # dynamic_temperature=False,  # Set to 'True' for dynamic temperature handling.
+# )

-out = flow.run("Generate a 10,000 word blog on health and wellness.")
+out = flow.run("Use your open interpreter tool to print hello world to the terminal")
 print(out)
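For context, here is the updated example script end to end. The hunks above elide the OpenAIChat construction (original lines 2-11), so the constructor arguments shown here are assumptions, not the committed code:

```python
# Sketch of the full updated example; the elided OpenAIChat setup is assumed.
from swarms.models import OpenAIChat
from swarms.structs import Flow
from swarms.tools.interpreter_tool import compile

api_key = ""  # fill in a real key

# Assumed constructor arguments for the elided middle of the script.
llm = OpenAIChat(openai_api_key=api_key)

# Register the interpreter tool; Flow exposes its name and docstring to the LLM.
flow = Flow(llm=llm, max_loops=5, dashboard=True, tools=[compile])

out = flow.run("Use your open interpreter tool to print hello world to the terminal")
print(out)
```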

@@ -8,7 +8,6 @@ TODO:
 import json
 import logging
-import re
 import time
 from typing import Any, Callable, Dict, List, Optional, Tuple, Generator
 from termcolor import colored
@@ -16,8 +15,6 @@ import inspect
 import random
 from swarms.tools.tool import BaseTool
-from swarms.models.openai_models import OpenAIChat

 # Constants
 FLOW_SYSTEM_PROMPT = """
@@ -97,8 +94,8 @@ class Flow:
     def __init__(
         self,
-        llm: Any,
         # template: str,
+        llm: Any,
         max_loops: int = 5,
         stopping_condition: Optional[Callable[[str], bool]] = None,
         loop_interval: int = 1,
@@ -111,11 +108,6 @@ class Flow:
         **kwargs: Any,
     ):
         # self.template = template
-        self.processors = {
-            'text': self.process_text,
-            'image': self.process_image,
-            'audio': self.process_audio,
-        }
         self.llm = llm
         self.max_loops = max_loops
         self.stopping_condition = stopping_condition
@@ -129,45 +121,22 @@ class Flow:
         self.interactive = interactive
         self.dashboard = dashboard
         self.dynamic_temperature = dynamic_temperature
-        self.tools = tools
+        self.tools = tools or []
-    def __call__(self, task, **kwargs):
-        """Invoke the flow by providing a template and its variables."""
-        subtasks = self.break_down_task(task)
-        responses = []
-        for subtask in subtasks:
-            mode = self.determine_mode(subtask)
-            processor = self.processors.get(mode)
-            if processor:
-                refined_prompt = self.text_model(f"Define the task '{subtask}' as it relates to the original task '{task}'.")
-                response = processor(refined_prompt, task)
-                responses.append(response)
-            else:
-                raise ValueError(f'Invalid mode: {mode}')
-        return responses
-
-    def break_down_task(self, task):
-        # Break down the task into subtasks
-        subtasks = re.split(r' with | and ', task)
-        return subtasks
-
-    def determine_mode(self, subtask):
-        result = self.classifier(subtask, candidate_labels=['text', 'image', 'audio', 'video'])
-        return result['labels'][0]
-
-    def process_image(self, image_description):
-        response = self.image_model(image_description)
-        return response
-
-    def process_audio(self, audio_description):
-        response = self.audio_model(audio_description)
-        return response
-
-    def process_video(self, video_description):
-        response = self.video_model(video_description)
-        return response
-        return "Video generated from description: " + video_description
+    def run(self, task: str, **kwargs):
+        # Seed the history with the user's task so the first prompt has context.
+        response = task
+        for i in range(self.max_loops):
+            for tool in self.tools:
+                # Surface the tool's name and docstring to the LLM.
+                tool_prompt = f"\n\nTool: {tool.__name__}\n{tool.__doc__}"
+                response = self.llm(
+                    f"""
+                    {FLOW_SYSTEM_PROMPT}
+                    {tool_prompt}

+                    History: {response}
+                    """,
+                    **kwargs,
+                )
+        return response
     def provide_feedback(self, feedback: str) -> None:
         """Allow users to provide feedback on the responses."""
         self.feedback.append(feedback)

@@ -179,6 +148,11 @@ class Flow:
             return self.stopping_condition(response)
         return False

+    def __call__(self, prompt, **kwargs) -> str:
+        """Invoke the flow by providing a template and its variables."""
+        response = self.llm(prompt, **kwargs)
+        return response
+
     def dynamic_temperature(self):
         """
         1. Check the self.llm object for the temperature
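The core of the change is how `run` advertises tools: each loop iteration splices the tool's `__name__` and `__doc__` into the system prompt before calling the LLM. A minimal standalone sketch of that composition — the abbreviated `FLOW_SYSTEM_PROMPT`, the stub tool, and the `build_tool_prompt` helper are placeholders, not part of the commit:

```python
# Standalone sketch of the tool-prompt composition used in Flow.run above.
FLOW_SYSTEM_PROMPT = "You are an autonomous agent..."  # abbreviated placeholder

def compile(task: str):
    """Open Interpreter lets LLMs run code locally."""  # stub docstring

def build_tool_prompt(tool) -> str:
    # Mirrors the f-string in Flow.run: the tool's name plus its docstring.
    return f"\n\nTool: {tool.__name__}\n{tool.__doc__}"

prompt = f"{FLOW_SYSTEM_PROMPT}{build_tool_prompt(compile)}\n\nHistory: ..."
print(prompt)
```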

@@ -1,7 +1,9 @@
 import os
+from swarms.tools import tool
 import interpreter

+@tool
 def compile(task: str):
     """
     Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more) locally. You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running $ interpreter after installing.
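The diff truncates `compile` after the first docstring line. A plausible minimal body, assuming the open-interpreter Python API of that era (`interpreter.auto_run`, `interpreter.chat`) — treat both calls as assumptions rather than the committed implementation:

```python
import interpreter
from swarms.tools import tool

@tool
def compile(task: str):
    """
    Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more)
    locally, driven by a natural-language task description.
    """
    interpreter.auto_run = True      # assumption: skip per-command confirmation
    result = interpreter.chat(task)  # assumption: chat() runs the task and returns messages
    return result
```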
