@@ -9,12 +9,15 @@ TODO:
import json
import logging
import re
import time
from typing import Any, Callable, Dict, List, Optional, Tuple, Generator
from termcolor import colored
import inspect
import random

from swarms.models.openai_models import OpenAIChat


# Constants
FLOW_SYSTEM_PROMPT = """

@@ -95,8 +98,13 @@ class Flow:
        dynamic_temperature: bool = False,
        **kwargs: Any,
    ):
        self.llm = llm
        # self.template = template
        self.processors = {
            'text': self.process_text,
            'image': self.process_image,
            'audio': self.process_audio,
            'video': self.process_video,
        }
        self.max_loops = max_loops
        self.stopping_condition = stopping_condition
        self.loop_interval = loop_interval

@@ -110,6 +118,43 @@ class Flow:
        self.dashboard = dashboard
        self.dynamic_temperature = dynamic_temperature

    def __call__(self, task, **kwargs):
        """Run the flow on a task: break it into subtasks, classify each
        subtask's modality, and route it to the matching processor."""
        subtasks = self.break_down_task(task)
        responses = []
        for subtask in subtasks:
            mode = self.determine_mode(subtask)
            processor = self.processors.get(mode)
            if processor:
                refined_prompt = self.text_model(
                    f"Define the task '{subtask}' as it relates to the original task '{task}'."
                )
                response = processor(refined_prompt)
                responses.append(response)
            else:
                raise ValueError(f'Invalid mode: {mode}')
        return responses
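
    # Hypothetical usage sketch; self.text_model and self.classifier are
    # assumed to be set on the instance elsewhere (they are not initialized
    # anywhere in this diff):
    #   flow = Flow(llm=OpenAIChat())
    #   flow("write a caption with an illustration and a jingle")
    #   # -> one response per subtask, from that subtask's processor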

    def break_down_task(self, task):
        # Break down the task into subtasks
        subtasks = re.split(r' with | and ', task)
        return subtasks
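    # e.g. "paint a sunset with calm music and a short poem"
    #   -> ["paint a sunset", "calm music", "a short poem"]
    # Note: splitting on bare connectives is naive and will also split
    # phrases that merely contain " and " or " with ".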

    def determine_mode(self, subtask):
        result = self.classifier(subtask, candidate_labels=['text', 'image', 'audio', 'video'])
        return result['labels'][0]
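    # Assumes self.classifier is a zero-shot classification pipeline (e.g.
    # transformers' pipeline("zero-shot-classification")), whose result
    # lists the candidate labels sorted by score, best first.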

    def process_image(self, image_description):
        response = self.image_model(image_description)
        return response

    def process_audio(self, audio_description):
        response = self.audio_model(audio_description)
        return response

    def process_video(self, video_description):
        response = self.video_model(video_description)
        return response
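
    # self.image_model, self.audio_model, and self.video_model are assumed
    # to be text-conditioned generation callables (e.g. diffusion, TTS, and
    # video backends); they are not initialized anywhere in this diff.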

    def provide_feedback(self, feedback: str) -> None:
        """Allow users to provide feedback on the responses."""
        self.feedback.append(feedback)
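    # Assumes self.feedback is initialized (e.g. to []) in an elided part
    # of __init__.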

@@ -121,11 +166,6 @@ class Flow:
            return self.stopping_condition(response)
        return False
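
    # stopping_condition is an optional predicate over the latest response,
    # e.g. Flow(llm=llm, stopping_condition=lambda response: 'STOP' in response).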

    def __call__(self, prompt, **kwargs) -> str:
        """Invoke the flow by providing a template and its variables."""
        response = self.llm(prompt, **kwargs)
        return response

    def dynamic_temperature(self):
        """
        1. Check the self.llm object for the temperature