diff --git a/playground/demos/autotemp/autotemp.py b/playground/demos/autotemp/autotemp.py
index baf8f091..ed38a621 100644
--- a/playground/demos/autotemp/autotemp.py
+++ b/playground/demos/autotemp/autotemp.py
@@ -1,31 +1,19 @@
 import re
 from swarms.models.openai_models import OpenAIChat
 
-
 class AutoTemp:
     """
     AutoTemp is a tool for automatically selecting the best temperature setting for a given task.
     It generates responses at different temperatures, evaluates them, and ranks them based on quality.
     """
 
-    def __init__(
-        self,
-        api_key,
-        default_temp=0.0,
-        alt_temps=None,
-        auto_select=True,
-        max_workers=6,
-    ):
+    def __init__(self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6):
         self.api_key = api_key
         self.default_temp = default_temp
-        self.alt_temps = (
-            alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
-        )
+        self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
         self.auto_select = auto_select
         self.max_workers = max_workers
-        self.llm = OpenAIChat(
-            openai_api_key=self.api_key, temperature=self.default_temp
-        )
+        self.llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp)
 
     def evaluate_output(self, output, temperature):
         print(f"Evaluating output at temperature {temperature}...")
@@ -46,20 +34,12 @@ class AutoTemp:
         ---
         """
         score_text = self.llm(eval_prompt, temperature=0.5)
-        score_match = re.search(r"\b\d+(\.\d)?\b", score_text)
-        return (
-            round(float(score_match.group()), 1)
-            if score_match
-            else 0.0
-        )
+        score_match = re.search(r'\b\d+(\.\d)?\b', score_text)
+        return round(float(score_match.group()), 1) if score_match else 0.0
 
     def run(self, prompt, temperature_string):
         print("Starting generation process...")
-        temperature_list = [
-            float(temp.strip())
-            for temp in temperature_string.split(",")
-            if temp.strip()
-        ]
+        temperature_list = [float(temp.strip()) for temp in temperature_string.split(',') if temp.strip()]
         outputs = {}
         scores = {}
         for temp in temperature_list:
@@ -73,15 +53,12 @@ class AutoTemp:
         if not scores:
             return "No valid outputs generated.", None
 
-        sorted_scores = sorted(
-            scores.items(), key=lambda item: item[1], reverse=True
-        )
+        sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
         best_temp, best_score = sorted_scores[0]
         best_output = outputs[best_temp]
 
         return (
-            f"Best AutoTemp Output (Temp {best_temp} | Score:"
-            f" {best_score}):\n{best_output}"
+            f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}"
             if self.auto_select
             else "\n".join(
                 f"Temp {temp} | Score: {score}:\n{outputs[temp]}"
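
For reference, a minimal usage sketch of the AutoTemp API as it reads after this change. This is not part of the patch; the import path, API key, prompt, and temperature list below are illustrative placeholders, and the constructor arguments and run(prompt, temperature_string) signature are taken from the code shown above.

    # Hypothetical usage sketch, assuming autotemp.py is importable from the demo directory.
    from autotemp import AutoTemp

    agent = AutoTemp(api_key="sk-...")  # placeholder; a valid OpenAI API key is required
    # run() takes the prompt plus a comma-separated string of temperatures to try
    result = agent.run("Summarize the benefits of unit testing.", "0.4,0.7,1.0")
    print(result)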