|
|
@ -1,31 +1,19 @@
|
|
|
|
import re
|
|
|
|
import re
|
|
|
|
from swarms.models.openai_models import OpenAIChat
|
|
|
|
from swarms.models.openai_models import OpenAIChat
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class AutoTemp:
|
|
|
|
class AutoTemp:
|
|
|
|
"""
|
|
|
|
"""
|
|
|
|
AutoTemp is a tool for automatically selecting the best temperature setting for a given task.
|
|
|
|
AutoTemp is a tool for automatically selecting the best temperature setting for a given task.
|
|
|
|
It generates responses at different temperatures, evaluates them, and ranks them based on quality.
|
|
|
|
It generates responses at different temperatures, evaluates them, and ranks them based on quality.
|
|
|
|
"""
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
|
|
def __init__(
    self,
    api_key,
    default_temp=0.0,
    alt_temps=None,
    auto_select=True,
    max_workers=6,
):
    """Initialize the AutoTemp helper.

    Args:
        api_key: OpenAI API key forwarded to the underlying ``OpenAIChat`` model.
        default_temp: Temperature for the baseline LLM instance (per-call
            temperatures are passed explicitly elsewhere, e.g. evaluation at 0.5).
        alt_temps: Candidate temperatures to try. ``None`` selects the built-in
            defaults ``[0.4, 0.6, 0.8, 1.0, 1.2, 1.4]``.
        auto_select: When True, only the best-scoring output is returned by
            ``run``; otherwise all outputs are reported.
        max_workers: Upper bound on concurrent generation workers.
    """
    self.api_key = api_key
    self.default_temp = default_temp
    # Use an explicit `is not None` check (not truthiness) so a deliberately
    # passed empty list is respected instead of being silently swapped for
    # the default temperature ladder.
    self.alt_temps = (
        alt_temps if alt_temps is not None else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
    )
    self.auto_select = auto_select
    self.max_workers = max_workers
    # Baseline model bound to the default temperature.
    self.llm = OpenAIChat(
        openai_api_key=self.api_key, temperature=self.default_temp
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def evaluate_output(self, output, temperature):
|
|
|
|
def evaluate_output(self, output, temperature):
|
|
|
|
print(f"Evaluating output at temperature {temperature}...")
|
|
|
|
print(f"Evaluating output at temperature {temperature}...")
|
|
|
@ -46,20 +34,12 @@ class AutoTemp:
|
|
|
|
---
|
|
|
|
---
|
|
|
|
"""
|
|
|
|
"""
|
|
|
|
score_text = self.llm(eval_prompt, temperature=0.5)
|
|
|
|
score_text = self.llm(eval_prompt, temperature=0.5)
|
|
|
|
score_match = re.search(r"\b\d+(\.\d)?\b", score_text)
|
|
|
|
score_match = re.search(r'\b\d+(\.\d)?\b', score_text)
|
|
|
|
return (
|
|
|
|
return round(float(score_match.group()), 1) if score_match else 0.0
|
|
|
|
round(float(score_match.group()), 1)
|
|
|
|
|
|
|
|
if score_match
|
|
|
|
|
|
|
|
else 0.0
|
|
|
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def run(self, prompt, temperature_string):
|
|
|
|
def run(self, prompt, temperature_string):
|
|
|
|
print("Starting generation process...")
|
|
|
|
print("Starting generation process...")
|
|
|
|
temperature_list = [
|
|
|
|
temperature_list = [float(temp.strip()) for temp in temperature_string.split(',') if temp.strip()]
|
|
|
|
float(temp.strip())
|
|
|
|
|
|
|
|
for temp in temperature_string.split(",")
|
|
|
|
|
|
|
|
if temp.strip()
|
|
|
|
|
|
|
|
]
|
|
|
|
|
|
|
|
outputs = {}
|
|
|
|
outputs = {}
|
|
|
|
scores = {}
|
|
|
|
scores = {}
|
|
|
|
for temp in temperature_list:
|
|
|
|
for temp in temperature_list:
|
|
|
@ -73,15 +53,12 @@ class AutoTemp:
|
|
|
|
if not scores:
|
|
|
|
if not scores:
|
|
|
|
return "No valid outputs generated.", None
|
|
|
|
return "No valid outputs generated.", None
|
|
|
|
|
|
|
|
|
|
|
|
sorted_scores = sorted(
|
|
|
|
sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
|
|
|
|
scores.items(), key=lambda item: item[1], reverse=True
|
|
|
|
|
|
|
|
)
|
|
|
|
|
|
|
|
best_temp, best_score = sorted_scores[0]
|
|
|
|
best_temp, best_score = sorted_scores[0]
|
|
|
|
best_output = outputs[best_temp]
|
|
|
|
best_output = outputs[best_temp]
|
|
|
|
|
|
|
|
|
|
|
|
return (
|
|
|
|
return (
|
|
|
|
f"Best AutoTemp Output (Temp {best_temp} | Score:"
|
|
|
|
f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}"
|
|
|
|
f" {best_score}):\n{best_output}"
|
|
|
|
|
|
|
|
if self.auto_select
|
|
|
|
if self.auto_select
|
|
|
|
else "\n".join(
|
|
|
|
else "\n".join(
|
|
|
|
f"Temp {temp} | Score: {score}:\n{outputs[temp]}"
|
|
|
|
f"Temp {temp} | Score: {score}:\n{outputs[temp]}"
|
|
|
|