Merge branch 'master' of https://github.com/kyegomez/swarms into memory

# Conflicts:
#	pyproject.toml

commit c2a15ee3bb
@@ -0,0 +1,66 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

# This workflow lets you generate a SLSA provenance file for your project.
# The generation satisfies level 3 for the provenance requirements - see https://slsa.dev/spec/v0.1/requirements
# The project is an initiative of the OpenSSF (openssf.org) and is developed at
# https://github.com/slsa-framework/slsa-github-generator.
# The provenance file can be verified using https://github.com/slsa-framework/slsa-verifier.
# For more information about SLSA and how it improves the supply-chain, visit slsa.dev.

name: SLSA generic generator
on:
  workflow_dispatch:
  release:
    types: [created]

jobs:
  build:
    runs-on: ubuntu-latest
    outputs:
      digests: ${{ steps.hash.outputs.hashes }}

    steps:
      - uses: actions/checkout@v3

      # ========================================================
      #
      # Step 1: Build your artifacts.
      #
      # ========================================================
      - name: Build artifacts
        run: |
          # These are some amazing artifacts.
          echo "artifact1" > artifact1
          echo "artifact2" > artifact2

      # ========================================================
      #
      # Step 2: Add a step to generate the provenance subjects
      # as shown below. Update the sha256 sum arguments
      # to include all binaries that you generate
      # provenance for.
      #
      # ========================================================
      - name: Generate subject for provenance
        id: hash
        run: |
          set -euo pipefail

          # List the artifacts the provenance will refer to.
          files=$(ls artifact*)
          # Generate the subjects (base64 encoded).
          echo "hashes=$(sha256sum $files | base64 -w0)" >> "${GITHUB_OUTPUT}"

  provenance:
    needs: [build]
    permissions:
      actions: read   # To read the workflow path.
      id-token: write # To sign the provenance.
      contents: write # To add assets to a release.
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.4.0
    with:
      base64-subjects: "${{ needs.build.outputs.digests }}"
      upload-assets: true # Optional: Upload to a new release
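As the comments above note, consumers can check the generated attestation against the release artifacts with slsa-verifier. The step below is only a hedged sketch of what that might look like: the artifact name, provenance filename, and version pins are illustrative assumptions, and the exact slsa-verifier flags should be confirmed against its documentation.

```yaml
# Illustrative verification step (not part of the workflow above).
# Assumptions: the release artifact and its provenance have already
# been downloaded, and slsa-verifier v2.x is on the PATH.
- name: Verify provenance (sketch)
  run: |
    slsa-verifier verify-artifact artifact1 \
      --provenance-path attestation.intoto.jsonl \
      --source-uri github.com/kyegomez/swarms
```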
@@ -0,0 +1,27 @@
name: Makefile CI

on:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]

jobs:
  build:

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3

      - name: configure
        run: ./configure

      - name: Install dependencies
        run: make

      - name: Run check
        run: make check

      - name: Run distcheck
        run: make distcheck
@@ -1,21 +0,0 @@
Developers

Install pre-commit (https://pre-commit.com/)

```bash
pip install pre-commit
```

Check that it's installed

```bash
pre-commit --version
```

This repository already has a pre-commit configuration. To install the hooks, run:

```bash
pre-commit install
```

Now when you make a git commit, the black code formatter and ruff linter will run.
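For context, a pre-commit configuration that wires up black and ruff typically looks like the sketch below. This is illustrative, not the repository's actual `.pre-commit-config.yaml`; the `rev` pins in particular are assumptions and should be taken from the real file.

```yaml
# Illustrative .pre-commit-config.yaml (rev pins are assumptions)
repos:
  - repo: https://github.com/psf/black
    rev: 23.11.0   # assumed version pin
    hooks:
      - id: black
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.1.6    # assumed version pin
    hooks:
      - id: ruff
```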
@@ -1,37 +1,14 @@
from swarms.models import OpenAIChat
from swarms.structs import Flow

# Initialize the language model; this model can be swapped out for Anthropic, Hugging Face models such as Mistral, etc.
# Initialize the language model
llm = OpenAIChat(
    # model_name="gpt-4"
    # openai_api_key=api_key,
    temperature=0.5,
    # max_tokens=100,
)


## Initialize the workflow
flow = Flow(
    llm=llm,
    max_loops=2,
    dashboard=True,
    # tools=[search_api]
    # stopping_condition=None,  # You can define a stopping condition as needed.
    # loop_interval=1,
    # retry_attempts=3,
    # retry_interval=1,
    # interactive=False,  # Set to 'True' for interactive mode.
    # dynamic_temperature=False,  # Set to 'True' for dynamic temperature handling.
)
flow = Flow(llm=llm, max_loops=1, dashboard=True)

# out = flow.load_state("flow_state.json")
# temp = flow.dynamic_temperature()
# filter = flow.add_response_filter("Trump")
out = flow.run(
    "Generate a 10,000 word blog on mental clarity and the benefits of meditation."
)
# out = flow.validate_response(out)
# out = flow.analyze_feedback(out)
# out = flow.print_history_and_memory()
# # out = flow.save_state("flow_state.json")
# print(out)
# Run the workflow on a task
out = flow.run("Generate a 10,000 word blog on health and wellness.")
@@ -0,0 +1,31 @@
import os

from dotenv import load_dotenv

from swarms.models import OpenAIChat
from swarms.structs import Flow
from swarms.swarms.multi_agent_collab import MultiAgentCollaboration

load_dotenv()

api_key = os.environ.get("OPENAI_API_KEY")

# Initialize the language model
llm = OpenAIChat(
    temperature=0.5,
    openai_api_key=api_key,
)


## Initialize the workflow
flow = Flow(llm=llm, max_loops=1, dashboard=True)
flow2 = Flow(llm=llm, max_loops=1, dashboard=True)
flow3 = Flow(llm=llm, max_loops=1, dashboard=True)


swarm = MultiAgentCollaboration(
    agents=[flow, flow2, flow3],
    max_iters=4,
)

swarm.run("Generate a 10,000 word blog on health and wellness.")
@@ -0,0 +1,86 @@
import re

from swarms.models.openai_models import OpenAIChat


class AutoTemp:
    """
    AutoTemp is a tool for automatically selecting the best temperature setting for a given task.
    It generates responses at different temperatures, evaluates them, and ranks them based on quality.
    """

    def __init__(
        self,
        api_key,
        default_temp=0.0,
        alt_temps=None,
        auto_select=True,
        max_workers=6,
    ):
        self.api_key = api_key
        self.default_temp = default_temp
        self.alt_temps = (
            alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
        )
        self.auto_select = auto_select
        self.max_workers = max_workers
        self.llm = OpenAIChat(
            openai_api_key=self.api_key, temperature=self.default_temp
        )

    def evaluate_output(self, output, temperature):
        print(f"Evaluating output at temperature {temperature}...")
        eval_prompt = f"""
        Evaluate the following output, which was generated at a temperature setting of {temperature}. Provide a precise score from 0.0 to 100.0, considering the following criteria:

        - Relevance: How well does the output address the prompt or task at hand?
        - Clarity: Is the output easy to understand and free of ambiguity?
        - Utility: How useful is the output for its intended purpose?
        - Pride: If the user had to submit this output to the world for their career, would they be proud?
        - Delight: Is the output likely to delight or positively surprise the user?

        Be sure to comprehensively evaluate the output; it is very important for my career. Please answer with just the score, with one decimal place of accuracy, such as 42.0 or 96.9. Be extremely critical.

        Output to evaluate:
        ---
        {output}
        ---
        """
        score_text = self.llm(eval_prompt, temperature=0.5)
        score_match = re.search(r"\b\d+(\.\d)?\b", score_text)
        return round(float(score_match.group()), 1) if score_match else 0.0

    def run(self, prompt, temperature_string):
        print("Starting generation process...")
        temperature_list = [
            float(temp.strip())
            for temp in temperature_string.split(",")
            if temp.strip()
        ]
        outputs = {}
        scores = {}
        for temp in temperature_list:
            print(f"Generating at temperature {temp}...")
            output_text = self.llm(prompt, temperature=temp)
            if output_text:
                outputs[temp] = output_text
                scores[temp] = self.evaluate_output(output_text, temp)

        print("Generation process complete.")
        if not scores:
            return "No valid outputs generated.", None

        sorted_scores = sorted(
            scores.items(), key=lambda item: item[1], reverse=True
        )
        best_temp, best_score = sorted_scores[0]
        best_output = outputs[best_temp]

        return (
            f"Best AutoTemp Output (Temp {best_temp} | Score:"
            f" {best_score}):\n{best_output}"
            if self.auto_select
            else "\n".join(
                f"Temp {temp} | Score: {score}:\n{outputs[temp]}"
                for temp, score in sorted_scores
            )
        )
@@ -0,0 +1,22 @@
from swarms.models import OpenAIChat
from swarms.models.autotemp import AutoTemp

# Your OpenAI API key
api_key = ""

autotemp_agent = AutoTemp(
    api_key=api_key,
    alt_temps=[0.4, 0.6, 0.8, 1.0, 1.2],
    auto_select=False,
    # model_version="gpt-3.5-turbo"  # Specify the model version if needed
)

# Define the task and temperature string
task = "Generate a short story about a lost civilization."
temperature_string = "0.4,0.6,0.8,1.0,1.2,"

# Run the AutoTemp agent
result = autotemp_agent.run(task, temperature_string)

# Print the result
print(result)
@@ -0,0 +1,128 @@
import os

from termcolor import colored

from swarms.models import OpenAIChat
from swarms.models.autotemp import AutoTemp
from swarms.structs import SequentialWorkflow


class BlogGen:
    def __init__(
        self,
        api_key,
        blog_topic,
        temperature_range: str = "0.4,0.6,0.8,1.0,1.2",
    ):  # Add blog_topic as an argument
        self.openai_chat = OpenAIChat(openai_api_key=api_key, temperature=0.8)
        self.auto_temp = AutoTemp(api_key)
        self.temperature_range = temperature_range
        self.workflow = SequentialWorkflow(max_loops=5)

        # Formatting the topic selection prompt with the user's topic
        self.TOPIC_SELECTION_SYSTEM_PROMPT = f"""
        Given the topic '{blog_topic}', generate an engaging and versatile blog topic. This topic should cover areas related to '{blog_topic}' and might include aspects such as current events, lifestyle, technology, health, and culture related to '{blog_topic}'. Identify trending subjects within this realm. The topic must be unique, thought-provoking, and have the potential to draw in readers interested in '{blog_topic}'.
        """

        self.DRAFT_WRITER_SYSTEM_PROMPT = """
        Create an engaging and comprehensive blog article of at least 1,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences.
        """

        self.REVIEW_AGENT_SYSTEM_PROMPT = """
        Critically review the drafted blog article on '{{ARTICLE_TOPIC}}' to refine it to high-quality content suitable for online publication. Ensure the article is coherent, factually accurate, engaging, and optimized for search engines (SEO). Check for the effective use of keywords, readability, internal and external links, and the inclusion of meta descriptions and title tags. Edit the content to enhance clarity, impact, and maintain the author's voice. The goal is to polish the article into a professional, error-free piece that resonates with the target audience, adheres to publication standards, and is optimized for both search engines and social media sharing.
        """

        self.DISTRIBUTION_AGENT_SYSTEM_PROMPT = """
        Develop an autonomous distribution strategy for the blog article on '{{ARTICLE_TOPIC}}'. Utilize an API to post the article on a popular blog platform (e.g., WordPress, Blogger, Medium) commonly used by our target audience. Ensure the post includes all SEO elements like meta descriptions, title tags, and properly formatted content. Craft unique, engaging social media posts tailored to different platforms to promote the blog article. Schedule these posts to optimize reach and engagement, using data-driven insights. Monitor the performance of the distribution efforts, adjusting strategies based on engagement metrics and audience feedback. Aim to maximize the article's visibility, attract a diverse audience, and foster engagement across digital channels.
        """

    def run_workflow(self):
        try:
            # Topic generation using OpenAIChat
            topic_result = self.openai_chat.generate(
                [self.TOPIC_SELECTION_SYSTEM_PROMPT]
            )
            topic_output = topic_result.generations[0][0].text
            print(
                colored(
                    (
                        "\nTopic Selection Task"
                        f" Output:\n----------------------------\n{topic_output}\n"
                    ),
                    "white",
                )
            )

            chosen_topic = topic_output.split("\n")[0]
            print(colored("Selected topic: " + chosen_topic, "yellow"))

            # Initial draft generation with AutoTemp
            initial_draft_prompt = self.DRAFT_WRITER_SYSTEM_PROMPT.replace(
                "{{CHOSEN_TOPIC}}", chosen_topic
            )
            auto_temp_output = self.auto_temp.run(
                initial_draft_prompt, self.temperature_range
            )
            initial_draft_output = auto_temp_output  # Assuming AutoTemp.run returns the best output directly
            print(
                colored(
                    (
                        "\nInitial Draft"
                        f" Output:\n----------------------------\n{initial_draft_output}\n"
                    ),
                    "white",
                )
            )

            # Review process using OpenAIChat
            review_prompt = self.REVIEW_AGENT_SYSTEM_PROMPT.replace(
                "{{ARTICLE_TOPIC}}", chosen_topic
            )
            review_result = self.openai_chat.generate([review_prompt])
            review_output = review_result.generations[0][0].text
            print(
                colored(
                    (
                        "\nReview"
                        f" Output:\n----------------------------\n{review_output}\n"
                    ),
                    "white",
                )
            )

            # Distribution preparation using OpenAIChat
            distribution_prompt = self.DISTRIBUTION_AGENT_SYSTEM_PROMPT.replace(
                "{{ARTICLE_TOPIC}}", chosen_topic
            )
            distribution_result = self.openai_chat.generate(
                [distribution_prompt]
            )
            distribution_output = distribution_result.generations[0][0].text
            print(
                colored(
                    (
                        "\nDistribution"
                        f" Output:\n----------------------------\n{distribution_output}\n"
                    ),
                    "white",
                )
            )

            # Final compilation of the blog
            final_blog_content = f"{initial_draft_output}\n\n{review_output}\n\n{distribution_output}"
            print(
                colored(
                    (
                        "\nFinal Blog"
                        f" Content:\n----------------------------\n{final_blog_content}\n"
                    ),
                    "green",
                )
            )

        except Exception as e:
            print(colored(f"An error occurred: {str(e)}", "red"))


if __name__ == "__main__":
    api_key = os.environ["OPENAI_API_KEY"]
    # BlogGen requires a topic; prompt for one, as the example runner does.
    blog_topic = input("Enter the topic for the blog generation: ")
    blog_generator = BlogGen(api_key, blog_topic)
    blog_generator.run_workflow()
@@ -0,0 +1,23 @@
import os

from swarms.swarms.blog_gen import BlogGen


def main():
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise ValueError("OPENAI_API_KEY environment variable not set.")

    blog_topic = input("Enter the topic for the blog generation: ")

    blog_generator = BlogGen(api_key, blog_topic)
    # Note: the topic is already interpolated into the prompt by BlogGen's
    # f-string, so this replace is a harmless no-op safeguard.
    blog_generator.TOPIC_SELECTION_SYSTEM_PROMPT = (
        blog_generator.TOPIC_SELECTION_SYSTEM_PROMPT.replace(
            "{{BLOG_TOPIC}}", blog_topic
        )
    )

    blog_generator.run_workflow()


if __name__ == "__main__":
    main()
(Binary image file added: 193 KiB.)
@@ -0,0 +1,129 @@
import os
import base64
import requests
from dotenv import load_dotenv
from swarms.models import Anthropic, OpenAIChat
from swarms.structs import Flow

# Load environment variables
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")

# Define prompts for various tasks
MEAL_PLAN_PROMPT = (
    "Based on the following user preferences: dietary restrictions as"
    " vegetarian, preferred cuisines as Italian and Indian, a total caloric"
    " intake of around 2000 calories per day, and an exclusion of legumes,"
    " create a detailed weekly meal plan. Include a variety of meals for"
    " breakfast, lunch, dinner, and optional snacks."
)
IMAGE_ANALYSIS_PROMPT = (
    "Identify the items in this fridge, including their quantities and"
    " condition."
)


# Function to encode image to base64
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")


# Initialize Language Model (LLM)
llm = OpenAIChat(
    openai_api_key=openai_api_key,
    max_tokens=3000,
)


# Function to handle vision tasks
def create_vision_agent(image_path):
    base64_image = encode_image(image_path)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}",
    }
    payload = {
        "model": "gpt-4-vision-preview",
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": IMAGE_ANALYSIS_PROMPT},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        },
                    },
                ],
            }
        ],
        "max_tokens": 300,
    }
    response = requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers=headers,
        json=payload,
    )
    return response.json()


# Function to generate an integrated shopping list considering meal plan and fridge contents
def generate_integrated_shopping_list(
    meal_plan_output, image_analysis, user_preferences
):
    # Prepare the prompt for the LLM
    fridge_contents = image_analysis["choices"][0]["message"]["content"]
    prompt = (
        f"Based on this meal plan: {meal_plan_output}, and the following items"
        f" in the fridge: {fridge_contents}, considering dietary preferences as"
        " vegetarian with a preference for Italian and Indian cuisines,"
        " generate a comprehensive shopping list that includes only the items"
        " needed."
    )

    # Send the prompt to the LLM and return the response
    response = llm(prompt)
    return response  # assuming the response is a string


# Define agent for meal planning
meal_plan_agent = Flow(
    llm=llm,
    sop=MEAL_PLAN_PROMPT,
    max_loops=1,
    autosave=True,
    saved_state_path="meal_plan_agent.json",
)

# User preferences for meal planning
user_preferences = {
    "dietary_restrictions": "vegetarian",
    "preferred_cuisines": ["Italian", "Indian"],
    "caloric_intake": 2000,
    "other notes": "Doesn't eat legumes",
}

# Generate Meal Plan
meal_plan_output = meal_plan_agent.run(
    f"Generate a meal plan: {user_preferences}"
)

# Vision Agent - Analyze an Image
image_analysis_output = create_vision_agent("full_fridge.jpg")

# Generate Integrated Shopping List
integrated_shopping_list = generate_integrated_shopping_list(
    meal_plan_output, image_analysis_output, user_preferences
)

# Print and save the outputs
print("Meal Plan:", meal_plan_output)
print("Integrated Shopping List:", integrated_shopping_list)

with open("nutrition_output.txt", "w") as file:
    file.write("Meal Plan:\n" + meal_plan_output + "\n\n")
    file.write("Integrated Shopping List:\n" + integrated_shopping_list + "\n")

print("Outputs have been saved to nutrition_output.txt")
@@ -1,101 +0,0 @@
import re
from concurrent.futures import ThreadPoolExecutor, as_completed

from swarms.models.openai_models import OpenAIChat


class AutoTempAgent:
    """
    AutoTemp is a tool for automatically selecting the best temperature setting for a given task.

    Flow:
        1. Generate outputs at a range of temperature settings.
        2. Evaluate each output using the default temperature setting.
        3. Select the best output based on the evaluation score.
        4. Return the best output.

    Args:
        temperature (float, optional): The default temperature setting to use. Defaults to 0.5.
        api_key (str, optional): Your OpenAI API key. Defaults to None.
        alt_temps (list, optional): A list of alternative temperature settings to try. Defaults to None.
        auto_select (bool, optional): If True, the best temperature setting will be automatically selected. Defaults to True.
        max_workers (int, optional): The maximum number of workers to use when generating outputs. Defaults to 6.

    Returns:
        str: The best output if auto_select is True; otherwise all outputs with their scores.

    Examples:
        >>> from swarms.demos.autotemp import AutoTemp
        >>> autotemp = AutoTemp()
        >>> autotemp.run("Generate a 10,000 word blog on mental clarity and the benefits of meditation.", "0.4,0.6,0.8,1.0,1.2,1.4")
        Best AutoTemp Output (Temp 0.4 | Score: 100.0):
        Generate a 10,000 word blog on mental clarity and the benefits of meditation.
    """

    def __init__(
        self,
        temperature: float = 0.5,
        api_key: str = None,
        alt_temps=None,
        auto_select=True,
        max_workers=6,
    ):
        self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
        self.auto_select = auto_select
        self.max_workers = max_workers
        self.temperature = temperature
        self.llm = OpenAIChat(
            openai_api_key=api_key,
            temperature=temperature,
        )

    def evaluate_output(self, output: str):
        """Evaluate the output using the default temperature setting."""
        eval_prompt = f"""
        Evaluate the following output, which was generated at a temperature setting of {self.temperature}.
        Provide a precise score from 0.0 to 100.0, considering the criteria of relevance, clarity, utility, pride, and delight.

        Output to evaluate:
        ---
        {output}
        ---
        """
        score_text = self.llm(prompt=eval_prompt)
        score_match = re.search(r"\b\d+(\.\d)?\b", score_text)
        return round(float(score_match.group()), 1) if score_match else 0.0

    def run(self, task: str, temperature_string):
        """Run the AutoTemp agent."""
        temperature_list = [
            float(temp.strip()) for temp in temperature_string.split(",")
        ]
        outputs = {}
        scores = {}
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            future_to_temp = {
                executor.submit(self.llm.generate, task, temp): temp
                for temp in temperature_list
            }
            for future in as_completed(future_to_temp):
                temp = future_to_temp[future]
                output_text = future.result()
                outputs[temp] = output_text
                scores[temp] = self.evaluate_output(output_text)

        if not scores:
            return "No valid outputs generated.", None

        sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
        best_temp, best_score = sorted_scores[0]
        best_output = outputs[best_temp]

        return (
            f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}"
            if self.auto_select
            else "\n".join(
                f"Temp {temp} | Score: {score}:\n{outputs[temp]}"
                for temp, score in sorted_scores
            )
        )
@@ -0,0 +1,97 @@
from swarms.models import OpenAIChat
from swarms.structs.flow import Flow

import concurrent.futures
from typing import Callable, List, Dict, Any, Sequence


class Task:
    def __init__(
        self,
        id: str,
        task: str,
        flows: Sequence[Flow],
        dependencies: List[str] = [],
    ):
        self.id = id
        self.task = task
        self.flows = flows
        self.dependencies = dependencies
        self.results = []

    def execute(self, parent_results: Dict[str, Any]):
        args = [parent_results[dep] for dep in self.dependencies]
        for flow in self.flows:
            result = flow.run(self.task, *args)
            self.results.append(result)
            args = [
                result
            ]  # The output of one flow becomes the input to the next


class Workflow:
    def __init__(self):
        self.tasks: Dict[str, Task] = {}
        self.executor = concurrent.futures.ThreadPoolExecutor()

    def add_task(self, task: Task):
        self.tasks[task.id] = task

    def run(self):
        completed_tasks = set()
        while len(completed_tasks) < len(self.tasks):
            futures = []
            for task in self.tasks.values():
                if task.id not in completed_tasks and all(
                    dep in completed_tasks for dep in task.dependencies
                ):
                    future = self.executor.submit(
                        task.execute,
                        {
                            dep: self.tasks[dep].results
                            for dep in task.dependencies
                        },
                    )
                    futures.append((future, task.id))

            for future, task_id in futures:
                future.result()  # Wait for task completion
                completed_tasks.add(task_id)

    def get_results(self):
        return {task_id: task.results for task_id, task in self.tasks.items()}


# create flows
llm = OpenAIChat(openai_api_key="sk-")

flow1 = Flow(llm, max_loops=1)
flow2 = Flow(llm, max_loops=1)
flow3 = Flow(llm, max_loops=1)
flow4 = Flow(llm, max_loops=1)


# Create tasks with their respective Flows and task strings
task1 = Task("task1", "Generate a summary on Quantum field theory", [flow1])
task2 = Task(
    "task2",
    "Elaborate on the summary of topic X",
    [flow2, flow3],
    dependencies=["task1"],
)
task3 = Task(
    "task3", "Generate conclusions for topic X", [flow4], dependencies=["task1"]
)

# Create a workflow and add tasks
workflow = Workflow()
workflow.add_task(task1)
workflow.add_task(task2)
workflow.add_task(task3)

# Run the workflow
workflow.run()

# Get results
results = workflow.get_results()
print(results)
Some files were not shown because too many files have changed in this diff.