Merge branch 'master' of https://github.com/kyegomez/swarms into memory
commit 285d36ca6f
@@ -0,0 +1,76 @@
import os
from dotenv import load_dotenv
from swarms.models import OpenAIChat
from swarms.models.stable_diffusion import StableDiffusion
from swarms.structs import Agent, SequentialWorkflow
import swarms.prompts.education as edu_prompts

# Load environment variables
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
stability_api_key = os.getenv("STABILITY_API_KEY")

# Initialize language model
llm = OpenAIChat(
    openai_api_key=api_key, temperature=0.5, max_tokens=3000
)

# Initialize Stable Diffusion
sd_api = StableDiffusion(api_key=stability_api_key)

# User preferences (can be dynamically set in a real application)
user_preferences = {
    "subjects": "Cognitive Architectures",
    "learning_style": "Visual",
    "challenge_level": "Moderate",
}

# Formatted prompts from user preferences
curriculum_prompt = edu_prompts.CURRICULUM_DESIGN_PROMPT.format(
    **user_preferences
)
interactive_prompt = edu_prompts.INTERACTIVE_LEARNING_PROMPT.format(
    **user_preferences
)
sample_prompt = edu_prompts.SAMPLE_TEST_PROMPT.format(
    **user_preferences
)
image_prompt = edu_prompts.IMAGE_GENERATION_PROMPT.format(
    **user_preferences
)

# Initialize agents for different educational tasks
curriculum_agent = Agent(llm=llm, max_loops=1, sop=curriculum_prompt)
interactive_learning_agent = Agent(
    llm=llm, max_loops=1, sop=interactive_prompt
)
sample_lesson_agent = Agent(llm=llm, max_loops=1, sop=sample_prompt)

# Create Sequential Workflow
workflow = SequentialWorkflow(max_loops=1)

# Add tasks to workflow with personalized prompts
workflow.add(curriculum_agent, "Generate a curriculum")
workflow.add(
    interactive_learning_agent, "Generate an interactive lesson"
)
workflow.add(sample_lesson_agent, "Generate a practice test")

# Execute the workflow for text-based tasks
workflow.run()

# Generate an image using Stable Diffusion
image_result = sd_api.run(image_prompt)

# Output results for each task
for task in workflow.tasks:
    print(
        f"Task Description: {task.description}\nResult:"
        f" {task.result}\n"
    )

# Output image result
print(
    "Image Generation Task: Generate an image for the interactive"
    f" lesson\nResult: {image_result}"
)
@@ -0,0 +1,149 @@
import os

from dotenv import load_dotenv

from swarms.models import OpenAIChat
from swarms.prompts.code_interpreter import CODE_INTERPRETER
from swarms.structs import Agent
from swarms.prompts.programming import TEST_SOP, DOCUMENTATION_SOP
from termcolor import colored

load_dotenv()


FEATURE = (
    "Implement an all-new signup system in typescript using supabase"
)

CODEBASE = """
import React, { useState } from 'react';
import UpperPanel from './UpperPanel';
import LowerPanel from './LowerPanel';

const MainPanel = () => {
  const [promptInstructionForLowerPanel, setPromptInstructionForLowerPanel] = useState('');
  const [formData, setFormData] = useState('');
  const [isLoading, setIsLoading] = useState(false);

  return (
    <div className="flex h-screen">
      <UpperPanel setPromptInstructionForLowerPanel={setPromptInstructionForLowerPanel}
        isLoading={isLoading}
        setIsLoading={setIsLoading}
      />
      <LowerPanel promptInstruction={promptInstructionForLowerPanel} isLoading={isLoading} />
    </div>
  );
};

export default MainPanel;
"""

# Load the environment variables
api_key = os.getenv("OPENAI_API_KEY")

# Initialize the language model
llm = OpenAIChat(
    model_name="gpt-4",
    openai_api_key=api_key,
    temperature=0.5,
    max_tokens=4000,
)

# Product Manager Agent init
product_manager_agent = Agent(
    llm=llm, max_loops=1, sop=CODE_INTERPRETER, autosave=True
)

# Initialize the frontend feature implementer agent
feature_implementer_frontend = Agent(
    llm=llm, max_loops=1, sop=CODE_INTERPRETER, autosave=True
)

# Create another agent for the backend
feature_implementer_backend = Agent(
    llm=llm, max_loops=1, sop=CODE_INTERPRETER, autosave=True
)

# Create an agent for testing
tester_agent = Agent(
    llm=llm, max_loops=1, sop=TEST_SOP, autosave=True
)

# Create an agent for documentation
documenting_agent = Agent(
    llm=llm, max_loops=1, sop=DOCUMENTATION_SOP, autosave=True
)


# Product Agent prompt
def feature_codebase_product_agentprompt(
    feature: str, codebase: str
) -> str:
    prompt = (
        "Create an algorithmic pseudocode for an all-new feature:"
        f" {feature} based on this codebase: {codebase}"
    )
    return prompt


# Product Manager Agent
product_manager_out = product_manager_agent.run(
    feature_codebase_product_agentprompt(FEATURE, CODEBASE)
)
print(
    colored(
        (
            "---------------------------- Product Manager Plan:"
            f" {product_manager_out}"
        ),
        "cyan",
    )
)

# Feature Implementer Agent
agent1_out = feature_implementer_frontend.run(
    f"Create the frontend code for {FEATURE} in markdown based off of"
    f" this algorithmic pseudocode: {product_manager_out} the logic"
    f" based on the following codebase: {CODEBASE}"
)
print(
    colored(
        (
            "--------------------- Feature Implementer Code logic:"
            f" {agent1_out}"
        ),
        "cyan",
    )
)

# Tester agent
tester_agent_out = tester_agent.run(
    f"Create tests for the following code: {agent1_out}"
)
print(
    colored(
        (
            "---------------------------- Tests for the logic:"
            f" {tester_agent_out}"
        ),
        "green",
    )
)


# Documentation Agent
documenter_agent_out = documenting_agent.run(
    f"Document the following code: {agent1_out}"
)
print(
    colored(
        (
            "---------------------------- Documentation for the"
            f" logic: {documenter_agent_out}"
        ),
        "yellow",
    )
)
@@ -0,0 +1,13 @@
#!/bin/bash

# Define the base URL
base_url="http://localhost:8000"

# Define the JSON payload
payload='{"feature": "login system", "codebase": "existing codebase here"}'

# Send POST request
echo "Sending request to /agent/ endpoint..."
response=$(curl -s -X POST "$base_url/agent/" -H "Content-Type: application/json" -d "$payload")

echo "Response: $response"
@@ -0,0 +1,13 @@
#!/bin/bash

# Define the base URL
base_url="http://localhost:8000"

# Define the JSON payload
payload='{"feature": "login system", "codebase": "existing codebase here"}'

# Send POST request
echo "Sending request to /agent/ endpoint..."
response=$(curl -s -X POST "$base_url/agent/" -H "Content-Type: application/json" -d "$payload")

echo "Response: $response"
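For reference, a minimal Python equivalent of the request these scripts send — a sketch that assumes the same local server at http://localhost:8000 with an /agent/ endpoint, and that the `requests` package is installed:

import requests

# Hypothetical client for the local /agent/ endpoint exercised by the scripts above.
payload = {"feature": "login system", "codebase": "existing codebase here"}
response = requests.post(
    "http://localhost:8000/agent/",
    json=payload,  # serializes the body and sets Content-Type: application/json
    timeout=60,
)
print("Response:", response.text)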
@@ -1,96 +0,0 @@
import os
import datetime
from dotenv import load_dotenv
from swarms.models.stable_diffusion import StableDiffusion
from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.models import OpenAIChat
from swarms.structs import Agent

# Load environment variables
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
stability_api_key = os.getenv("STABILITY_API_KEY")

# Initialize the models
vision_api = GPT4VisionAPI(api_key=openai_api_key)
sd_api = StableDiffusion(api_key=stability_api_key)
gpt_api = OpenAIChat(openai_api_key=openai_api_key)


class Idea2Image(Agent):
    def __init__(self, llm, vision_api):
        super().__init__(llm=llm)
        self.vision_api = vision_api

    def run(self, initial_prompt, num_iterations, run_folder):
        current_prompt = initial_prompt

        for i in range(num_iterations):
            print(f"Iteration {i}: Image generation and analysis")

            if i == 0:
                current_prompt = self.enrich_prompt(current_prompt)
                print(f"Enriched Prompt: {current_prompt}")

            img = sd_api.generate_and_move_image(
                current_prompt, i, run_folder
            )
            if not img:
                print("Failed to generate image")
                break
            print(f"Generated image at: {img}")

            analysis = (
                self.vision_api.run(img, current_prompt)
                if img
                else None
            )
            if analysis:
                current_prompt += (
                    ". " + analysis[:500]
                )  # Ensure the analysis is concise
                print(f"Image Analysis: {analysis}")
            else:
                print(f"Failed to analyze image at: {img}")

    def enrich_prompt(self, prompt):
        enrichment_task = (
            "Create a concise and effective image generation prompt"
            " within 400 characters or less, based on Stable"
            " Diffusion and Dalle best practices. Starting prompt:"
            f" \n\n'{prompt}'\n\nImprove the prompt with any"
            " applicable details or keywords by considering the"
            " following aspects: \n1. Subject details (like actions,"
            " emotions, environment) \n2. Artistic style (such as"
            " surrealism, hyperrealism) \n3. Medium (digital"
            " painting, oil on canvas) \n4. Color themes and"
            " lighting (like warm colors, cinematic lighting) \n5."
            " Composition and framing (close-up, wide-angle) \n6."
            " Additional elements (like a specific type of"
            " background, weather conditions) \n7. Any other"
            " artistic or thematic details that can make the image"
            " more vivid and compelling."
        )
        llm_result = self.llm.generate([enrichment_task])
        return (
            llm_result.generations[0][0].text[:500]
            if llm_result.generations
            else None
        )


# User input and setup
user_prompt = input("Prompt for image generation: ")
num_iterations = int(
    input("Enter the number of iterations for image improvement: ")
)
run_folder = os.path.join(
    "runs", datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
)
os.makedirs(run_folder, exist_ok=True)

# Initialize and run the agent
idea2image_agent = Idea2Image(gpt_api, vision_api)
idea2image_agent.run(user_prompt, num_iterations, run_folder)

print("Image improvement process completed.")
@@ -1,7 +0,0 @@
"""
Idea 2 img

task -> gpt4 text -> dalle3 img -> gpt4vision img + text analyze img -> dalle3 img -> loop

"""
from swarms.models.gpt4_vision_api import GPT4VisionAPI
@@ -0,0 +1,185 @@
import datetime
import os

import streamlit as st
from dotenv import load_dotenv

from swarms.models import OpenAIChat
from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.models.stable_diffusion import StableDiffusion
from swarms.structs import Agent

# Load environment variables
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
stability_api_key = os.getenv("STABLE_API_KEY")

# Initialize the models
vision_api = GPT4VisionAPI(api_key=openai_api_key)
sd_api = StableDiffusion(api_key=stability_api_key)
gpt_api = OpenAIChat(openai_api_key=openai_api_key)


class Idea2Image(Agent):
    def __init__(self, llm, vision_api):
        super().__init__(llm=llm)
        self.vision_api = vision_api

    def run(self, initial_prompt, num_iterations, run_folder):
        current_prompt = initial_prompt

        for i in range(num_iterations):
            print(f"Iteration {i}: Image generation and analysis")

            if i == 0:
                current_prompt = self.enrich_prompt(current_prompt)
                print(f"Enriched Prompt: {current_prompt}")

            img = sd_api.generate_and_move_image(
                current_prompt, i, run_folder
            )
            if not img:
                print("Failed to generate image")
                break
            print(f"Generated image at: {img}")

            analysis = (
                self.vision_api.run(img, current_prompt)
                if img
                else None
            )
            if analysis:
                current_prompt += (
                    ". " + analysis[:500]
                )  # Ensure the analysis is concise
                print(f"Image Analysis: {analysis}")
            else:
                print(f"Failed to analyze image at: {img}")

    def enrich_prompt(self, prompt):
        enrichment_task = (
            "Create a concise and effective image generation prompt"
            " within 400 characters or less, based on Stable"
            " Diffusion and Dalle best practices to help it create"
            " much better images. Starting prompt:"
            f" \n\n'{prompt}'\n\nImprove the prompt with any"
            " applicable details or keywords by considering the"
            " following aspects: \n1. Subject details (like actions,"
            " emotions, environment) \n2. Artistic style (such as"
            " surrealism, hyperrealism) \n3. Medium (digital"
            " painting, oil on canvas) \n4. Color themes and"
            " lighting (like warm colors, cinematic lighting) \n5."
            " Composition and framing (close-up, wide-angle) \n6."
            " Additional elements (like a specific type of"
            " background, weather conditions) \n7. Any other"
            " artistic or thematic details that can make the image"
            " more vivid and compelling. Help the image generator"
            " create better images by enriching the prompt."
        )
        llm_result = self.llm.generate([enrichment_task])
        return (
            llm_result.generations[0][0].text[:500]
            if llm_result.generations
            else None
        )

    def run_gradio(self, initial_prompt, num_iterations, run_folder):
        results = []
        current_prompt = initial_prompt

        for i in range(num_iterations):
            enriched_prompt = (
                self.enrich_prompt(current_prompt)
                if i == 0
                else current_prompt
            )
            img_path = sd_api.generate_and_move_image(
                enriched_prompt, i, run_folder
            )
            analysis = (
                self.vision_api.run(img_path, enriched_prompt)
                if img_path
                else None
            )

            if analysis:
                current_prompt += (
                    ". " + analysis[:500]
                )  # Ensuring the analysis is concise
            results.append((enriched_prompt, img_path, analysis))

        return results


# print(
#     colored("---------------------------------------- MultiModal Tree of Thought agents for Image Generation", "cyan", attrs=["bold"])
# )
# # User input and setup
# user_prompt = input("Prompt for image generation: ")
# num_iterations = int(
#     input("Enter the number of iterations for image improvement: ")
# )
# run_folder = os.path.join(
#     "runs", datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
# )
# os.makedirs(run_folder, exist_ok=True)

# print(
#     colored(
#         f"---------------------------------- Running Multi-Modal Tree of thoughts agent with {num_iterations} iterations", "green"
#     )
# )
# # Initialize and run the agent
# idea2image_agent = Idea2Image(gpt_api, vision_api)
# idea2image_agent.run(user_prompt, num_iterations, run_folder)

# print("Idea space has been traversed.")


# Load environment variables and initialize the models
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
stability_api_key = os.getenv("STABLE_API_KEY")
vision_api = GPT4VisionAPI(api_key=openai_api_key)
sd_api = StableDiffusion(api_key=stability_api_key)
gpt_api = OpenAIChat(openai_api_key=openai_api_key)

# Define the modified Idea2Image class here

# Streamlit UI layout
st.title(
    "Explore the infinite Multi-Modal Idea Space with Idea2Image"
)
user_prompt = st.text_input("Prompt for image generation:")
num_iterations = st.number_input(
    "Enter the number of iterations for image improvement:",
    min_value=1,
    step=1,
)

if st.button("Generate Image"):
    run_folder = os.path.join(
        "runs", datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    )
    os.makedirs(run_folder, exist_ok=True)
    idea2image_agent = Idea2Image(gpt_api, vision_api)

    results = idea2image_agent.run_gradio(
        user_prompt, num_iterations, run_folder
    )

    for i, (enriched_prompt, img_path, analysis) in enumerate(
        results
    ):
        st.write(f"Iteration {i+1}:")
        st.write("Enriched Prompt:", enriched_prompt)
        if img_path:
            st.image(img_path, caption="Generated Image")
        else:
            st.error("Failed to generate image")
        if analysis:
            st.write("Image Analysis:", analysis)

    st.success("Idea space has been traversed.")

# [Add any additional necessary code adjustments]
@@ -0,0 +1,114 @@
"""
Multi-modal tree of thoughts that leverages the GPT-4 language model and the
Stable Diffusion model to generate a multimodal output, evaluate the
output on a metric from 0.0 to 1.0, and then run a search algorithm using DFS and BFS and return the best output.


task: Generate an image of a swarm of bees -> Image generator -> GPT4V evaluates the img from 0.0 to 1.0 -> DFS/BFS -> return the best output


- GPT4Vision will evaluate the image from 0.0 to 1.0 based on how likely it accomplishes the task
- DFS/BFS will search for the best output based on the evaluation from GPT4Vision
- The output will be a multimodal output that is a combination of the image and the text
- The output will be evaluated by GPT4Vision
- The prompt to the image generator will be optimized from the output of GPT4Vision and the search
"""

import os
from dotenv import load_dotenv
from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.models.stable_diffusion import StableDiffusion
from termcolor import colored

# Load the environment variables
load_dotenv()

# Get the API keys from the environment
api_key = os.environ.get("OPENAI_API_KEY")
stable_api_key = os.environ.get("STABLE_API_KEY")


# Initialize the vision-language model
llm = GPT4VisionAPI(
    openai_api_key=api_key,
    max_tokens=500,
)

# IMG Generator
img_generator = StableDiffusion(api_key=stable_api_key)


# The task to optimize for
task = "Garden of Eden futuristic city graphic art"


def evaluate_img(llm, task: str, img: str):
    EVAL_IMG = f"""
    Evaluate the image: {img} on a scale from 0.0 to 1.0 based on how likely it accomplishes the task: {task}. Output nothing other than the float representing the evaluated img.
    """
    out = llm.run(task=EVAL_IMG, img=img)
    out = float(out)
    return out


def enrichment_prompt(starting_prompt: str, evaluated_img: str):
    enrichment_task = (
        "Create a concise and effective image generation prompt"
        " within 400 characters or less, based on Stable Diffusion"
        " and Dalle best practices. Starting prompt:"
        f" \n\n'{starting_prompt}'\n\nImprove the prompt with any"
        " applicable details or keywords by considering the"
        " following aspects: \n1. Subject details (like actions,"
        " emotions, environment) \n2. Artistic style (such as"
        " surrealism, hyperrealism) \n3. Medium (digital painting,"
        " oil on canvas) \n4. Color themes and lighting (like warm"
        " colors, cinematic lighting) \n5. Composition and framing"
        " (close-up, wide-angle) \n6. Additional elements (like a"
        " specific type of background, weather conditions) \n7. Any"
        " other artistic or thematic details that can make the image"
        " more vivid and compelling. 8. Based on the evaluation of"
        " the first generated prompt used by the first prompt:"
        f" {evaluated_img} Enrich the prompt to generate a more"
        " compelling image. Output only a new prompt to create a"
        " better image"
    )
    return enrichment_task


# Main loop: a greedy search that keeps the best-scoring image so far
max_iterations = 10  # Define the maximum number of iterations
best_score = 0
best_image_path = None

for _ in range(max_iterations):
    # Generate an image and get its path
    print(colored(f"Generating img for Task: {task}", "purple"))

    img_path = img_generator.run(
        task=task
    )  # This should return the file path of the generated image
    img_path = img_path[0]
    print(colored(f"Generated Image Path: {img_path}", "green"))

    # Evaluate the image by passing the file path
    score = evaluate_img(llm, task, img_path)
    print(
        colored(
            f"Evaluated Image Score: {score} for {img_path}", "cyan"
        )
    )

    # Update the best score and image path if necessary
    if score > best_score:
        best_score = score
        best_image_path = img_path

    # Enrich the prompt based on the evaluation
    prompt = enrichment_prompt(task, score)
    print(colored(f"Enrichment Prompt: {prompt}", "yellow"))


# Output the best result
print("Best Image Path:", best_image_path)
print("Best Score:", best_score)
@@ -0,0 +1,47 @@
import os
from dotenv import load_dotenv
from swarms.models import OpenAIChat, GPT4VisionAPI
from swarms.structs import Agent, SequentialWorkflow
import swarms.prompts.urban_planning as upp

# Load environment variables
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
stability_api_key = os.getenv("STABILITY_API_KEY")

# Initialize language model
llm = OpenAIChat(openai_api_key=api_key, temperature=0.5, max_tokens=3000)

# Initialize Vision model
vision_api = GPT4VisionAPI(api_key=api_key)

# Initialize agents for urban planning tasks
architecture_analysis_agent = Agent(llm=llm, max_loops=1, sop=upp.ARCHITECTURE_ANALYSIS_PROMPT)
infrastructure_evaluation_agent = Agent(llm=llm, max_loops=1, sop=upp.INFRASTRUCTURE_EVALUATION_PROMPT)
traffic_flow_analysis_agent = Agent(llm=llm, max_loops=1, sop=upp.TRAFFIC_FLOW_ANALYSIS_PROMPT)
environmental_impact_assessment_agent = Agent(llm=llm, max_loops=1, sop=upp.ENVIRONMENTAL_IMPACT_ASSESSMENT_PROMPT)
public_space_utilization_agent = Agent(llm=llm, max_loops=1, sop=upp.PUBLIC_SPACE_UTILIZATION_PROMPT)
socioeconomic_impact_analysis_agent = Agent(llm=llm, max_loops=1, sop=upp.SOCIOECONOMIC_IMPACT_ANALYSIS_PROMPT)

# Initialize the final planning agent
final_plan_agent = Agent(llm=llm, max_loops=1, sop=upp.FINAL_URBAN_IMPROVEMENT_PLAN_PROMPT)

# Create Sequential Workflow
workflow = SequentialWorkflow(max_loops=1)

# Add tasks to workflow with personalized prompts
workflow.add(architecture_analysis_agent, "Architecture Analysis")
workflow.add(infrastructure_evaluation_agent, "Infrastructure Evaluation")
workflow.add(traffic_flow_analysis_agent, "Traffic Flow Analysis")
workflow.add(environmental_impact_assessment_agent, "Environmental Impact Assessment")
workflow.add(public_space_utilization_agent, "Public Space Utilization")
workflow.add(socioeconomic_impact_analysis_agent, "Socioeconomic Impact Analysis")
workflow.add(final_plan_agent, "Generate the final urban improvement plan based on all previous agents' findings")

# Execute the workflow for the individual analysis tasks and the final plan
workflow.run()

# Output results for each task and the final plan
for task in workflow.tasks:
    print(f"Task Description: {task.description}\nResult: {task.result}\n")
@@ -1,67 +0,0 @@
from swarms.models import Anthropic
from swarms.structs import Agent
from swarms.tools.tool import tool

import asyncio


llm = Anthropic(
    anthropic_api_key="",
)


async def async_load_playwright(url: str) -> str:
    """Load the specified URLs using Playwright and parse using BeautifulSoup."""
    from bs4 import BeautifulSoup
    from playwright.async_api import async_playwright

    results = ""
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        try:
            page = await browser.new_page()
            await page.goto(url)

            page_source = await page.content()
            soup = BeautifulSoup(page_source, "html.parser")

            for script in soup(["script", "style"]):
                script.extract()

            text = soup.get_text()
            lines = (line.strip() for line in text.splitlines())
            chunks = (
                phrase.strip()
                for line in lines
                for phrase in line.split("  ")
            )
            results = "\n".join(chunk for chunk in chunks if chunk)
        except Exception as e:
            results = f"Error: {e}"
        await browser.close()
    return results


def run_async(coro):
    event_loop = asyncio.get_event_loop()
    return event_loop.run_until_complete(coro)


@tool
def browse_web_page(url: str) -> str:
    """Verbose way to scrape a whole webpage. Likely to cause issues parsing."""
    return run_async(async_load_playwright(url))


## Initialize the workflow
agent = Agent(
    llm=llm,
    max_loops=5,
    tools=[browse_web_page],
    dashboard=True,
)

out = agent.run(
    "Generate a 10,000 word blog on mental clarity and the benefits"
    " of meditation."
)
@@ -0,0 +1,19 @@
from swarms.tools.tool import tool
from swarms.tools.tool_func_doc_scraper import scrape_tool_func_docs


@tool
def search_api(query: str) -> str:
    """Search API

    Args:
        query (str): The search query.

    Returns:
        str: The search results.
    """
    print(f"Searching API for {query}")


tool_docs = scrape_tool_func_docs(search_api)
print(tool_docs)
@@ -0,0 +1,39 @@
import os
from swarms.models import OpenAIChat
from swarms.structs import Agent
from swarms.tools.tool import tool
from dotenv import load_dotenv

load_dotenv()

api_key = os.environ.get("OPENAI_API_KEY")


llm = OpenAIChat(api_key=api_key)

# @tool
# def search_api(query: str) -> str:
#     """Search API
#
#     Args:
#         query (str): _description_
#
#     Returns:
#         str: _description_
#     """
#     print(f"Searching API for {query}")


## Initialize the workflow
agent = Agent(
    llm=llm,
    max_loops=5,
    # tools=[search_api],
    dashboard=True,
)

out = agent.run(
    "Use the search api to find the best restaurants in New York"
    " City."
)
print(out)
@@ -0,0 +1,22 @@
from swarms.tools.tool import tool
from swarms.tools.tool_func_doc_scraper import scrape_tool_func_docs

# Define a tool by decorating a function with the tool decorator and providing a docstring


@tool(return_direct=True)
def search_api(query: str):
    """Search the web for the query

    Args:
        query (str): The search query.

    Returns:
        str: The search results for the query.
    """
    return f"Search results for {query}"


# Scrape the tool func docs to prepare for injection into the agent prompt
out = scrape_tool_func_docs(search_api)
print(out)
@@ -0,0 +1,60 @@
from abc import ABC, abstractmethod
from typing import Any, Dict


class VectorDatabase(ABC):
    @abstractmethod
    def add(
        self, vector: Dict[str, Any], metadata: Dict[str, Any]
    ) -> None:
        """
        Add a vector to the database.

        Args:
            vector (Dict[str, Any]): The vector to add.
            metadata (Dict[str, Any]): Metadata associated with the vector.
        """
        pass

    @abstractmethod
    def query(
        self, vector: Dict[str, Any], num_results: int
    ) -> Dict[str, Any]:
        """
        Query the database for vectors similar to the given vector.

        Args:
            vector (Dict[str, Any]): The vector to compare against.
            num_results (int): The number of similar vectors to return.

        Returns:
            Dict[str, Any]: The most similar vectors and their associated metadata.
        """
        pass

    @abstractmethod
    def delete(self, vector_id: str) -> None:
        """
        Delete a vector from the database.

        Args:
            vector_id (str): The ID of the vector to delete.
        """
        pass

    @abstractmethod
    def update(
        self,
        vector_id: str,
        vector: Dict[str, Any],
        metadata: Dict[str, Any],
    ) -> None:
        """
        Update a vector in the database.

        Args:
            vector_id (str): The ID of the vector to update.
            vector (Dict[str, Any]): The new vector.
            metadata (Dict[str, Any]): The new metadata.
        """
        pass
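To illustrate the contract above, here is a minimal in-memory sketch of an implementation. It is hypothetical (not part of this commit): vectors are assumed to be dicts of str to float, the ID is assumed to arrive in the metadata under an "id" key, and similarity is a naive dot product over shared keys.

from typing import Any, Dict


class InMemoryVectorDatabase(VectorDatabase):
    """Toy, non-persistent store illustrating the VectorDatabase contract."""

    def __init__(self) -> None:
        self._vectors: Dict[str, Dict[str, Any]] = {}
        self._metadata: Dict[str, Dict[str, Any]] = {}

    def add(self, vector: Dict[str, Any], metadata: Dict[str, Any]) -> None:
        # Assumption: the caller supplies the ID in the metadata (hypothetical).
        vector_id = metadata["id"]
        self._vectors[vector_id] = vector
        self._metadata[vector_id] = metadata

    def query(self, vector: Dict[str, Any], num_results: int) -> Dict[str, Any]:
        # Naive similarity: dot product over the dimensions both vectors share.
        def score(stored: Dict[str, Any]) -> float:
            return sum(vector[k] * stored[k] for k in vector.keys() & stored.keys())

        ranked = sorted(
            self._vectors, key=lambda vid: score(self._vectors[vid]), reverse=True
        )
        return {vid: self._metadata[vid] for vid in ranked[:num_results]}

    def delete(self, vector_id: str) -> None:
        self._vectors.pop(vector_id, None)
        self._metadata.pop(vector_id, None)

    def update(
        self, vector_id: str, vector: Dict[str, Any], metadata: Dict[str, Any]
    ) -> None:
        self._vectors[vector_id] = vector
        self._metadata[vector_id] = metadata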
@@ -0,0 +1,172 @@
import os
from termcolor import colored
import logging
from typing import Dict, List, Optional
import chromadb
import tiktoken
from chromadb.config import Settings
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
from dotenv import load_dotenv
from swarms.utils.token_count_tiktoken import limit_tokens_from_string

load_dotenv()

# ChromaDB settings
client = chromadb.Client(Settings(anonymized_telemetry=False))


# ChromaDB client
def get_chromadb_client():
    return client


# OpenAI API key
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")


# Results storage using local ChromaDB
class ChromaDB:
    """
    ChromaDB database

    Args:
        metric (str): The distance metric for the collection, e.g. "cosine".
        RESULTS_STORE_NAME (str): The name of the collection to store results in.
        LLM_MODEL (str): The name of the language model the results come from.
        openai_api_key (str): The OpenAI API key used for embeddings.

    Methods:
        add: Add a result to the collection.
        query: Query the collection for the top results.

    Examples:
    >>> chromadb = ChromaDB(
    >>>     metric="cosine",
    >>>     RESULTS_STORE_NAME="results",
    >>>     LLM_MODEL="gpt3",
    >>>     openai_api_key=OPENAI_API_KEY,
    >>> )
    >>> chromadb.add(task, result, result_id)
    >>> chromadb.query(query, top_results_num)
    """

    def __init__(
        self,
        metric: str,
        RESULTS_STORE_NAME: str,
        LLM_MODEL: str,
        openai_api_key: str = OPENAI_API_KEY,
        top_results_num: int = 3,
        limit_tokens: Optional[int] = 1000,
    ):
        self.metric = metric
        self.RESULTS_STORE_NAME = RESULTS_STORE_NAME
        self.LLM_MODEL = LLM_MODEL
        self.openai_api_key = openai_api_key
        self.top_results_num = top_results_num
        self.limit_tokens = limit_tokens

        # Disable ChromaDB logging
        logging.getLogger("chromadb").setLevel(logging.ERROR)
        # Create Chroma client with local persistence
        chroma_persist_dir = "chroma"
        chroma_client = chromadb.PersistentClient(
            settings=chromadb.config.Settings(
                persist_directory=chroma_persist_dir,
            )
        )

        # Create embedding function
        embedding_function = OpenAIEmbeddingFunction(
            api_key=openai_api_key
        )

        # Create Chroma collection
        self.collection = chroma_client.get_or_create_collection(
            name=RESULTS_STORE_NAME,
            metadata={"hnsw:space": metric},
            embedding_function=embedding_function,
        )

    def add(self, task: Dict, result: str, result_id: str):
        """Adds a result to the ChromaDB collection

        Args:
            task (Dict): The task dict; must contain a "task_name" key.
            result (str): The result text to store.
            result_id (str): The unique ID to store the result under.
        """

        try:
            # Embed the result
            embeddings = (
                self.collection.embedding_function.embed([result])[0]
                .tolist()
                .copy()
            )

            # If an entry with this ID already exists, update it in place
            if (
                len(
                    self.collection.get(ids=[result_id], include=[])[
                        "ids"
                    ]
                )
                > 0
            ):
                self.collection.update(
                    ids=result_id,
                    embeddings=embeddings,
                    documents=result,
                    metadatas={
                        "task": task["task_name"],
                        "result": result,
                    },
                )

            # Otherwise, add it as a new entry
            else:
                self.collection.add(
                    ids=result_id,
                    embeddings=embeddings,
                    documents=result,
                    metadatas={
                        "task": task["task_name"],
                        "result": result,
                    },
                )
        except Exception as error:
            print(
                colored(f"Error adding to ChromaDB: {error}", "red")
            )

    def query(
        self,
        query: str,
    ) -> str:
        """Queries the ChromaDB collection with a query for the top results

        Args:
            query (str): The query text.

        Returns:
            str: The task names of the top matching results, token-limited.
        """
        try:
            count: int = self.collection.count()
            if count == 0:
                return []
            results = self.collection.query(
                query_texts=query,
                n_results=min(self.top_results_num, count),
                include=["metadatas"],
            )
            out = [item["task"] for item in results["metadatas"][0]]
            # Join the task names and cap the token count
            out = limit_tokens_from_string(
                " ".join(out), "gpt-4", self.limit_tokens
            )
            return out
        except Exception as error:
            print(colored(f"Error querying ChromaDB: {error}", "red"))
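A short usage sketch for the wrapper above — it assumes a valid OPENAI_API_KEY in the environment, and shows the one non-obvious detail: `add` expects the task as a dict carrying a "task_name" key.

# Hypothetical usage of the ChromaDB wrapper defined above.
memory = ChromaDB(
    metric="cosine",
    RESULTS_STORE_NAME="results",
    LLM_MODEL="gpt-4",
)

# The task must be a dict with a "task_name" key (see ChromaDB.add).
memory.add(
    task={"task_name": "Summarize the meeting notes"},
    result="The meeting covered Q3 roadmap priorities.",
    result_id="result-001",
)

print(memory.query("meeting notes"))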
@@ -0,0 +1,34 @@
user_preferences = {
    "subjects": "AI Cognitive Architectures",
    "learning_style": "Visual",
    "challenge_level": "Moderate",
}

# Extracting individual preferences
subjects = user_preferences["subjects"]
learning_style = user_preferences["learning_style"]
challenge_level = user_preferences["challenge_level"]


# Curriculum Design Prompt
CURRICULUM_DESIGN_PROMPT = f"""
Develop a semester-long curriculum tailored to student interests in {subjects}. Focus on incorporating diverse teaching methods suitable for a {learning_style} learning style.
The curriculum should challenge students at a {challenge_level} level, integrating both theoretical knowledge and practical applications. Provide a detailed structure, including
weekly topics, key objectives, and essential resources needed.
"""

# Interactive Learning Session Prompt
INTERACTIVE_LEARNING_PROMPT = f"""
Based on the curriculum, generate an interactive lesson plan for a student of {subjects} that caters to a {learning_style} learning style. Incorporate engaging elements and hands-on activities.
"""

# Sample Lesson Prompt
SAMPLE_TEST_PROMPT = f"""
Create a comprehensive sample test for the first week of the {subjects} curriculum, tailored for a {learning_style} learning style and at a {challenge_level} challenge level.
"""

# Image Generation for Education Prompt
IMAGE_GENERATION_PROMPT = f"""
Develop a stable diffusion prompt for an educational image/visual aid that aligns with the {subjects} curriculum, specifically designed to enhance understanding for students with a {learning_style}
learning style. This might include diagrams, infographics, and illustrative representations to simplify complex concepts. Ensure you output a 10/10 descriptive image generation prompt only.
"""
@@ -0,0 +1,40 @@
# urban_planning_prompts.py

# Architecture Analysis Prompt
ARCHITECTURE_ANALYSIS_PROMPT = """
Analyze the architectural styles, building designs, and construction materials visible in the urban area image provided. Provide insights on the historical influences, modern trends, and architectural diversity observed.
"""

# Infrastructure Evaluation Prompt
INFRASTRUCTURE_EVALUATION_PROMPT = """
Evaluate the infrastructure in the urban area image, focusing on roads, bridges, public transport, utilities, and communication networks. Assess their condition, capacity, and how they meet the needs of the urban population.
"""

# Traffic Flow Analysis Prompt
TRAFFIC_FLOW_ANALYSIS_PROMPT = """
Analyze the traffic flow and transportation systems visible in the urban area image. Identify key traffic routes, congestion points, and the effectiveness of public transportation in addressing urban mobility.
"""

# Environmental Impact Assessment Prompt
ENVIRONMENTAL_IMPACT_ASSESSMENT_PROMPT = """
Assess the environmental impact of the urban area shown in the image. Look for green spaces, pollution sources, and sustainability practices. Provide insights into the balance between urban development and environmental conservation.
"""

# Public Space Utilization Prompt
PUBLIC_SPACE_UTILIZATION_PROMPT = """
Evaluate the public spaces in the urban area, such as parks, squares, and recreational areas, as shown in the image. Assess their accessibility, condition, and how they contribute to the community's quality of life.
"""

# Socioeconomic Impact Analysis Prompt
SOCIOECONOMIC_IMPACT_ANALYSIS_PROMPT = """
Analyze the socioeconomic impact of the urban environment as depicted in the image. Consider factors such as housing, employment opportunities, commercial activities, and social disparities.
"""

# Final Urban Improvement Plan Prompt
FINAL_URBAN_IMPROVEMENT_PLAN_PROMPT = """
Based on the architecture analysis, infrastructure evaluation, traffic flow analysis, environmental impact assessment, public space utilization, and socioeconomic impact analysis provided by the previous agents, develop a comprehensive urban improvement plan. The plan should address key issues identified, propose practical solutions, and outline strategies to enhance the overall quality of life, sustainability, and efficiency of the urban area.
"""


# Additional or custom prompts can be added below as needed.
@@ -0,0 +1,3 @@
from swarms.tools.tool_func_doc_scraper import scrape_tool_func_docs

__all__ = ["scrape_tool_func_docs"]
@@ -0,0 +1,45 @@
import inspect
from typing import Callable
from termcolor import colored


def scrape_tool_func_docs(fn: Callable) -> str:
    """
    Scrape the docstring and parameters of a function decorated with `tool` and return a formatted string.

    Args:
        fn (Callable): The function to scrape.

    Returns:
        str: A string containing the function's name, documentation string, and a list of its parameters. Each parameter is represented as a line containing the parameter's name, default value, and annotation.
    """
    try:
        # If the function is a tool, get the original function
        if hasattr(fn, "func"):
            fn = fn.func

        signature = inspect.signature(fn)
        parameters = []
        for name, param in signature.parameters.items():
            parameters.append(
                f"Name: {name}, Default:"
                f" {param.default if param.default is not param.empty else 'None'},"
                " Annotation:"
                f" {param.annotation if param.annotation is not param.empty else 'None'}"
            )
        parameters_str = "\n".join(parameters)
        return (
            f"Function: {fn.__name__}\nDocstring:"
            f" {inspect.getdoc(fn)}\nParameters:\n{parameters_str}"
        )
    except Exception as error:
        print(
            colored(
                (
                    f"Error scraping tool function docs: {error}."
                    " Try optimizing your inputs with different"
                    " variables and attempt once more."
                ),
                "red",
            )
        )
@@ -0,0 +1,48 @@
import re
import json

# Note: these helpers are written as methods (they take `self`) and appear
# intended to live on an agent class that exposes `tool_find_by_name`.


def extract_tool_commands(self, text: str):
    """
    Extract the tool commands from the text.

    Example:
    ```json
    {
        "tool": "tool_name",
        "params": {
            "tool1": "inputs",
            "param2": "value2"
        }
    }
    ```
    """
    # Regex to find JSON-like strings fenced as ```json ... ```
    pattern = r"```json(.+?)```"
    matches = re.findall(pattern, text, re.DOTALL)
    json_commands = []
    for match in matches:
        try:
            # Parse each fenced block and collect the command dict
            command = json.loads(match)
            json_commands.append(command)
        except Exception as error:
            print(f"Error parsing JSON command: {error}")
    return json_commands


def parse_and_execute_tools(response: str):
    """Parse and execute the tools"""
    json_commands = extract_tool_commands(response)
    for command in json_commands:
        tool_name = command.get("tool")
        params = command.get("params", {})
        execute_tools(tool_name, params)


def execute_tools(self, tool_name, params):
    """Execute the tool with the provided params"""
    tool = self.tool_find_by_name(tool_name)
    if tool:
        # Execute the tool with the provided parameters
        tool_result = tool.run(**params)
        print(tool_result)
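A quick sketch of how the parser behaves on a model response. The response string is hypothetical; `extract_tool_commands` never touches its `self` parameter during parsing, so `None` is passed here for illustration only.

# Hypothetical model response containing one fenced tool command.
response = (
    "Here is the tool call:\n"
    "```json\n"
    '{"tool": "search_api", "params": {"query": "weather in NYC"}}\n'
    "```\n"
)

commands = extract_tool_commands(None, response)
print(commands)
# [{'tool': 'search_api', 'params': {'query': 'weather in NYC'}}]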
@@ -0,0 +1,42 @@
from concurrent import futures
from concurrent.futures import Future
from typing import TypeVar, Dict

T = TypeVar("T")


def execute_futures_dict(
    fs_dict: Dict[str, Future[T]]
) -> Dict[str, T]:
    """Execute a dictionary of futures and return the results.

    Args:
        fs_dict (dict[str, futures.Future[T]]): A dictionary mapping keys to futures.

    Returns:
        dict[str, T]: A dictionary mapping the same keys to the futures' results.

    Example:
    >>> import concurrent.futures
    >>> import time
    >>> import random
    >>> import swarms.utils.futures
    >>> def f(x):
    ...     time.sleep(random.random())
    ...     return x
    >>> with concurrent.futures.ThreadPoolExecutor() as executor:
    ...     fs_dict = {
    ...         str(i): executor.submit(f, i)
    ...         for i in range(10)
    ...     }
    ...     print(swarms.utils.futures.execute_futures_dict(fs_dict))
    {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}

    """
    # Block until every future has completed, then collect the results by key
    futures.wait(
        fs_dict.values(),
        timeout=None,
        return_when=futures.ALL_COMPLETED,
    )

    return {key: future.result() for key, future in fs_dict.items()}
@@ -1,16 +0,0 @@
from concurrent import futures
from typing import TypeVar

T = TypeVar("T")


def execute_futures_dict(
    fs_dict: dict[str, futures.Future[T]]
) -> dict[str, T]:
    futures.wait(
        fs_dict.values(),
        timeout=None,
        return_when=futures.ALL_COMPLETED,
    )

    return {key: future.result() for key, future in fs_dict.items()}
@@ -1,15 +1,31 @@
 import re

-def extract_code_in_backticks_in_string(message: str) -> str:
-    """
-    To extract code from a string in markdown and return a string
-    """
-    pattern = (  # Non-greedy match between six backticks
-        r"```(.*?)```"
-    )
-    match = re.search(
-        pattern, message, re.DOTALL
-    )  # re.DOTALL to match newline chars
-    return match.group(1).strip() if match else None
+# def extract_code_in_backticks_in_string(s: str) -> str:
+#     """
+#     Extracts code blocks from a markdown string.
+#
+#     Args:
+#         s (str): The markdown string to extract code from.
+#
+#     Returns:
+#         list: A list of tuples. Each tuple contains the language of the code block (if specified) and the code itself.
+#     """
+#     pattern = r"```([\w\+\#\-\.\s]*)\n(.*?)```"
+#     matches = re.findall(pattern, s, re.DOTALL)
+#     out = [(match[0], match[1].strip()) for match in matches]
+#     print(out)
+
+
+def extract_code_in_backticks_in_string(s: str) -> str:
+    """
+    Extracts code blocks from a markdown string.
+
+    Args:
+        s (str): The markdown string to extract code from.
+
+    Returns:
+        str: A string containing all the code blocks.
+    """
+    pattern = r"```([\w\+\#\-\.\s]*)(.*?)```"
+    matches = re.findall(pattern, s, re.DOTALL)
+    return "\n".join(match[1].strip() for match in matches)
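For illustration, a quick check of the new extractor on a markdown string (hypothetical snippet, not part of the commit):

markdown = 'Reply:\n```json\n{"tool": "search_api"}\n```\ndone'
# The language tag is captured by the first group; the body by the second.
print(extract_code_in_backticks_in_string(markdown))
# {"tool": "search_api"}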
@@ -0,0 +1,27 @@
import tiktoken


def limit_tokens_from_string(
    string: str, model: str = "gpt-4", limit: int = 500
) -> str:
    """Limits the number of tokens in a string

    Args:
        string (str): The string to truncate.
        model (str): The model whose tokenizer to use.
        limit (int): The maximum number of tokens to keep.

    Returns:
        str: The string truncated to at most `limit` tokens.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except Exception:
        encoding = tiktoken.encoding_for_model(
            "gpt2"
        )  # Fallback for others.

    encoded = encoding.encode(string)

    out = encoding.decode(encoded[:limit])
    return out
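A small usage sketch of the truncation helper (illustrative):

text = "word " * 2000  # far more than 500 gpt-4 tokens
short = limit_tokens_from_string(text, model="gpt-4", limit=500)
# `short` decodes to at most 500 tokens of the original text.
print(len(short) < len(text))  # True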