diff --git a/playground/demos/education/education.py b/playground/demos/education/education.py
new file mode 100644
index 00000000..5b9d9df7
--- /dev/null
+++ b/playground/demos/education/education.py
@@ -0,0 +1,56 @@
+import os
+from dotenv import load_dotenv
+from swarms.models import OpenAIChat
+from swarms.models.stable_diffusion import StableDiffusion
+from swarms.structs import Agent, SequentialWorkflow
+import swarms.prompts.education as edu_prompts
+
+# Load environment variables
+load_dotenv()
+api_key = os.getenv("OPENAI_API_KEY")
+stability_api_key = os.getenv("STABILITY_API_KEY")
+
+# Initialize language model
+llm = OpenAIChat(openai_api_key=api_key, temperature=0.5, max_tokens=3000)
+
+# Initialize Stable Diffusion
+sd_api = StableDiffusion(api_key=stability_api_key)
+
+# User preferences (can be dynamically set in a real application)
+user_preferences = {
+    "subjects": "Cognitive Architectures",
+    "learning_style": "Visual",
+    "challenge_level": "Moderate"
+}
+
+# Formatted prompts from user preferences
+curriculum_prompt = edu_prompts.CURRICULUM_DESIGN_PROMPT.format(**user_preferences)
+interactive_prompt = edu_prompts.INTERACTIVE_LEARNING_PROMPT.format(**user_preferences)
+sample_prompt = edu_prompts.SAMPLE_TEST_PROMPT.format(**user_preferences)
+image_prompt = edu_prompts.IMAGE_GENERATION_PROMPT.format(**user_preferences)
+
+# Initialize agents for different educational tasks
+curriculum_agent = Agent(llm=llm, max_loops=1, sop=curriculum_prompt)
+interactive_learning_agent = Agent(llm=llm, max_loops=1, sop=interactive_prompt)
+sample_lesson_agent = Agent(llm=llm, max_loops=1, sop=sample_prompt)
+
+# Create Sequential Workflow
+workflow = SequentialWorkflow(max_loops=1)
+
+# Add tasks to workflow with personalized prompts
+workflow.add(curriculum_agent, "Generate a curriculum")
+workflow.add(interactive_learning_agent, "Generate an interactive lesson")
+workflow.add(sample_lesson_agent, "Generate a practice test")
+
+# Execute the workflow for text-based tasks
+workflow.run()
+
+# Generate an image using Stable Diffusion
+image_result = sd_api.run(image_prompt)
+
+# Output results for each task
+for task in workflow.tasks:
+    print(f"Task Description: {task.description}\nResult: {task.result}\n")
+
+# Output image result
+print(f"Image Generation Task: Generate an image for the interactive lesson\nResult: {image_result}")
diff --git a/swarms/models/gpt4_vision_api.py b/swarms/models/gpt4_vision_api.py
index 6efb68f4..cd6e5ddb 100644
--- a/swarms/models/gpt4_vision_api.py
+++ b/swarms/models/gpt4_vision_api.py
@@ -117,19 +117,13 @@ class GPT4VisionAPI:
         pass
 
     # Function to handle vision tasks
-    def run(
-        self,
-        task: Optional[str] = None,
-        img: Optional[str] = None,
-        *args,
-        **kwargs,
-    ):
+    def run(self, img, task):
         """Run the model."""
         try:
             base64_image = self.encode_image(img)
             headers = {
                 "Content-Type": "application/json",
-                "Authorization": f"Bearer {openai_api_key}",
+                "Authorization": f"Bearer {self.openai_api_key}",
             }
             payload = {
                 "model": self.model_name,
@@ -154,28 +148,24 @@ class GPT4VisionAPI:
                 "max_tokens": self.max_tokens,
             }
             response = requests.post(
-                self.openai_proxy,
-                headers=headers,
-                json=payload,
+                self.openai_proxy, headers=headers, json=payload
             )
             out = response.json()
 
-            content = out["choices"][0]["message"]["content"]
-
-            if self.streaming_enabled:
-                content = self.stream_response(content)
+            if "choices" in out and out["choices"]:
+                content = (
+                    out["choices"][0]
+                    .get("message", {})
+                    .get("content", None)
+                )
+                return content
             else:
-                pass
-
-            if self.beautify:
-                content = colored(content, "cyan")
-                print(content)
-            else:
-                print(content)
+                print("No valid response in 'choices'")
+                return None
 
         except Exception as error:
             print(f"Error with the request: {error}")
-            raise error
+            return None
 
     def video_prompt(self, frames):
         """
diff --git a/swarms/prompts/education.py b/swarms/prompts/education.py
new file mode 100644
index 00000000..0e7742d9
--- /dev/null
+++ b/swarms/prompts/education.py
@@ -0,0 +1,35 @@
+user_preferences = {
+    "subjects": "AI Cognitive Architectures",
+    "learning_style": "Visual",
+    "challenge_level": "Moderate",
+}
+
+# Extracting individual preferences
+subjects = user_preferences["subjects"]
+learning_style = user_preferences["learning_style"]
+challenge_level = user_preferences["challenge_level"]
+
+
+# Curriculum Design Prompt
+CURRICULUM_DESIGN_PROMPT = f"""
+Develop a semester-long curriculum tailored to student interests in {subjects}. Focus on incorporating diverse teaching methods suitable for a {learning_style} learning style.
+The curriculum should challenge students at a {challenge_level} level, integrating both theoretical knowledge and practical applications. Provide a detailed structure, including
+weekly topics, key objectives, and essential resources needed.
+"""
+
+# Interactive Learning Session Prompt
+INTERACTIVE_LEARNING_PROMPT = f"""
+Based on the curriculum, generate an interactive lesson plan for a student of {subjects} that caters to a {learning_style} learning style. Incorporate engaging elements and hands-on activities.
+"""
+
+# Sample Lesson Prompt
+SAMPLE_TEST_PROMPT = f"""
+Create a comprehensive sample test for the first week of the {subjects} curriculum, tailored for a {learning_style} learning style and at a {challenge_level} challenge level.
+"""
+
+# Image Generation for Education Prompt
+IMAGE_GENERATION_PROMPT = f"""
+Develop a stable diffusion prompt for an educational image/visual aid that aligns with the {subjects} curriculum, specifically designed to enhance understanding for students with a {learning_style}
+learning style. This might include diagrams, infographics, and illustrative representations to simplify complex concepts. Ensure you output a 10/10 descriptive image generation prompt only.
+"""
+