diff --git a/README.md b/README.md
index f94221d4..a80a307a 100644
--- a/README.md
+++ b/README.md
@@ -40,6 +40,7 @@ We have a small gallery of examples to run here, [for more check out the docs to
 
 ### `Flow` Example
 - The `Flow` is a superior iteration of the `LLMChain` from Langchain. Our intent with `Flow` is to create the most reliable loop structure that gives agents their "autonomy" through 3 main methods of interaction: user-specified loops, dynamic loops where the agent parses a stopping token, an interactive human-input version, or a mix of all 3.
+
 ```python
 from swarms.models import OpenAIChat
 
@@ -47,22 +48,37 @@ from swarms.structs import Flow
 
 api_key = ""
 
-
-# Initialize the language model,
-# This model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC
+# Initialize the language model; this can be swapped out for Anthropic, Hugging Face models like Mistral, etc.
 llm = OpenAIChat(
+    # model_name="gpt-4"
     openai_api_key=api_key,
     temperature=0.5,
+    # max_tokens=100,
 )
 
-# Initialize the flow
+## Initialize the workflow
 flow = Flow(
     llm=llm,
-    max_loops=5,
+    max_loops=2,
+    dashboard=True,
+    # stopping_condition=None,  # You can define a stopping condition as needed.
+    # loop_interval=1,
+    # retry_attempts=3,
+    # retry_interval=1,
+    # interactive=False,  # Set to 'True' for interactive mode.
+    # dynamic_temperature=False,  # Set to 'True' for dynamic temperature handling.
 )
 
-out = flow.run("Generate a 10,000 word blog, say Stop when done")
-print(out)
+# out = flow.load_state("flow_state.json")
+# temp = flow.dynamic_temperature()
+# filter = flow.add_response_filter("Trump")
+out = flow.run("Generate a 10,000 word blog on health and wellness.")
+# out = flow.validate_response(out)
+# out = flow.analyze_feedback(out)
+# out = flow.print_history_and_memory()
+# # out = flow.save_state("flow_state.json")
+# print(out)
+
 ```
 
diff --git a/pyproject.toml b/pyproject.toml
index e3a29e78..d8a561bd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "swarms"
-version = "1.9.5"
+version = "1.9.6"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez "]
@@ -28,7 +28,6 @@ openai = "*"
 langchain = "*"
 asyncio = "*"
 nest_asyncio = "*"
-pegasusx = "*"
 einops = "*"
 google-generativeai = "*"
 torch = "*"
@@ -48,10 +47,8 @@ beautifulsoup4 = "*"
 huggingface-hub = "*"
 pydantic = "*"
 tenacity = "*"
-redis = "*"
 Pillow = "*"
 chromadb = "*"
-agent-protocol = "*"
 open-interpreter = "*"
 tabulate = "*"
 termcolor = "*"
diff --git a/swarms/models/__init__.py b/swarms/models/__init__.py
index a0bec07f..b2a2b433 100644
--- a/swarms/models/__init__.py
+++ b/swarms/models/__init__.py
@@ -16,6 +16,8 @@ from swarms.models.kosmos_two import Kosmos
 from swarms.models.vilt import Vilt
 from swarms.models.nougat import Nougat
 from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA
+from swarms.models.gpt4v import GPT4Vision
+from swarms.models.dalle3 import Dalle3
 
 # from swarms.models.distilled_whisperx import DistilWhisperModel
 
@@ -43,4 +45,6 @@ __all__ = [
     "HuggingfaceLLM",
     "MPT7B",
     "WizardLLMStoryTeller",
+    "GPT4Vision",
+    "Dalle3",
 ]
diff --git a/swarms/models/dalle3.py b/swarms/models/dalle3.py
index f22b11e0..73edf502 100644
--- a/swarms/models/dalle3.py
+++ b/swarms/models/dalle3.py
@@ -1,15 +1,14 @@
-import openai
 import logging
 import os
 from dataclasses import dataclass
-from functools import lru_cache
-from termcolor import colored
-from openai import OpenAI
-from dotenv import load_dotenv
-from pydantic import BaseModel, validator
-from PIL import Image
 from io import BytesIO
+import openai
+from dotenv import load_dotenv
+from openai import OpenAI
+from PIL import Image
+from pydantic import validator
+from termcolor import colored
 
 load_dotenv()
 
@@ -111,10 +110,10 @@ class Dalle3:
         try:
             # Making a call to the Dalle3 API
             response = self.client.images.generate(
-                # model=self.model,
+                model=self.model,
                 prompt=task,
-                # size=self.size,
-                # quality=self.quality,
+                size=self.size,
+                quality=self.quality,
                 n=self.n,
             )
             # Extracting the image url from the response
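
For context on the `dalle3.py` hunk above: the patch un-comments the `model`, `size`, and `quality` arguments so they are actually forwarded to the OpenAI image endpoint. Below is a minimal sketch of the equivalent call against the `openai>=1.0` client; the concrete values (`"dall-e-3"`, `"1024x1024"`, `"standard"`) and the example prompt are illustrative assumptions, not values taken from this patch.

```python
# Minimal sketch of the call the patched Dalle3 wrapper now issues.
# Assumes OPENAI_API_KEY is set in the environment; the model/size/quality
# values shown here are illustrative, not defaults defined in this patch.
import os

from openai import OpenAI

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

response = client.images.generate(
    model="dall-e-3",      # previously commented out in dalle3.py
    prompt="A watercolor painting of a lighthouse at dawn",
    size="1024x1024",      # now forwarded from self.size
    quality="standard",    # now forwarded from self.quality
    n=1,
)

# The wrapper then extracts the image URL from the response in the same way.
print(response.data[0].url)
```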