Dependencies clean up

pull/100/head
Kye 1 year ago
parent 4fb38c1c62
commit a70a2b05b5

@ -40,6 +40,7 @@ We have a small gallery of examples to run here, [for more check out the docs to
### `Flow` Example ### `Flow` Example
- The `Flow` is a superior iteration of the `LLMChain` from Langchain, our intent with `Flow` is to create the most reliable loop structure that gives the agents their "autonomy" through 3 main methods of interaction, one through user specified loops, then dynamic where the agent parses a <DONE> token, and/or an interactive human input version, or a mix of all 3. - The `Flow` is a superior iteration of the `LLMChain` from Langchain, our intent with `Flow` is to create the most reliable loop structure that gives the agents their "autonomy" through 3 main methods of interaction, one through user specified loops, then dynamic where the agent parses a <DONE> token, and/or an interactive human input version, or a mix of all 3.
```python ```python
from swarms.models import OpenAIChat from swarms.models import OpenAIChat
@ -47,22 +48,37 @@ from swarms.structs import Flow
api_key = "" api_key = ""
# Initialize the language model, this model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC
# Initialize the language model,
# This model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC
llm = OpenAIChat( llm = OpenAIChat(
# model_name="gpt-4"
openai_api_key=api_key, openai_api_key=api_key,
temperature=0.5, temperature=0.5,
# max_tokens=100,
) )
# Initialize the flow ## Initialize the workflow
flow = Flow( flow = Flow(
llm=llm, llm=llm,
max_loops=5, max_loops=2,
dashboard=True,
# stopping_condition=None, # You can define a stopping condition as needed.
# loop_interval=1,
# retry_attempts=3,
# retry_interval=1,
# interactive=False, # Set to 'True' for interactive mode.
# dynamic_temperature=False, # Set to 'True' for dynamic temperature handling.
) )
out = flow.run("Generate a 10,000 word blog, say Stop when done") # out = flow.load_state("flow_state.json")
print(out) # temp = flow.dynamic_temperature()
# filter = flow.add_response_filter("Trump")
out = flow.run("Generate a 10,000 word blog on health and wellness.")
# out = flow.validate_response(out)
# out = flow.analyze_feedback(out)
# out = flow.print_history_and_memory()
# # out = flow.save_state("flow_state.json")
# print(out)
``` ```

@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry] [tool.poetry]
name = "swarms" name = "swarms"
version = "1.9.5" version = "1.9.6"
description = "Swarms - Pytorch" description = "Swarms - Pytorch"
license = "MIT" license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"] authors = ["Kye Gomez <kye@apac.ai>"]
@ -28,7 +28,6 @@ openai = "*"
langchain = "*" langchain = "*"
asyncio = "*" asyncio = "*"
nest_asyncio = "*" nest_asyncio = "*"
pegasusx = "*"
einops = "*" einops = "*"
google-generativeai = "*" google-generativeai = "*"
torch = "*" torch = "*"
@ -48,10 +47,8 @@ beautifulsoup4 = "*"
huggingface-hub = "*" huggingface-hub = "*"
pydantic = "*" pydantic = "*"
tenacity = "*" tenacity = "*"
redis = "*"
Pillow = "*" Pillow = "*"
chromadb = "*" chromadb = "*"
agent-protocol = "*"
open-interpreter = "*" open-interpreter = "*"
tabulate = "*" tabulate = "*"
termcolor = "*" termcolor = "*"

@ -16,6 +16,8 @@ from swarms.models.kosmos_two import Kosmos
from swarms.models.vilt import Vilt from swarms.models.vilt import Vilt
from swarms.models.nougat import Nougat from swarms.models.nougat import Nougat
from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA
from swarms.models.gpt4v import GPT4Vision
from swarms.models.dalle3 import Dalle3
# from swarms.models.distilled_whisperx import DistilWhisperModel # from swarms.models.distilled_whisperx import DistilWhisperModel
@ -43,4 +45,6 @@ __all__ = [
"HuggingfaceLLM", "HuggingfaceLLM",
"MPT7B", "MPT7B",
"WizardLLMStoryTeller", "WizardLLMStoryTeller",
"GPT4Vision",
"Dalle3",
] ]

@ -1,15 +1,14 @@
import openai
import logging import logging
import os import os
from dataclasses import dataclass from dataclasses import dataclass
from functools import lru_cache
from termcolor import colored
from openai import OpenAI
from dotenv import load_dotenv
from pydantic import BaseModel, validator
from PIL import Image
from io import BytesIO from io import BytesIO
import openai
from dotenv import load_dotenv
from openai import OpenAI
from PIL import Image
from pydantic import validator
from termcolor import colored
load_dotenv() load_dotenv()
@ -111,10 +110,10 @@ class Dalle3:
try: try:
# Making a call to the Dalle3 API # Making a call to the Dalle3 API
response = self.client.images.generate( response = self.client.images.generate(
# model=self.model, model=self.model,
prompt=task, prompt=task,
# size=self.size, size=self.size,
# quality=self.quality, quality=self.quality,
n=self.n, n=self.n,
) )
# Extracting the image url from the response # Extracting the image url from the response

Loading…
Cancel
Save