From 2aa88b893d0f57412391c132320368d9c9e641e5 Mon Sep 17 00:00:00 2001
From: Kye
Date: Fri, 20 Oct 2023 01:12:53 -0400
Subject: [PATCH] zephyr

Former-commit-id: 736382fd452f9ee52bd066842e0080b27a4a4738
---
 requirements.txt           |  1 +
 swarms/models/__init__.py  |  1 +
 swarms/models/bing_chat.py | 23 ++++++-----------
 swarms/models/idefics.py   |  7 +++--
 swarms/models/zephyr.py    | 62 +++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 77 insertions(+), 17 deletions(-)
 create mode 100644 swarms/models/zephyr.py

diff --git a/requirements.txt b/requirements.txt
index 03db2ded..e61247e5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,6 +11,7 @@ playwright
 wget==3.2
 simpleaichat
 httpx
+torch
 ggl
 beautifulsoup4
 google-search-results==2.4.2
diff --git a/swarms/models/__init__.py b/swarms/models/__init__.py
index 3b1d50b3..84b26f3e 100644
--- a/swarms/models/__init__.py
+++ b/swarms/models/__init__.py
@@ -8,3 +8,4 @@ from swarms.models.openai_models import OpenAI, AzureOpenAI, OpenAIChat
 from swarms.models.idefics import Idefics
 from swarms.models.kosmos_two import Kosmos
 from swarms.models.vilt import Vilt
+from swarms.models.zephyr import Zephyr
\ No newline at end of file
diff --git a/swarms/models/bing_chat.py b/swarms/models/bing_chat.py
index f1c38b14..c91690e5 100644
--- a/swarms/models/bing_chat.py
+++ b/swarms/models/bing_chat.py
@@ -1,10 +1,11 @@
 """EdgeGPT model by OpenAI"""
 import asyncio
 import json
+from pathlib import Path
+
 from EdgeGPT.EdgeGPT import Chatbot, ConversationStyle
-from EdgeGPT.EdgeUtils import ImageQuery, Query, Cookie
+from EdgeGPT.EdgeUtils import Cookie, ImageQuery, Query
 from EdgeGPT.ImageGen import ImageGen
-from pathlib import Path
 
 
 class BingChat:
@@ -28,22 +29,14 @@ class BingChat:
         self.cookies = json.loads(open(cookies_path, encoding="utf-8").read())
         self.bot = asyncio.run(Chatbot.create(cookies=self.cookies))
 
-    def __call__(
-        self, prompt: str, style: ConversationStyle = ConversationStyle.creative
-    ) -> str:
+    def __call__(self, prompt: str, style: ConversationStyle = ConversationStyle.creative) -> str:
         """
         Get a text response using the EdgeGPT model based on the provided prompt.
         """
-        response = asyncio.run(
-            self.bot.ask(
-                prompt=prompt, conversation_style=style, simplify_response=True
-            )
-        )
-        return response["text"]
+        response = asyncio.run(self.bot.ask(prompt=prompt, conversation_style=style, simplify_response=True))
+        return response['text']
 
-    def create_img(
-        self, prompt: str, output_dir: str = "./output", auth_cookie: str = None
-    ) -> str:
+    def create_img(self, prompt: str, output_dir: str = "./output", auth_cookie: str = None) -> str:
         """
         Generate an image based on the provided prompt and save it in the given output directory.
         Returns the path of the generated image.
@@ -55,7 +48,7 @@ class BingChat:
         images = image_generator.get_images(prompt)
         image_generator.save_images(images, output_dir=output_dir)
 
-        return Path(output_dir) / images[0]["path"]
+        return Path(output_dir) / images[0]['path']
 
     @staticmethod
     def set_cookie_dir_path(path: str):
diff --git a/swarms/models/idefics.py b/swarms/models/idefics.py
index fd790d37..747def16 100644
--- a/swarms/models/idefics.py
+++ b/swarms/models/idefics.py
@@ -87,7 +87,8 @@ class Idefics:
         prompts : list
             A list of prompts. Each prompt is a list of text strings and images.
         batched_mode : bool, optional
-            Whether to process the prompts in batched mode. If True, all prompts are processed together. If False, only the first prompt is processed (default is True).
+            Whether to process the prompts in batched mode. If True, all prompts are
+            processed together. If False, only the first prompt is processed (default is True).
 
         Returns
         -------
@@ -130,7 +131,9 @@ class Idefics:
         prompts : list
             A list of prompts. Each prompt is a list of text strings and images.
         batched_mode : bool, optional
-            Whether to process the prompts in batched mode. If True, all prompts are processed together. If False, only the first prompt is processed (default is True).
+            Whether to process the prompts in batched mode.
+            If True, all prompts are processed together.
+            If False, only the first prompt is processed (default is True).
 
         Returns
         -------
diff --git a/swarms/models/zephyr.py b/swarms/models/zephyr.py
new file mode 100644
index 00000000..02641a96
--- /dev/null
+++ b/swarms/models/zephyr.py
@@ -0,0 +1,62 @@
+"""Zephyr by HF"""
+import torch
+from transformers import pipeline
+
+
+class Zephyr:
+    """
+    Zephyr model from HF
+
+    Args:
+        max_new_tokens (int): maximum number of tokens to generate (default 300)
+        temperature (float): sampling temperature (default 0.5)
+        top_k (int): top-k sampling cutoff (default 50)
+        top_p (float): nucleus sampling cutoff (default 0.95)
+
+    Usage:
+    >>> model = Zephyr()
+    >>> output = model("Generate hello world in python")
+    """
+
+    def __init__(
+        self,
+        max_new_tokens: int = 300,
+        temperature: float = 0.5,
+        top_k: int = 50,
+        top_p: float = 0.95,
+    ):
+        super().__init__()
+        self.max_new_tokens = max_new_tokens
+        self.temperature = temperature
+        self.top_k = top_k
+        self.top_p = top_p
+
+        self.pipe = pipeline(
+            "text-generation",
+            model="HuggingFaceH4/zephyr-7b-alpha",
+            torch_dtype=torch.bfloat16,
+            device_map="auto",
+        )
+        self.messages = [
+            {
+                "role": "system",
+                "content": "You are a friendly chatbot who always responds in the style of a pirate",
+            },
+        ]
+
+    def __call__(self, text: str) -> str:
+        """Call the model on the given prompt text"""
+        # Append the user's prompt to the chat history before templating
+        messages = self.messages + [{"role": "user", "content": text}]
+        prompt = self.pipe.tokenizer.apply_chat_template(
+            messages, tokenize=False, add_generation_prompt=True
+        )
+        outputs = self.pipe(
+            prompt,
+            max_new_tokens=self.max_new_tokens,
+            do_sample=True,
+            temperature=self.temperature,
+            top_k=self.top_k,
+            top_p=self.top_p,
+        )
+        return outputs[0]["generated_text"]
\ No newline at end of file
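
--
A quick usage sketch for the new Zephyr wrapper (not part of the patch itself;
it assumes the HuggingFaceH4/zephyr-7b-alpha weights can be fetched from the
HF hub and that enough GPU or CPU memory is available for a 7B model):

    from swarms.models import Zephyr

    # Instantiating Zephyr downloads the model and builds the pipeline
    model = Zephyr(max_new_tokens=100, temperature=0.7)
    print(model("Generate hello world in python"))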