pull/64/head
Kye 1 year ago
parent 42ce6cf18c
commit 736382fd45

@@ -11,6 +11,7 @@ playwright
wget==3.2
simpleaichat
httpx
torch
ggl
beautifulsoup4
google-search-results==2.4.2

@@ -8,3 +8,4 @@ from swarms.models.openai_models import OpenAI, AzureOpenAI, OpenAIChat
from swarms.models.idefics import Idefics
from swarms.models.kosmos_two import Kosmos
from swarms.models.vilt import Vilt
from swarms.models.zephyr import Zephyr
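With Zephyr re-exported here, the model can be imported from the package root like the other models. A minimal sketch (the class itself is added in zephyr.py further down):

from swarms.models import Zephyr

model = Zephyr()  # downloads HuggingFaceH4/zephyr-7b-alpha on first use
output = model("Generate hello world in python")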

@@ -1,10 +1,11 @@
"""EdgeGPT model by OpenAI"""
import asyncio
import json
from pathlib import Path
from EdgeGPT.EdgeGPT import Chatbot, ConversationStyle
from EdgeGPT.EdgeUtils import Cookie, ImageQuery, Query
from EdgeGPT.ImageGen import ImageGen
class BingChat:
@@ -28,22 +29,14 @@ class BingChat:
self.cookies = json.loads(open(cookies_path, encoding="utf-8").read())
self.bot = asyncio.run(Chatbot.create(cookies=self.cookies))
    def __call__(self, prompt: str, style: ConversationStyle = ConversationStyle.creative) -> str:
"""
Get a text response using the EdgeGPT model based on the provided prompt.
"""
        response = asyncio.run(self.bot.ask(prompt=prompt, conversation_style=style, simplify_response=True))
        return response["text"]
    def create_img(self, prompt: str, output_dir: str = "./output", auth_cookie: str = None) -> str:
"""
Generate an image based on the provided prompt and save it in the given output directory.
Returns the path of the generated image.
@@ -55,7 +48,7 @@ class BingChat:
images = image_generator.get_images(prompt)
image_generator.save_images(images, output_dir=output_dir)
        return Path(output_dir) / images[0]["path"]
@staticmethod
def set_cookie_dir_path(path: str):

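A minimal usage sketch for the reformatted BingChat methods above, assuming a cookies.json exported from an authenticated bing.com session; the file path and the auth cookie value are placeholders:

from swarms.models.bing_chat import BingChat

chat = BingChat(cookies_path="./cookies.json")  # placeholder cookie file
print(chat("Summarize the plot of Moby Dick"))  # text response via __call__
# Image generation needs the Bing "_U" auth cookie; the value here is a placeholder
img_path = chat.create_img("a lighthouse at dawn", output_dir="./output", auth_cookie="<_U cookie>")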
@@ -87,7 +87,8 @@ class Idefics:
prompts : list
A list of prompts. Each prompt is a list of text strings and images.
batched_mode : bool, optional
            Whether to process the prompts in batched mode. If True, all prompts are
            processed together. If False, only the first prompt is processed (default is True).
Returns
-------
@@ -130,7 +131,9 @@ class Idefics:
prompts : list
A list of prompts. Each prompt is a list of text strings and images.
batched_mode : bool, optional
            Whether to process the prompts in batched mode.
            If True, all prompts are processed together.
            If False, only the first prompt is processed (default is True).
Returns
-------

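A short sketch of the batched_mode behavior the docstrings above describe, assuming Idefics is invoked with (prompts, batched_mode) as documented; the constructor arguments and the image URL are illustrative:

from swarms.models.idefics import Idefics

model = Idefics()
prompts = [
    ["User: What is in this image?", "https://example.com/cat.jpg"],  # hypothetical image URL
]
# batched_mode=True processes all prompts together; False runs only the first
responses = model(prompts, batched_mode=True)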
@@ -0,0 +1,53 @@
"""Zephyr by HF"""
import torch
from transformers import pipeline
class Zephyr:
"""
    Zephyr model from HF

    Args:
        max_new_tokens (int): Maximum number of tokens to generate. Defaults to 300.
        temperature (float): Sampling temperature. Defaults to 0.5.
        top_k (int): Top-k sampling cutoff. Defaults to 50.
        top_p (float): Top-p (nucleus) sampling cutoff. Defaults to 0.95.

    Usage:
>>> model = Zephyr()
>>> output = model("Generate hello world in python")
"""
def __init__(
self,
max_new_tokens: int = 300,
temperature: float = 0.5,
        top_k: int = 50,
top_p: float = 0.95,
):
super().__init__()
self.max_new_tokens = max_new_tokens
self.temperature = temperature
self.top_k = top_k
self.top_p = top_p
self.pipe = pipeline(
"text-generation",
model="HuggingFaceH4/zephyr-7b-alpha",
            torch_dtype=torch.bfloat16,
device_map="auto"
)
        self.messages = [
            {
                "role": "system",
                "content": "You are a friendly chatbot who always responds in the style of a pirate",
            },
        ]
    def __call__(self, text: str):
        """Run the model on a user prompt and return the generated text"""
        # Append the user prompt to the chat history before applying the chat template
        messages = self.messages + [{"role": "user", "content": text}]
        prompt = self.pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        outputs = self.pipe(prompt, max_new_tokens=self.max_new_tokens, do_sample=True, temperature=self.temperature, top_k=self.top_k, top_p=self.top_p)
        return outputs[0]["generated_text"]
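Following the class docstring, a quick smoke test of Zephyr (the ~7B model is downloaded on first run, and bfloat16 weights assume a suitable GPU or recent CPU):

model = Zephyr(max_new_tokens=300, temperature=0.5)
print(model("Generate hello world in python"))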