feat: Add disord_bing example

pull/68/head
Zack 2 years ago
parent cbf4a07d4a
commit a16a96bfb8

@@ -35,6 +35,7 @@ REDIS_PORT=
 #dbs
 PINECONE_API_KEY=""
 BING_COOKIE=""
+BING_AUTH=""

 # RevGpt Configuration
 ACCESS_TOKEN="your_access_token_here"
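
For context, a minimal sketch of how the new variable might be read at runtime (names mirror the .env entries above; note that the examples further down read AUTH_COOKIE rather than BING_AUTH, so one of the two names presumably still needs to be reconciled):

import os
from dotenv import load_dotenv

load_dotenv()  # pulls BING_COOKIE / BING_AUTH in from the .env file
bing_cookie = os.getenv("BING_COOKIE")
bing_auth = os.getenv("BING_AUTH")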

.gitignore (vendored): 4 lines changed

@@ -47,6 +47,8 @@ share/python-wheels/
 .installed.cfg
 *.egg
 MANIFEST
+output/*
+cookies.json

 # PyInstaller
 # Usually these files are written by a python script from a template
@@ -179,4 +181,4 @@ cython_debug/
 # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 #.idea/

@@ -86,14 +86,13 @@ class Bot:
         # image_generator.py
         @self.bot.command()
-        async def generate_image(ctx, *, prompt: str):
+        async def generate_image(ctx, *, prompt: str = None, imggen: str = None):
             """generates images based on the provided prompt"""
             await ctx.send(f"generating images for prompt: `{prompt}`...")
             loop = asyncio.get_event_loop()

             # initialize a future object for the dalle instance
-            model_instance = dalle3()
-            future = loop.run_in_executor(Executor, model_instance.run, prompt)
+            future = loop.run_in_executor(Executor, imggen, prompt)

             try:
                 # wait for the dalle request to complete, with a timeout of 60 seconds
@@ -111,8 +110,8 @@ class Bot:
                 print(f"sending {len(latest_files)} images to discord...")
                 # send all the latest images in a single message
-                storage_service = os.environ("STORAGE_SERVICE")  # "https://storage.googleapis.com/your-bucket-name/
-                await ctx.send(files=[storage_service.upload(filepath) for filepath in latest_files])
+                # storage_service = os.environ("STORAGE_SERVICE")  # "https://storage.googleapis.com/your-bucket-name/
+                # await ctx.send(files=[storage_service.upload(filepath) for filepath in latest_files])
             except asyncio.timeouterror:
                 await ctx.send("the request took too long! it might have been censored or you're out of boosts. please try entering the prompt again.")
@@ -125,7 +124,7 @@ class Bot:
             if use_agent:
                 response = self.agent.run(text)
             else:
-                response = self.llm.run(text)
+                response = self.llm.__call__(text)
             await ctx.send(response)

     def add_command(self, name, func):
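
Two notes on the hunks above. Switching from self.llm.run(text) to self.llm.__call__(text) means the bot only requires the model object to be callable, which matches BingChat below (it defines __call__ but no run method); the same call can be spelled more idiomatically, sketched here under that assumption:

# inside the command body; self.llm is any callable model such as BingChat
response = self.llm(text)

Likewise, the new imggen parameter is executed as imggen(prompt) inside run_in_executor, so callers need to pass a callable rather than a pre-rendered result (see the note after the Discord example further down).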

@@ -0,0 +1,32 @@
+from swarms.models.bing_chat import BingChat
+from swarms.workers.worker import Worker
+from swarms.tools.autogpt import EdgeGPTTool, tool
+from swarms.models import OpenAIChat
+import os
+
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Initialize the EdgeGPTModel
+edgegpt = BingChat(cookies_path="./cookies.txt")
+
+
+@tool
+def edgegpt(task: str = None):
+    """A tool to run inference on the EdgeGPT Model"""
+    return EdgeGPTTool.run(task)
+
+
+# Initialize the language model,
+# This model can be swapped out with Anthropic, Hugging Face models like Mistral, etc.
+llm = OpenAIChat(
+    openai_api_key=api_key,
+    temperature=0.5,
+)
+
+# Initialize the Worker with the custom tool
+worker = Worker(llm=llm, ai_name="EdgeGPT Worker", external_tools=[edgegpt])
+
+# Use the worker to process a task
+task = "Hello, my name is ChatGPT"
+response = worker.run(task)
+print(response)
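
One thing worth flagging in the new file above: the name edgegpt is bound twice, first to the BingChat instance and then to the @tool function, so the instance is shadowed before it can ever be used. A sketch of one way to keep both around (edgegpt_model is an illustrative name; EdgeGPTTool.run is called exactly as in the original):

# keep the model under a distinct name so the tool does not shadow it
edgegpt_model = BingChat(cookies_path="./cookies.txt")


@tool
def edgegpt(task: str = None):
    """A tool to run inference on the EdgeGPT Model"""
    return EdgeGPTTool.run(task)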

@@ -0,0 +1,14 @@
+import os
+
+from swarms.models.bing_chat import BingChat
+from apps.discord import Bot
+from dotenv import load_dotenv
+
+# Initialize the EdgeGPTModel
+cookie = os.environ.get("BING_COOKIE")
+auth = os.environ.get("AUTH_COOKIE")
+bing = BingChat(cookies_path="./cookies.txt", bing_cookie=cookie, auth_cookie=auth)
+
+bot = Bot(llm=bing, cookie=cookie, auth=auth)
+bot.generate_image(imggen=bing.create_img())
+bot.send_text(use_agent=False)
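
Two details in this example may not work as written: load_dotenv is imported but never called, so BING_COOKIE and AUTH_COOKIE resolve only if they are already exported in the shell, and create_img() is invoked immediately instead of being handed to the command as a callable (the reworked generate_image above runs imggen(prompt) in an executor). A sketch of the same wiring under those assumptions:

import os

from swarms.models.bing_chat import BingChat
from apps.discord import Bot
from dotenv import load_dotenv

load_dotenv()  # load BING_COOKIE / AUTH_COOKIE from the .env file

cookie = os.environ.get("BING_COOKIE")
auth = os.environ.get("AUTH_COOKIE")
bing = BingChat(cookies_path="./cookies.txt", bing_cookie=cookie, auth_cookie=auth)

bot = Bot(llm=bing, cookie=cookie, auth=auth)
bot.generate_image(imggen=bing.create_img)  # pass the method itself; the command calls imggen(prompt)
bot.send_text(use_agent=False)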

@@ -1,32 +1,19 @@
-from swarms.models.bing_chat import BingChat
-from swarms.workers.worker import Worker
-from swarms.tools.autogpt import EdgeGPTTool, tool
-from swarms.models import OpenAIChat
 import os
+from swarms.models.bing_chat import BingChat
+from dotenv import load_dotenv

-api_key = os.getenv("OPENAI_API_KEY")
+load_dotenv()

 # Initialize the EdgeGPTModel
-edgegpt = BingChat(cookies_path="./cookies.txt")
+edgegpt = BingChat(cookies_path="./cookies.json")
+cookie = os.environ.get("BING_COOKIE")
+auth = os.environ.get("AUTH_COOKIE")

-@tool
-def edgegpt(task: str = None):
-    """A tool to run infrence on the EdgeGPT Model"""
-    return EdgeGPTTool.run(task)
-
-
-# Initialize the language model,
-# This model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC
-llm = OpenAIChat(
-    openai_api_key=api_key,
-    temperature=0.5,
-)
-
-# Initialize the Worker with the custom tool
-worker = Worker(llm=llm, ai_name="EdgeGPT Worker", external_tools=[edgegpt])
-
-# Use the worker to process a task
-task = "Hello, my name is ChatGPT"
-response = worker.run(task)
+# Use the worker to process a task
+task = "hi"
+# img_task = "Sunset over mountains"
+
+response = edgegpt(task)
+# response = edgegpt.create_img(auth_cookie=cookie, auth_cookie_SRCHHPGUSR=auth, prompt=img_task)
+
 print(response)

@@ -1,4 +1,4 @@
-"""EdgeGPT model by OpenAI"""
+"""Bing-Chat model by Microsoft"""
 import asyncio
 import json
 from pathlib import Path
@@ -25,9 +25,11 @@ class BingChat:
     """

-    def __init__(self, cookies_path: str):
+    def __init__(self, cookies_path: str, bing_cookie: str = None, auth_cookie: str = None):
         self.cookies = json.loads(open(cookies_path, encoding="utf-8").read())
         self.bot = asyncio.run(Chatbot.create(cookies=self.cookies))
+        self.auth_cookie = auth_cookie
+        self.auth_cookie_SRCHHPGUSR = bing_cookie

     def __call__(
         self, prompt: str, style: ConversationStyle = ConversationStyle.creative
@@ -43,7 +45,7 @@ class BingChat:
         return response["text"]

     def create_img(
-        self, prompt: str, output_dir: str = "./output", auth_cookie: str = None
+        self, prompt: str, output_dir: str = "./output", auth_cookie: str = None, auth_cookie_SRCHHPGUSR: str = None
     ) -> str:
         """
         Generate an image based on the provided prompt and save it in the given output directory.
@@ -52,7 +54,7 @@ class BingChat:
         if not auth_cookie:
             raise ValueError("Auth cookie is required for image generation.")

-        image_generator = ImageGen(auth_cookie, quiet=True)
+        image_generator = ImageGen(auth_cookie, auth_cookie_SRCHHPGUSR, quiet=True)
         images = image_generator.get_images(prompt)
         image_generator.save_images(images, output_dir=output_dir)
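
For reference, a sketch of how the extended constructor and create_img signatures above would be exercised, mirroring the commented-out call in the rewritten example earlier in this commit (cookie values are placeholders):

from swarms.models.bing_chat import BingChat

bing = BingChat(cookies_path="./cookies.json", bing_cookie="<bing cookie>", auth_cookie="<auth cookie>")

# text chat goes through __call__
print(bing("hi"))

# image generation now takes both cookies explicitly
bing.create_img(
    prompt="Sunset over mountains",
    auth_cookie=bing.auth_cookie,
    auth_cookie_SRCHHPGUSR=bing.auth_cookie_SRCHHPGUSR,
)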
