From a16a96bfb85a0c9e372abe40a6a9bf842d4e419b Mon Sep 17 00:00:00 2001 From: Zack Date: Mon, 23 Oct 2023 11:37:24 -0500 Subject: [PATCH 01/13] feat: Add disord_bing example --- .env.example | 1 + .gitignore | 4 +++- apps/discord.py | 11 +++++------ playground/agents/bingchat.py | 32 ++++++++++++++++++++++++++++++ playground/apps/bing_discord.py | 14 +++++++++++++ playground/models/bingchat.py | 35 +++++++++++---------------------- swarms/models/bing_chat.py | 10 ++++++---- 7 files changed, 72 insertions(+), 35 deletions(-) create mode 100644 playground/agents/bingchat.py create mode 100644 playground/apps/bing_discord.py diff --git a/.env.example b/.env.example index c0023751..8c73ae02 100644 --- a/.env.example +++ b/.env.example @@ -35,6 +35,7 @@ REDIS_PORT= #dbs PINECONE_API_KEY="" BING_COOKIE="" +BING_AUTH="" # RevGpt Configuration ACCESS_TOKEN="your_access_token_here" diff --git a/.gitignore b/.gitignore index 09ebd159..92dd6c81 100644 --- a/.gitignore +++ b/.gitignore @@ -47,6 +47,8 @@ share/python-wheels/ .installed.cfg *.egg MANIFEST +output/* +cookes.json # PyInstaller # Usually these files are written by a python script from a template @@ -179,4 +181,4 @@ cython_debug/ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. -#.idea/ \ No newline at end of file +#.idea/ diff --git a/apps/discord.py b/apps/discord.py index a03d0835..aa07f4e5 100644 --- a/apps/discord.py +++ b/apps/discord.py @@ -86,14 +86,13 @@ class Bot: # image_generator.py @self.bot.command() - async def generate_image(ctx, *, prompt: str): + async def generate_image(ctx, *, prompt: str = None, imggen: str = None): """generates images based on the provided prompt""" await ctx.send(f"generating images for prompt: `{prompt}`...") loop = asyncio.get_event_loop() # initialize a future object for the dalle instance - model_instance = dalle3() - future = loop.run_in_executor(Executor, model_instance.run, prompt) + future = loop.run_in_executor(Executor, imggen, prompt) try: # wait for the dalle request to complete, with a timeout of 60 seconds @@ -111,8 +110,8 @@ class Bot: print(f"sending {len(latest_files)} images to discord...") # send all the latest images in a single message - storage_service = os.environ("STORAGE_SERVICE") # "https://storage.googleapis.com/your-bucket-name/ - await ctx.send(files=[storage_service.upload(filepath) for filepath in latest_files]) + # storage_service = os.environ("STORAGE_SERVICE") # "https://storage.googleapis.com/your-bucket-name/ + # await ctx.send(files=[storage_service.upload(filepath) for filepath in latest_files]) except asyncio.timeouterror: await ctx.send("the request took too long! it might have been censored or you're out of boosts. 
please try entering the prompt again.") @@ -125,7 +124,7 @@ class Bot: if use_agent: response = self.agent.run(text) else: - response = self.llm.run(text) + response = self.llm.__call__(text) await ctx.send(response) def add_command(self, name, func): diff --git a/playground/agents/bingchat.py b/playground/agents/bingchat.py new file mode 100644 index 00000000..bf06ecc6 --- /dev/null +++ b/playground/agents/bingchat.py @@ -0,0 +1,32 @@ +from swarms.models.bing_chat import BingChat +from swarms.workers.worker import Worker +from swarms.tools.autogpt import EdgeGPTTool, tool +from swarms.models import OpenAIChat +import os + +api_key = os.getenv("OPENAI_API_KEY") + +# Initialize the EdgeGPTModel +edgegpt = BingChat(cookies_path="./cookies.txt") + + +@tool +def edgegpt(task: str = None): + """A tool to run infrence on the EdgeGPT Model""" + return EdgeGPTTool.run(task) + + +# Initialize the language model, +# This model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC +llm = OpenAIChat( + openai_api_key=api_key, + temperature=0.5, +) + +# Initialize the Worker with the custom tool +worker = Worker(llm=llm, ai_name="EdgeGPT Worker", external_tools=[edgegpt]) + +# Use the worker to process a task +task = "Hello, my name is ChatGPT" +response = worker.run(task) +print(response) diff --git a/playground/apps/bing_discord.py b/playground/apps/bing_discord.py new file mode 100644 index 00000000..fc179d1c --- /dev/null +++ b/playground/apps/bing_discord.py @@ -0,0 +1,14 @@ +import os +from swarms.models.bing_chat import BingChat +from apps.discord import Bot +from dotenv import load_dotenv + + +# Initialize the EdgeGPTModel +cookie = os.environ.get("BING_COOKIE") +auth = os.environ.get("AUTH_COOKIE") +bing = BingChat(cookies_path="./cookies.txt", bing_cookie=cookie, auth_cookie=auth) + +bot = Bot(llm=bing, cookie=cookie, auth=auth) +bot.generate_image(imggen=bing.create_img()) +bot.send_text(use_agent=False) diff --git a/playground/models/bingchat.py b/playground/models/bingchat.py index bf06ecc6..6e44cbb5 100644 --- a/playground/models/bingchat.py +++ b/playground/models/bingchat.py @@ -1,32 +1,19 @@ -from swarms.models.bing_chat import BingChat -from swarms.workers.worker import Worker -from swarms.tools.autogpt import EdgeGPTTool, tool -from swarms.models import OpenAIChat import os +from swarms.models.bing_chat import BingChat +from dotenv import load_dotenv -api_key = os.getenv("OPENAI_API_KEY") +load_dotenv() # Initialize the EdgeGPTModel -edgegpt = BingChat(cookies_path="./cookies.txt") - - -@tool -def edgegpt(task: str = None): - """A tool to run infrence on the EdgeGPT Model""" - return EdgeGPTTool.run(task) +edgegpt = BingChat(cookies_path="./cookies.json") +cookie = os.environ.get("BING_COOKIE") +auth = os.environ.get("AUTH_COOKIE") +# Use the worker to process a task +task = "hi" +# img_task = "Sunset over mountains" -# Initialize the language model, -# This model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC -llm = OpenAIChat( - openai_api_key=api_key, - temperature=0.5, -) - -# Initialize the Worker with the custom tool -worker = Worker(llm=llm, ai_name="EdgeGPT Worker", external_tools=[edgegpt]) +response = edgegpt(task) +# response = edgegpt.create_img(auth_cookie=cookie,auth_cookie_SRCHHPGUSR=auth,prompt=img_task) -# Use the worker to process a task -task = "Hello, my name is ChatGPT" -response = worker.run(task) print(response) diff --git a/swarms/models/bing_chat.py b/swarms/models/bing_chat.py index 1d2eb503..cb90f97e 
100644 --- a/swarms/models/bing_chat.py +++ b/swarms/models/bing_chat.py @@ -1,4 +1,4 @@ -"""EdgeGPT model by OpenAI""" +"""Bing-Chat model by Micorsoft""" import asyncio import json from pathlib import Path @@ -25,9 +25,11 @@ class BingChat: """ - def __init__(self, cookies_path: str): + def __init__(self, cookies_path: str, bing_cookie: str = None, auth_cookie: str = None): self.cookies = json.loads(open(cookies_path, encoding="utf-8").read()) self.bot = asyncio.run(Chatbot.create(cookies=self.cookies)) + self.auth_cookie = auth_cookie + self.auth_cookie_SRCHHPGUSR = bing_cookie def __call__( self, prompt: str, style: ConversationStyle = ConversationStyle.creative @@ -43,7 +45,7 @@ class BingChat: return response["text"] def create_img( - self, prompt: str, output_dir: str = "./output", auth_cookie: str = None + self, prompt: str, output_dir: str = "./output", auth_cookie: str = None, auth_cookie_SRCHHPGUSR: str = None ) -> str: """ Generate an image based on the provided prompt and save it in the given output directory. @@ -52,7 +54,7 @@ class BingChat: if not auth_cookie: raise ValueError("Auth cookie is required for image generation.") - image_generator = ImageGen(auth_cookie, quiet=True) + image_generator = ImageGen(auth_cookie, auth_cookie_SRCHHPGUSR, quiet=True, ) images = image_generator.get_images(prompt) image_generator.save_images(images, output_dir=output_dir) From 581e9b9b697e712c9be5352149dbd3a4753289a0 Mon Sep 17 00:00:00 2001 From: Zack Date: Mon, 23 Oct 2023 20:27:49 -0500 Subject: [PATCH 02/13] Add BingBot --- apps/BingBot/.env.example | 4 + apps/BingBot/Dockerfile | 6 + apps/BingBot/bing_bot.py | 115 +++++++++ apps/BingBot/cogs/edgegpt.py | 148 ++++++++++++ apps/BingBot/cogs/event.py | 223 ++++++++++++++++++ apps/BingBot/cogs/help.py | 17 ++ apps/BingBot/compose.yaml | 10 + apps/BingBot/cookies.json | 6 + apps/BingBot/core/classes.py | 5 + apps/BingBot/requirements.txt | 4 + apps/BingBot/src/imageCreate.py | 31 +++ apps/BingBot/src/log.py | 67 ++++++ apps/BingBot/src/response.py | 117 ++++++++++ apps/discord.py | 233 +++++++++---------- playground/models/bingchat.py => bingchat.py | 8 +- playground/apps/bing_discord.py | 7 +- swarms/models/bing_chat.py | 2 +- 17 files changed, 875 insertions(+), 128 deletions(-) create mode 100644 apps/BingBot/.env.example create mode 100644 apps/BingBot/Dockerfile create mode 100644 apps/BingBot/bing_bot.py create mode 100644 apps/BingBot/cogs/edgegpt.py create mode 100644 apps/BingBot/cogs/event.py create mode 100644 apps/BingBot/cogs/help.py create mode 100644 apps/BingBot/compose.yaml create mode 100644 apps/BingBot/cookies.json create mode 100644 apps/BingBot/core/classes.py create mode 100644 apps/BingBot/requirements.txt create mode 100644 apps/BingBot/src/imageCreate.py create mode 100644 apps/BingBot/src/log.py create mode 100644 apps/BingBot/src/response.py rename playground/models/bingchat.py => bingchat.py (64%) diff --git a/apps/BingBot/.env.example b/apps/BingBot/.env.example new file mode 100644 index 00000000..341406f7 --- /dev/null +++ b/apps/BingBot/.env.example @@ -0,0 +1,4 @@ +DISCORD_BOT_TOKEN= +MENTION_CHANNEL_ID= +AUTH_COOKIE= +AUTH_COOKIE_SRCHHPGUSR= diff --git a/apps/BingBot/Dockerfile b/apps/BingBot/Dockerfile new file mode 100644 index 00000000..e276b5c8 --- /dev/null +++ b/apps/BingBot/Dockerfile @@ -0,0 +1,6 @@ +FROM python:3.9.16 +WORKDIR /bot +COPY requirements.txt /bot/ +RUN pip install -r requirements.txt +COPY . 
/bot +CMD python bot.py diff --git a/apps/BingBot/bing_bot.py b/apps/BingBot/bing_bot.py new file mode 100644 index 00000000..4a562411 --- /dev/null +++ b/apps/BingBot/bing_bot.py @@ -0,0 +1,115 @@ +import discord +import os +import src.log +import sys +import pkg_resources +import json +from discord.ext import commands +from dotenv import load_dotenv + +load_dotenv() + +bot = commands.Bot(command_prefix='!', intents = discord.Intents.all()) + +# init loggger +logger = src.log.setup_logger(__name__) + +def restart_bot(): + # Replace current process with new instance of bot.py + os.execl(sys.executable, sys.executable, "bot.py") + +def check_verion() -> None: + # Read the requirements.txt file and add each line to a list + with open('requirements.txt') as f: + required = f.read().splitlines() + + # For each library listed in requirements.txt, check if the corresponding version is installed + for package in required: + # Use the pkg_resources library to get information about the installed version of the library + package_name, package_verion = package.split('==') + installed = pkg_resources.get_distribution(package_name) + # Extract the library name and version number + name, version = installed.project_name, installed.version + # Compare the version number to see if it matches the one in requirements.txt + if package != f'{name}=={version}': + logger.error(f'{name} version {version} is installed but does not match the requirements') + sys.exit() + +@bot.event +async def on_ready(): + bot_status = discord.Status.online + bot_activity = discord.Activity(type=discord.ActivityType.playing, name = "bing.com") + await bot.change_presence(status = bot_status, activity = bot_activity) + for Filename in os.listdir('./cogs'): + if Filename.endswith('.py'): + await bot.load_extension(f'cogs.{Filename[:-3]}') + logger.info(f'{bot.user} is now running!') + print("Bot is Up and Ready!") + try: + synced = await bot.tree.sync() + print(f"Synced {len(synced)} commands") + except Exception as e: + print(e) + +# Load command +@commands.is_owner() +@bot.command() +async def load(ctx, extension): + await bot.load_extension(f'cogs.{extension}') + await ctx.author.send(f'> **Loaded {extension} done.**') + +# Unload command +@commands.is_owner() +@bot.command() +async def unload(ctx, extension): + await bot.unload_extension(f'cogs.{extension}') + await ctx.author.send(f'> **Un-Loaded {extension} done.**') + +# Empty discord_bot.log file +@commands.is_owner() +@bot.command() +async def clean(ctx): + open('discord_bot.log', 'w').close() + await ctx.author.send(f'> **Successfully emptied the file!**') + +# Get discord_bot.log file +@commands.is_owner() +@bot.command() +async def getLog(ctx): + try: + with open('discord_bot.log', 'rb') as f: + file = discord.File(f) + await ctx.author.send(file=file) + await ctx.author.send("> **Send successfully!**") + except: + await ctx.author.send("> **Send failed!**") + +# Upload new Bing cookies and restart the bot +@commands.is_owner() +@bot.command() +async def upload(ctx): + if ctx.message.attachments: + for attachment in ctx.message.attachments: + if str(attachment)[-4:] == ".txt": + content = await attachment.read() + with open("cookies.json", "w", encoding = "utf-8") as f: + json.dump(json.loads(content), f, indent = 2) + if not isinstance(ctx.channel, discord.abc.PrivateChannel): + await ctx.message.delete() + await ctx.author.send(f'> **Upload new cookies successfully!**') + logger.warning("\x1b[31mCookies has been setup successfully\x1b[0m") + restart_bot() + else: + 
await ctx.author.send("> **Didn't get any txt file.**") + else: + await ctx.author.send("> **Didn't get any file.**") + +if __name__ == '__main__': + check_verion() + bot.run(os.getenv("DISCORD_BOT_TOKEN")) + + + + + + diff --git a/apps/BingBot/cogs/edgegpt.py b/apps/BingBot/cogs/edgegpt.py new file mode 100644 index 00000000..683780db --- /dev/null +++ b/apps/BingBot/cogs/edgegpt.py @@ -0,0 +1,148 @@ +import os +import discord +import json +from typing import Optional +from EdgeGPT.ImageGen import ImageGenAsync, ImageGen +from EdgeGPT.EdgeGPT import Chatbot +from discord import app_commands +from core.classes import Cog_Extension +from src import log +from src.imageCreate import create_image, get_using_create, set_using_create +from src.response import send_message, get_using_send, set_using_send +from dotenv import load_dotenv + +load_dotenv() + +logger = log.setup_logger(__name__) + +users_chatbot = {} +users_image_generator = {} +user_conversation_style = {} + +async def init_chatbot(user_id): + with open("./cookies.json", encoding="utf-8") as file: + cookie_json = json.load(file) + for cookie in cookie_json: + if cookie.get("name") == "_U": + auth_cookie = cookie.get("value") + break + + auth_cookie = os.environ.get("AUTH_COOKIE") + auth_cookie_SRCHHPGUSR = os.environ.get("AUTH_COOKIE_SRCHHPGUSR") + # auth_cookie_SRCHHPGUSR = os.environ.get("AUTH_COOKIE_SRCHHPGUSR") + users_chatbot[user_id] = UserChatbot(cookies=cookie_json) + users_image_generator[user_id] = ImageGenAsync(auth_cookie, quiet=True) + user_conversation_style[user_id] = "balanced" + +class UserChatbot: + def __init__(self, cookies): + self.chatbot = Chatbot(cookies=cookies) + + async def send_message(self, interaction, message, conversation_style): + await send_message(self.chatbot, interaction, message, conversation_style) + + async def create_image(self, interaction, prompt: str, image_generator): + await create_image(interaction, prompt, image_generator) + + async def reset(self): + await self.chatbot.reset() + +class EdgeGPT(Cog_Extension): + # Chat with Bing + @app_commands.command(name="bing", description="Have a chat with Bing") + async def bing(self, interaction: discord.Interaction, *, message: str): + try: + using = await get_using_send(interaction.user.id) + except: + await set_using_send(interaction.user.id, False) + using = await get_using_send(interaction.user.id) + if not using: + await interaction.response.defer(ephemeral=False, thinking=True) + username = str(interaction.user) + usermessage = message + channel = str(interaction.channel) + user_id = interaction.user.id + if user_id not in users_chatbot: + await init_chatbot(interaction.user.id) + conversation_style = user_conversation_style[user_id] + logger.info(f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}]") + await users_chatbot[user_id].send_message(interaction, usermessage, conversation_style) + else: + await interaction.response.defer(ephemeral=True, thinking=True) + await interaction.followup.send("> **Please wait for your last conversation to finish.**") + + # Reset Bing conversation + @app_commands.command(name="reset", description="Reset Bing conversation") + async def reset(self, interaction: discord.Interaction): + await interaction.response.defer(ephemeral=True, thinking=True) + user_id = interaction.user.id + try: + await users_chatbot[user_id].reset() + await interaction.followup.send("> **Info: Reset finish.**") + logger.warning("\x1b[31mBing has been successfully reset\x1b[0m") + except: + await 
interaction.followup.send(f"> **You don't have any conversation yet.**") + logger.exception("Bing reset failed.") + + # Switch conversation style + @app_commands.command(name="switch_style", description="Switch conversation style") + @app_commands.choices(style=[app_commands.Choice(name="Creative", value="creative"), app_commands.Choice(name="Balanced", value="balanced"), app_commands.Choice(name="Precise", value="precise")]) + async def switch_style(self, interaction: discord.Interaction, style: app_commands.Choice[str]): + await interaction.response.defer(ephemeral=True, thinking=True) + user_id = interaction.user.id + if user_id not in users_chatbot: + await init_chatbot(user_id) + user_conversation_style[user_id] = style.value + await interaction.followup.send(f"> **Info: successfull switch conversation style to {style.value}.**") + logger.warning(f"\x1b[31mConversation style has been successfully switch to {style.value}\x1b[0m") + + # Set and delete personal Bing Cookies + @app_commands.command(name="bing_cookies", description="Set or delete Bing Cookies") + @app_commands.choices(choice=[app_commands.Choice(name="set", value="set"), app_commands.Choice(name="delete", value="delete")]) + async def cookies_setting(self, interaction: discord.Interaction, choice: app_commands.Choice[str], cookies_file: Optional[discord.Attachment]=None): + await interaction.response.defer(ephemeral=True, thinking=True) + user_id = interaction.user.id + if choice.value == "set": + try: + content = json.loads(await cookies_file.read()) + for cookie in content: + if cookie.get("name") == "_U": + auth_cookie = cookie.get("value") + break + users_image_generator[user_id] = ImageGenAsync(auth_cookie, quiet=True) + users_chatbot[user_id] = UserChatbot(cookies=content) + user_conversation_style[user_id] = "balanced" + await interaction.followup.send("> **Upload successful!**") + logger.warning(f"\x1b[31m{interaction.user} set Bing Cookies successful\x1b[0m") + except: + await interaction.followup.send("> **Please upload your Bing Cookies.**") + else: + try: + del users_chatbot[user_id] + del users_image_generator[user_id] + del user_conversation_style[user_id] + await interaction.followup.send("> **Delete finish.**") + logger.warning(f"\x1b[31m{interaction.user} delete Cookies\x1b[0m") + except: + await interaction.followup.send("> **You don't have any Bing Cookies.**") + + # Create images + @app_commands.command(name="create_image", description="generate image by Bing image creator") + async def create_image(self, interaction: discord.Interaction, *, prompt: str): + user_id = interaction.user.id + if interaction.user.id not in users_chatbot: + await init_chatbot(user_id) + try: + using = await get_using_create(user_id) + except: + await set_using_create(user_id, False) + using = await get_using_create(user_id) + if not using: + logger.info(f"\x1b[31m{interaction.user}\x1b[0m : '{prompt}' ({interaction.channel}) [BingImageCreator]") + await users_chatbot[user_id].create_image(interaction, prompt, users_image_generator[user_id] ) + else: + await interaction.response.defer(ephemeral=True, thinking=True) + await interaction.followup.send("> **Please wait for your last image to create finish.**") + +async def setup(bot): + await bot.add_cog(EdgeGPT(bot)) diff --git a/apps/BingBot/cogs/event.py b/apps/BingBot/cogs/event.py new file mode 100644 index 00000000..f42b6e5b --- /dev/null +++ b/apps/BingBot/cogs/event.py @@ -0,0 +1,223 @@ +import discord +import re +import os +import json +import asyncio +from 
EdgeGPT.EdgeGPT import Chatbot, ConversationStyle +from dotenv import load_dotenv +from discord.ext import commands +from core.classes import Cog_Extension +from functools import partial +from src import log + +load_dotenv() + +USE_SUGGEST_RESPONSES = True +try: + MENTION_CHANNEL_ID = int(os.getenv("MENTION_CHANNEL_ID")) +except: + MENTION_CHANNEL_ID = None +logger = log.setup_logger(__name__) +sem = asyncio.Semaphore(1) +conversation_style = "balanced" + +with open("./cookies.json", encoding="utf-8") as file: + cookies = json.load(file) +chatbot = Chatbot(cookies=cookies) + +# To add suggest responses +class MyView(discord.ui.View): + def __init__(self, chatbot: Chatbot, suggest_responses:list): + super().__init__(timeout=120) + # Add buttons + for label in suggest_responses: + button = discord.ui.Button(label=label) + # Button event + async def callback(interaction: discord.Interaction, button: discord.ui.Button): + await interaction.response.defer(ephemeral=False, thinking=True) + # When click the button, all buttons will disable. + for child in self.children: + child.disabled = True + await interaction.followup.edit_message(message_id=interaction.message.id, view=self) + username = str(interaction.user) + usermessage = button.label + channel = str(interaction.channel) + logger.info(f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}] [button]") + task = asyncio.create_task(send_message(chatbot, interaction, usermessage)) + await asyncio.gather(task) + self.add_item(button) + self.children[-1].callback = partial(callback, button=button) +# Show Dropdown +class DropdownView(discord.ui.View): + def __init__(self): + super().__init__(timeout=180) + + options = [ + discord.SelectOption(label="Creative", description="Switch conversation style to Creative", emoji='🎨'), + discord.SelectOption(label="Balanced", description="Switch conversation style to Balanced", emoji='⚖️'), + discord.SelectOption(label="Precise", description="Switch conversation style to Precise", emoji='🔎'), + discord.SelectOption(label="Reset", description="Reset conversation", emoji="🔄") + ] + + dropdown = discord.ui.Select( + placeholder="Choose setting", + min_values=1, + max_values=1, + options=options + ) + + dropdown.callback = self.dropdown_callback + self.add_item(dropdown) + # Dropdown event + async def dropdown_callback(self, interaction: discord.Interaction): + await interaction.response.defer(ephemeral=False, thinking=True) + if interaction.data['values'][0] == "Creative": + await set_conversation_style("creative") + await interaction.followup.send(f"> **Info: successfull switch conversation style to *{interaction.data['values'][0]}*.**") + logger.warning(f"\x1b[31mConversation style has been successfully switch to {interaction.data['values'][0]}\x1b[0m") + elif interaction.data['values'][0] == "Balanced": + await set_conversation_style("balanced") + await interaction.followup.send(f"> **Info: successfull switch conversation style to *{interaction.data['values'][0]}*.**") + logger.warning(f"\x1b[31mConversation style has been successfully switch to {interaction.data['values'][0]}\x1b[0m") + elif interaction.data['values'][0] == "Precise": + await set_conversation_style("precise") + await interaction.followup.send(f"> **Info: successfull switch conversation style to *{interaction.data['values'][0]}*.**") + logger.warning(f"\x1b[31mConversation style has been successfully switch to {interaction.data['values'][0]}\x1b[0m") + else: + await chatbot.reset() + await 
interaction.followup.send(f"> **Info: Reset finish.**") + logger.warning("\x1b[31mBing has been successfully reset\x1b[0m") + # disable dropdown after select + for dropdown in self.children: + dropdown.disabled = True + await interaction.followup.edit_message(message_id=interaction.message.id, view=self) + +# Set conversation style +async def set_conversation_style(style: str): + global conversation_style + conversation_style = style +async def set_chatbot(cookies): + global chatbot + chatbot = Chatbot(cookies=cookies) + +async def send_message(chatbot: Chatbot, message, user_message: str): + async with sem: + if isinstance(message, discord.message.Message): + await message.channel.typing() + reply = '' + text = '' + link_embed = '' + images_embed = [] + all_url = [] + try: + # Change conversation style + if conversation_style == "creative": + reply = await chatbot.ask(prompt=user_message, conversation_style=ConversationStyle.creative, simplify_response=True) + elif conversation_style == "precise": + reply = await chatbot.ask(prompt=user_message, conversation_style=ConversationStyle.precise, simplify_response=True) + else: + reply = await chatbot.ask(prompt=user_message, conversation_style=ConversationStyle.balanced, simplify_response=True) + + # Get reply text + text = f"{reply['text']}" + text = re.sub(r'\[\^(\d+)\^\]', lambda match: '', text) + + # Get the URL, if available + try: + if len(reply['sources']) != 0: + for i, url in enumerate(reply['sources'], start=1): + if len(url['providerDisplayName']) == 0: + all_url.append(f"{i}. {url['seeMoreUrl']}") + else: + all_url.append(f"{i}. [{url['providerDisplayName']}]({url['seeMoreUrl']})") + link_text = "\n".join(all_url) + link_embed = discord.Embed(description=link_text) + except: + pass + + # Set the final message + if isinstance(message, discord.interactions.Interaction): + user_message = user_message.replace("\n", "") + ask = f"> **{user_message}**\t(***style: {conversation_style}***)\n\n" + response = f"{ask}{text}" + else: + response = f"{text}\t(***style: {conversation_style}***)" + + # Discord limit about 2000 characters for a message + while len(response) > 2000: + temp = response[:2000] + response = response[2000:] + if isinstance(message, discord.interactions.Interaction): + await message.followup.send(temp) + else: + await message.channel.send(temp) + + # Get the image, if available + try: + if len(link_embed) == 0: + all_image = re.findall("https?://[\w\./]+", str(reply["sources_text"])) + [images_embed.append(discord.Embed(url="https://www.bing.com/").set_image(url=image_link)) for image_link in all_image] + except: + pass + + if USE_SUGGEST_RESPONSES: + suggest_responses = reply["suggestions"] + if images_embed: + if isinstance(message, discord.interactions.Interaction): + await message.followup.send(response, view=MyView(chatbot, suggest_responses), embeds=images_embed, wait=True) + else: + await message.channel.send(response, view=MyView(chatbot, suggest_responses), embeds=images_embed) + elif link_embed: + if isinstance(message, discord.interactions.Interaction): + await message.followup.send(response, view=MyView(chatbot, suggest_responses), embed=link_embed, wait=True) + else: + await message.channel.send(response, view=MyView(chatbot, suggest_responses), embed=link_embed) + else: + if isinstance(message, discord.interactions.Interaction): + await message.followup.send(response, view=MyView(chatbot, suggest_responses), wait=True) + else: + await message.channel.send(response, view=MyView(chatbot, suggest_responses)) + 
else: + if images_embed: + if isinstance(message, discord.interactions.Interaction): + await message.followup.send(response, embeds=images_embed, wait=True) + else: + await message.channel.send(response, embeds=images_embed) + elif link_embed: + if isinstance(message, discord.interactions.Interaction): + await message.followup.send(response, embed=link_embed, wait=True) + else: + await message.channel.send(response, embed=link_embed) + else: + if isinstance(message, discord.interactions.Interaction): + await message.followup.send(response, wait=True) + else: + await message.channel.send(response) + except Exception as e: + if isinstance(message, discord.interactions.Interaction): + await message.followup.send(f">>> **Error: {e}**") + else: + await message.channel.send(f">>> **Error: {e}**") + logger.exception(f"Error while sending message: {e}") + +class Event(Cog_Extension): + @commands.Cog.listener() + async def on_message(self, message: discord.Message): + if message.author == self.bot.user: + return + if self.bot.user in message.mentions: + if not MENTION_CHANNEL_ID or message.channel.id == MENTION_CHANNEL_ID: + content = re.sub(r'<@.*?>', '', message.content).strip() + if len(content) > 0: + username = str(message.author) + channel = str(message.channel) + logger.info(f"\x1b[31m{username}\x1b[0m : '{content}' ({channel}) [Style: {conversation_style}]") + task = asyncio.create_task(send_message(chatbot, message, content)) + await asyncio.gather(task) + else: + await message.channel.send(view=DropdownView()) + elif MENTION_CHANNEL_ID is not None: + await message.channel.send(f"> **Can only be mentioned at <#{self.bot.get_channel(MENTION_CHANNEL_ID).id}>**") + +async def setup(bot): + await bot.add_cog(Event(bot)) diff --git a/apps/BingBot/cogs/help.py b/apps/BingBot/cogs/help.py new file mode 100644 index 00000000..16ecff78 --- /dev/null +++ b/apps/BingBot/cogs/help.py @@ -0,0 +1,17 @@ +import discord +from core.classes import Cog_Extension +from discord import app_commands + +class Help(Cog_Extension): + @app_commands.command(name = "help", description = "Show how to use") + async def help(self, interaction: discord.Interaction): + embed=discord.Embed(title="Help", description="[see more](https://github.com/FuseFairy/DiscordBot-EdgeGPT/blob/main/README.md)\n\n**COMMANDS -**") + embed.add_field(name="/bing_cookies", value="Set and delete your Bing Cookies.", inline=False) + embed.add_field(name="/bing", value="Chat with Bing.", inline=False) + embed.add_field(name="/reset", value="Reset your Bing conversation.", inline=False) + embed.add_field(name="/switch_style", value="Switch your Bing conversation style.", inline=False) + embed.add_field(name="/create_image", value="Generate image by Bing Image Creator.", inline=False) + await interaction.response.send_message(embed=embed) + +async def setup(bot): + await bot.add_cog(Help(bot)) \ No newline at end of file diff --git a/apps/BingBot/compose.yaml b/apps/BingBot/compose.yaml new file mode 100644 index 00000000..f574f912 --- /dev/null +++ b/apps/BingBot/compose.yaml @@ -0,0 +1,10 @@ +version: '3' + +services: + discord_edgegpt: + build: . 
+ environment: + - DISCORD_BOT_TOKEN=${DISCORD_BOT_TOKEN} + volumes: + - ./cookies.json:/bot/cookies.json + - ./config.yml:/bot/config.yml diff --git a/apps/BingBot/cookies.json b/apps/BingBot/cookies.json new file mode 100644 index 00000000..4d0748fc --- /dev/null +++ b/apps/BingBot/cookies.json @@ -0,0 +1,6 @@ +[ + { + "name": "cookie1", + "value": "1lEXeWRSIPUsQ0S3tdAc3v7BexGK2qBlzsXz8j52w_HNBoOsegjiwRySQHmfoWduHVUxSXo6cETPP2qNrYWAz6k7wn43WGO9i7ll9_Wl7M6HA2c9twbKByfAtAB5fr26wPawQ6y1GCdakD_Kr4xdD20fvkytnmOmZu7Ktnb9mUVE605AAbJcIA9SOlRN5410ZPOnZA1cIzr4WtAFWNfQKPG6Sxk_zO5zvXQfYTyMNmOI" + } +] diff --git a/apps/BingBot/core/classes.py b/apps/BingBot/core/classes.py new file mode 100644 index 00000000..8dfdb114 --- /dev/null +++ b/apps/BingBot/core/classes.py @@ -0,0 +1,5 @@ +from discord.ext import commands + +class Cog_Extension(commands.Cog): + def __init__(self, bot): + self.bot = bot diff --git a/apps/BingBot/requirements.txt b/apps/BingBot/requirements.txt new file mode 100644 index 00000000..73773a31 --- /dev/null +++ b/apps/BingBot/requirements.txt @@ -0,0 +1,4 @@ +discord.py==2.3.2 +python-dotenv==0.21.1 +PyYAML==6.0 +EdgeGPT==0.13.2 diff --git a/apps/BingBot/src/imageCreate.py b/apps/BingBot/src/imageCreate.py new file mode 100644 index 00000000..b88d1d4b --- /dev/null +++ b/apps/BingBot/src/imageCreate.py @@ -0,0 +1,31 @@ +import discord +import asyncio +from src import log + +logger = log.setup_logger(__name__) +using_func = {} + +async def get_using_create(user_id): + return using_func[user_id] +async def set_using_create(user_id, status: bool): + using_func[user_id] = status + +async def create_image(interaction: discord.Interaction, prompt: str, image_generator): + await interaction.response.defer(ephemeral=False, thinking=True) + using_func[interaction.user.id] = True + try: + embeds = [] + prompts = f"> **{prompt}** - <@{str(interaction.user.id)}> (***BingImageCreator***)\n\n" + # Fetches image links + images = await image_generator.get_images(prompt) + # Add embed to list of embeds + [embeds.append(discord.Embed(url="https://www.bing.com/").set_image(url=image_link)) for image_link in images] + await interaction.followup.send(prompts, embeds=embeds, wait=True) + except asyncio.TimeoutError: + await interaction.followup.send("> **Error: Request timed out.**") + logger.exception("Error while create image: Request timed out.") + except Exception as e: + await interaction.followup.send(f"> **Error: {e}**") + logger.exception(f"Error while create image: {e}") + finally: + using_func[interaction.user.id] = False \ No newline at end of file diff --git a/apps/BingBot/src/log.py b/apps/BingBot/src/log.py new file mode 100644 index 00000000..fba4e94d --- /dev/null +++ b/apps/BingBot/src/log.py @@ -0,0 +1,67 @@ +import os +import logging +import logging.handlers + + +class CustomFormatter(logging.Formatter): + + LEVEL_COLORS = [ + (logging.DEBUG, '\x1b[40;1m'), + (logging.INFO, '\x1b[34;1m'), + (logging.WARNING, '\x1b[33;1m'), + (logging.ERROR, '\x1b[31m'), + (logging.CRITICAL, '\x1b[41m'), + ] + FORMATS = { + level: logging.Formatter( + f'\x1b[30;1m%(asctime)s\x1b[0m {color}%(levelname)-8s\x1b[0m \x1b[35m%(name)s\x1b[0m -> %(message)s', + '%Y-%m-%d %H:%M:%S' + ) + for level, color in LEVEL_COLORS + } + + def format(self, record): + formatter = self.FORMATS.get(record.levelno) + if formatter is None: + formatter = self.FORMATS[logging.DEBUG] + + # Override the traceback to always print in red + if record.exc_info: + text = formatter.formatException(record.exc_info) + record.exc_text = 
f'\x1b[31m{text}\x1b[0m' + + output = formatter.format(record) + + # Remove the cache layer + record.exc_text = None + return output + + +def setup_logger(module_name:str) -> logging.Logger: + # create logger + library, _, _ = module_name.partition('.py') + logger = logging.getLogger(library) + logger.setLevel(logging.INFO) + + if not logger.handlers: + # create console handler + console_handler = logging.StreamHandler() + console_handler.setLevel(logging.INFO) + console_handler.setFormatter(CustomFormatter()) + # specify that the log file path is the same as `main.py` file path + grandparent_dir = os.path.abspath(__file__ + "/../../") + log_name='discord_bot.log' + log_path = os.path.join(grandparent_dir, log_name) + # create local log handler + log_handler = logging.handlers.RotatingFileHandler( + filename=log_path, + encoding='utf-8', + maxBytes=32 * 1024 * 1024, # 32 MiB + backupCount=2, # Rotate through 5 files + ) + log_handler.setFormatter(CustomFormatter()) + # Add handlers to logger + logger.addHandler(log_handler) + logger.addHandler(console_handler) + + return logger diff --git a/apps/BingBot/src/response.py b/apps/BingBot/src/response.py new file mode 100644 index 00000000..371622b8 --- /dev/null +++ b/apps/BingBot/src/response.py @@ -0,0 +1,117 @@ +import discord +import re +from EdgeGPT.EdgeGPT import Chatbot, ConversationStyle +from src import log +from functools import partial + +USE_SUGGEST_RESPONSES = True +logger = log.setup_logger(__name__) +using_func = {} + +# To add suggest responses +class MyView(discord.ui.View): + def __init__(self, interaction: discord.Interaction, chatbot: Chatbot, conversation_style:str, suggest_responses:list): + super().__init__(timeout=120) + self.button_author =interaction.user.id + # Add buttons + for label in suggest_responses: + button = discord.ui.Button(label=label) + # Button event + async def callback(interaction: discord.Interaction, button_author: int, button: discord.ui.Button): + if interaction.user.id != button_author: + await interaction.response.defer(ephemeral=True, thinking=True) + await interaction.followup.send("You don't have permission to press this button.") + elif not using_func[interaction.user.id]: + await interaction.response.defer(ephemeral=False, thinking=True) + # When click the button, all buttons will disable. 
+ for child in self.children: + child.disabled = True + await interaction.followup.edit_message(message_id=interaction.message.id, view=self) + username = str(interaction.user) + usermessage = button.label + channel = str(interaction.channel) + logger.info(f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}] [button]") + await send_message(chatbot, interaction, usermessage, conversation_style) + else: + await interaction.response.defer(ephemeral=True, thinking=True) + await interaction.followup.send("Please wait for your last conversation to finish.") + self.add_item(button) + self.children[-1].callback = partial(callback, button_author=self.button_author, button=button) + +async def get_using_send(user_id): + return using_func[user_id] +async def set_using_send(user_id, status: bool): + using_func[user_id] = status + +async def send_message(chatbot: Chatbot, interaction: discord.Interaction, user_message: str, conversation_style: str): + using_func[interaction.user.id] = True + reply = '' + text = '' + link_embed = '' + images_embed = [] + all_url = [] + try: + # Change conversation style + if conversation_style == "creative": + reply = await chatbot.ask(prompt=user_message, conversation_style=ConversationStyle.creative, simplify_response=True) + elif conversation_style == "precise": + reply = await chatbot.ask(prompt=user_message, conversation_style=ConversationStyle.precise, simplify_response=True) + else: + reply = await chatbot.ask(prompt=user_message, conversation_style=ConversationStyle.balanced, simplify_response=True) + + # Get reply text + text = f"{reply['text']}" + text = re.sub(r'\[\^(\d+)\^\]', lambda match: '', text) + + # Get the URL, if available + try: + if len(reply['sources']) != 0: + for i, url in enumerate(reply['sources'], start=1): + if len(url['providerDisplayName']) == 0: + all_url.append(f"{i}. {url['seeMoreUrl']}") + else: + all_url.append(f"{i}. 
[{url['providerDisplayName']}]({url['seeMoreUrl']})") + link_text = "\n".join(all_url) + link_embed = discord.Embed(description=link_text) + except: + pass + + # Set the final message + user_message = user_message.replace("\n", "") + ask = f"> **{user_message}** - <@{str(interaction.user.id)}> (***style: {conversation_style}***)\n\n" + response = f"{ask}{text}" + + # Discord limit about 2000 characters for a message + while len(response) > 2000: + temp = response[:2000] + response = response[2000:] + await interaction.followup.send(temp) + + # Get the image, if available + try: + if len(link_embed) == 0: + all_image = re.findall("https?://[\w\./]+", str(reply["sources_text"])) + [images_embed.append(discord.Embed(url="https://www.bing.com/").set_image(url=image_link)) for image_link in all_image] + except: + pass + # Add all suggest responses in list + if USE_SUGGEST_RESPONSES: + suggest_responses = reply["suggestions"] + if images_embed: + await interaction.followup.send(response, view=MyView(interaction, chatbot, conversation_style, suggest_responses), embeds=images_embed, wait=True) + elif link_embed: + await interaction.followup.send(response, view=MyView(interaction, chatbot, conversation_style, suggest_responses), embed=link_embed, wait=True) + else: + await interaction.followup.send(response, view=MyView(interaction, chatbot, conversation_style, suggest_responses), wait=True) + else: + if images_embed: + await interaction.followup.send(response, embeds=images_embed, wait=True) + elif link_embed: + await interaction.followup.send(response, embed=link_embed, wait=True) + else: + await interaction.followup.send(response, wait=True) + except Exception as e: + await interaction.followup.send(f">>> **Error: {e}**") + logger.exception(f"Error while sending message: {e}") + finally: + using_func[interaction.user.id] = False diff --git a/apps/discord.py b/apps/discord.py index aa07f4e5..eebc48c8 100644 --- a/apps/discord.py +++ b/apps/discord.py @@ -1,17 +1,117 @@ -import os -import asyncio -import dalle3 import discord -import responses -from invoke import Executor -from dotenv import load_dotenv from discord.ext import commands +import asyncio +import os +from dotenv import load_dotenv +from invoke import Executor + +class BotCommands(commands.Cog): + def __init__(self, bot): + self.bot = bot + + @commands.command() + async def greet(self, ctx): + """greets the user.""" + await ctx.send(f"hello, {ctx.author.name}!") + + @commands.command() + async def help_me(self, ctx): + """provides a list of commands and their descriptions.""" + help_text = """ + - `!greet`: greets you. + - `!run [description]`: generates a video based on the given description. + - `!help_me`: provides this list of commands and their descriptions. 
+ """ + await ctx.send(help_text) + + @commands.command() + async def join(self, ctx): + """joins the voice channel that the user is in.""" + if ctx.author.voice: + channel = ctx.author.voice.channel + await channel.connect() + else: + await ctx.send("you are not in a voice channel!") + + @commands.command() + async def leave(self, ctx): + """leaves the voice channel that the self.bot is in.""" + if ctx.voice_client: + await ctx.voice_client.disconnect() + else: + await ctx.send("i am not in a voice channel!") + + @commands.command() + async def listen(self, ctx): + """starts listening to voice in the voice channel that the bot is in.""" + if ctx.voice_client: + # create a wavesink to record the audio + sink = discord.sinks.wavesink('audio.wav') + # start recording + ctx.voice_client.start_recording(sink) + await ctx.send("started listening and recording.") + else: + await ctx.send("i am not in a voice channel!") + + @commands.command() + async def generate_image(self, ctx, *, prompt: str = None, imggen: str = None): + """generates images based on the provided prompt""" + await ctx.send(f"generating images for prompt: `{prompt}`...") + loop = asyncio.get_event_loop() + + # initialize a future object for the dalle instance + future = loop.run_in_executor(Executor, imggen, prompt) + + try: + # wait for the dalle request to complete, with a timeout of 60 seconds + await asyncio.wait_for(future, timeout=300) + print("done generating images!") + + # list all files in the save_directory + all_files = [os.path.join(root, file) for root, _, files in os.walk(os.environ("SAVE_DIRECTORY")) for file in files] + + # sort files by their creation time (latest first) + sorted_files = sorted(all_files, key=os.path.getctime, reverse=True) + + # get the 4 most recent files + latest_files = sorted_files[:4] + print(f"sending {len(latest_files)} images to discord...") + + # send all the latest images in a single message + # storage_service = os.environ("STORAGE_SERVICE") # "https://storage.googleapis.com/your-bucket-name/ + # await ctx.send(files=[storage_service.upload(filepath) for filepath in latest_files]) + + except asyncio.timeouterror: + await ctx.send("the request took too long! it might have been censored or you're out of boosts. please try entering the prompt again.") + except Exception as e: + await ctx.send(f"an error occurred: {e}") + + @commands.command() + async def send_text(self, ctx, *, text: str, use_agent: bool = True): + """sends the provided text to the worker and returns the response""" + if use_agent: + response = self.bot.agent.run(text) + else: + response = self.bot.llm(text) + await ctx.send(response) + + @commands.Cog.listener() + async def on_ready(self): + print(f"we have logged in as {self.bot.user}") + + @commands.Cog.listener() + async def on_command_error(self, ctx, error): + """handles errors that occur while executing commands.""" + if isinstance(error, commands.CommandNotFound): + await ctx.send("that command does not exist!") + else: + await ctx.send(f"an error occurred: {error}") class Bot: - def __init__(self, agent, llm, command_prefix="!"): + def __init__(self, llm, command_prefix="!"): load_dotenv() - intents = discord.intents.default() + intents = discord.Intents.default() intents.messages = True intents.guilds = True intents.voice_states = True @@ -19,119 +119,12 @@ class Bot: # setup self.llm = llm - self.agent = agent - self. 
bot = commands.bot(command_prefix="!", intents=intents) + self.bot = commands.Bot(command_prefix="!", intents=intents) self.discord_token = os.getenv("DISCORD_TOKEN") self.storage_service = os.getenv("STORAGE_SERVICE") + # Load the BotCommands cog + self.bot.add_cog(BotCommands(self.bot)) - @self.bot.event - async def on_ready(): - print(f"we have logged in as {self.bot.user}") - - - @self.bot.command() - async def greet(ctx): - """greets the user.""" - await ctx.send(f"hello, {ctx.author.name}!") - - - @self.bot.command() - async def help_me(ctx): - """provides a list of commands and their descriptions.""" - help_text = """ - - `!greet`: greets you. - - `!run [description]`: generates a video based on the given description. - - `!help_me`: provides this list of commands and their descriptions. - """ - await ctx.send(help_text) - - @self.bot.event - async def on_command_error(ctx, error): - """handles errors that occur while executing commands.""" - if isinstance(error, commands.commandnotfound): - await ctx.send("that command does not exist!") - else: - await ctx.send(f"an error occurred: {error}") - - @self.bot.command() - async def join(ctx): - """joins the voice channel that the user is in.""" - if ctx.author.voice: - channel = ctx.author.voice.channel - await channel.connect() - else: - await ctx.send("you are not in a voice channel!") - - @self.bot.command() - async def leave(ctx): - """leaves the voice channel that the self.bot is in.""" - if ctx.voice_client: - await ctx.voice_client.disconnect() - else: - await ctx.send("i am not in a voice channel!") - - # voice_transcription.py - @self.bot.command() - async def listen(ctx): - """starts listening to voice in the voice channel that the bot is in.""" - if ctx.voice_client: - # create a wavesink to record the audio - sink = discord.sinks.wavesink('audio.wav') - # start recording - ctx.voice_client.start_recording(sink) - await ctx.send("started listening and recording.") - else: - await ctx.send("i am not in a voice channel!") - - # image_generator.py - @self.bot.command() - async def generate_image(ctx, *, prompt: str = None, imggen: str = None): - """generates images based on the provided prompt""" - await ctx.send(f"generating images for prompt: `{prompt}`...") - loop = asyncio.get_event_loop() - - # initialize a future object for the dalle instance - future = loop.run_in_executor(Executor, imggen, prompt) - - try: - # wait for the dalle request to complete, with a timeout of 60 seconds - await asyncio.wait_for(future, timeout=300) - print("done generating images!") - - # list all files in the save_directory - all_files = [os.path.join(root, file) for root, _, files in os.walk(os.environ("SAVE_DIRECTORY")) for file in files] - - # sort files by their creation time (latest first) - sorted_files = sorted(all_files, key=os.path.getctime, reverse=True) - - # get the 4 most recent files - latest_files = sorted_files[:4] - print(f"sending {len(latest_files)} images to discord...") - - # send all the latest images in a single message - # storage_service = os.environ("STORAGE_SERVICE") # "https://storage.googleapis.com/your-bucket-name/ - # await ctx.send(files=[storage_service.upload(filepath) for filepath in latest_files]) - - except asyncio.timeouterror: - await ctx.send("the request took too long! it might have been censored or you're out of boosts. 
please try entering the prompt again.") - except Exception as e: - await ctx.send(f"an error occurred: {e}") - - @self.bot.command() - async def send_text(ctx, *, text: str, use_agent: bool = True): - """sends the provided text to the worker and returns the response""" - if use_agent: - response = self.agent.run(text) - else: - response = self.llm.__call__(text) - await ctx.send(response) - - def add_command(self, name, func): - @self.bot.command() - async def command(ctx, *args): - reponse = func(*args) - await ctx.send(responses) - -def run(self) : - self.bot.run("DISCORD_TOKEN") + def run(self): + self.bot.run(self.discord_token) diff --git a/playground/models/bingchat.py b/bingchat.py similarity index 64% rename from playground/models/bingchat.py rename to bingchat.py index 6e44cbb5..f4b91cd7 100644 --- a/playground/models/bingchat.py +++ b/bingchat.py @@ -10,10 +10,10 @@ cookie = os.environ.get("BING_COOKIE") auth = os.environ.get("AUTH_COOKIE") # Use the worker to process a task -task = "hi" -# img_task = "Sunset over mountains" +# task = "hi" +img_task = "Sunset over mountains" -response = edgegpt(task) -# response = edgegpt.create_img(auth_cookie=cookie,auth_cookie_SRCHHPGUSR=auth,prompt=img_task) +# response = edgegpt(task) +response = edgegpt.create_img(auth_cookie=cookie,auth_cookie_SRCHHPGUSR=auth,prompt=img_task) print(response) diff --git a/playground/apps/bing_discord.py b/playground/apps/bing_discord.py index fc179d1c..d35253ff 100644 --- a/playground/apps/bing_discord.py +++ b/playground/apps/bing_discord.py @@ -3,12 +3,13 @@ from swarms.models.bing_chat import BingChat from apps.discord import Bot from dotenv import load_dotenv +load_dotenv() # Initialize the EdgeGPTModel cookie = os.environ.get("BING_COOKIE") auth = os.environ.get("AUTH_COOKIE") -bing = BingChat(cookies_path="./cookies.txt", bing_cookie=cookie, auth_cookie=auth) +bing = BingChat(cookies_path="./cookies.json") -bot = Bot(llm=bing, cookie=cookie, auth=auth) -bot.generate_image(imggen=bing.create_img()) +bot = Bot(llm=bing) +bot.generate_image(imggen=bing.create_img(auth_cookie=cookie, auth_cookie_SRCHHPGUSR=auth)) bot.send_text(use_agent=False) diff --git a/swarms/models/bing_chat.py b/swarms/models/bing_chat.py index cb90f97e..3ded87cd 100644 --- a/swarms/models/bing_chat.py +++ b/swarms/models/bing_chat.py @@ -58,7 +58,7 @@ class BingChat: images = image_generator.get_images(prompt) image_generator.save_images(images, output_dir=output_dir) - return Path(output_dir) / images[0]["path"] + return Path(output_dir) / images[0] @staticmethod def set_cookie_dir_path(path: str): From bfc93b1988c482acee55b3a62c04c13819848da2 Mon Sep 17 00:00:00 2001 From: Zack Date: Tue, 24 Oct 2023 12:40:21 -0500 Subject: [PATCH 03/13] feat: Add MythGen --- apps/MythGen/.env.example | 2 + apps/MythGen/LICENSE | 21 +++++++++ apps/MythGen/README.md | 71 +++++++++++++++++++++++++++++ apps/MythGen/cookies.json | 6 +++ apps/MythGen/main.py | 85 +++++++++++++++++++++++++++++++++++ apps/MythGen/myth.py | 62 +++++++++++++++++++++++++ apps/MythGen/requirements.txt | 8 ++++ 7 files changed, 255 insertions(+) create mode 100644 apps/MythGen/.env.example create mode 100644 apps/MythGen/LICENSE create mode 100644 apps/MythGen/README.md create mode 100644 apps/MythGen/cookies.json create mode 100644 apps/MythGen/main.py create mode 100644 apps/MythGen/myth.py create mode 100644 apps/MythGen/requirements.txt diff --git a/apps/MythGen/.env.example b/apps/MythGen/.env.example new file mode 100644 index 00000000..95580b46 --- /dev/null +++ 
b/apps/MythGen/.env.example @@ -0,0 +1,2 @@ +OPENAI_API_KEY="YOUR_API_KEY" +DALLE_COOKIE="YOUR_COOKIE" diff --git a/apps/MythGen/LICENSE b/apps/MythGen/LICENSE new file mode 100644 index 00000000..d8e9f005 --- /dev/null +++ b/apps/MythGen/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 pliny + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/apps/MythGen/README.md b/apps/MythGen/README.md new file mode 100644 index 00000000..ef684287 --- /dev/null +++ b/apps/MythGen/README.md @@ -0,0 +1,71 @@ +MythGen: A Dynamic New Art Form +Overview + +![panel_2](https://github.com/elder-plinius/MythGen/assets/133052465/86bb5784-845b-4db8-a38f-217169ea5201) + + +MythGen is an Iterative Multimedia Generator that allows users to create their own comic stories based on textual prompts. The system integrates state-of-the-art language and image models to provide a seamless and creative experience. +Features + + Initial Prompting: Kick-start your story with an initial text prompt. + Artistic Style Suffix: Maintain a consistent artistic style throughout your comic. + Image Generation: Generate captivating comic panels based on textual captions. + Caption Generation: Produce engaging captions for each comic panel. + Interactive Story Building: Select your favorite panels and captions to build your story iteratively. + Storyboard: View the sequence of your selected panels and their associated captions. + State Management: Keep track of the current state of your comic generation process. + User-Friendly Interface: Easy-to-use interface built on Gradio. + +Prerequisites +OpenAI API Key + +You will need an OpenAI API key to access GPT-3 for generating captions. Follow these steps to obtain one: + + Visit OpenAI's Developer Dashboard. + Sign up for an API key and follow the verification process. + Once verified, you will be provided with an API key. + +Bing Image Creator Cookie + +You should obtain your cookie to run this program. Follow these steps to obtain your cookie: + + Go to Bing Image Creator in your browser and log in to your account. + Press Ctrl+Shift+J to open developer tools. + Navigate to the Application section. + Click on the Cookies section. + Find the variable _U and copy its value. + +How to Use + + Initial Prompt: Start by inputting your initial comic concept. + Select a Panel: Choose your favorite panel and caption from the generated options. + Iterate: Use the "Next Part" button to generate the next part of your comic based on your latest selection. 
+ View Storyboard: See your selected comic panels and captions in a storyboard for a comprehensive view of your comic. + Finalize: Continue this process until you've created your full comic story. + +Installation + +bash + +pip install -r requirements.txt + +Running MythGen + +bash + +python main.py + +This will launch the Gradio interface where you can interact with MythGen. +Dependencies + + Python 3.x + Gradio + OpenAI's GPT-3 + DALL-E + +Contributing + +We welcome contributions! Please read the CONTRIBUTING.md for guidelines on how to contribute to this project. +License + +This project is licensed under the MIT License. See LICENSE.md for details. diff --git a/apps/MythGen/cookies.json b/apps/MythGen/cookies.json new file mode 100644 index 00000000..4d0748fc --- /dev/null +++ b/apps/MythGen/cookies.json @@ -0,0 +1,6 @@ +[ + { + "name": "cookie1", + "value": "1lEXeWRSIPUsQ0S3tdAc3v7BexGK2qBlzsXz8j52w_HNBoOsegjiwRySQHmfoWduHVUxSXo6cETPP2qNrYWAz6k7wn43WGO9i7ll9_Wl7M6HA2c9twbKByfAtAB5fr26wPawQ6y1GCdakD_Kr4xdD20fvkytnmOmZu7Ktnb9mUVE605AAbJcIA9SOlRN5410ZPOnZA1cIzr4WtAFWNfQKPG6Sxk_zO5zvXQfYTyMNmOI" + } +] diff --git a/apps/MythGen/main.py b/apps/MythGen/main.py new file mode 100644 index 00000000..b5119b36 --- /dev/null +++ b/apps/MythGen/main.py @@ -0,0 +1,85 @@ +import openai +import os +import dotenv +import logging +import gradio as gr +from dalle3 import Dalle +from bing_chat import BingChat + +# from swarms.models.bingchat import BingChat + +dotenv.load_dotenv(".env") + +# Initialize the EdgeGPTModel +openai_api_key = os.getenv("OPENAI_API_KEY") +model = BingChat(cookie_path = "./cookies.json") +cookie = os.environ.get("BING_COOKIE") +auth = os.environ.get("AUTH_COOKIE") + + + +response = model("Generate") + +# Initialize DALLE3 API +cookie = os.getenv("DALLE_COOKIE") +dalle = Dalle(cookie) + +logging.basicConfig(level=logging.INFO) + +accumulated_story = "" +latest_caption = "" +standard_suffix = "" +storyboard = [] + +def generate_images_with_dalle(caption): + model.create_img(auth_cookie=cookie,auth_cookie_SRCHHPGUSR=auth,prompt=caption) + urls = dalle.get_urls() + return urls + +def generate_single_caption(text): + prompt = f"A comic about {text}." + response = model(prompt) + return response + +def interpret_text_with_gpt(text, suffix): + return generate_single_caption(f"{text} {suffix}") + +def create_standard_suffix(original_prompt): + return f"In the style of {original_prompt}" + +def gradio_interface(text=None, next_button_clicked=False): + global accumulated_story, latest_caption, standard_suffix, storyboard + + if not standard_suffix: + standard_suffix = create_standard_suffix(text) + + if next_button_clicked: + new_caption = interpret_text_with_gpt(latest_caption, standard_suffix) + new_urls = generate_images_with_dalle(new_caption) + latest_caption = new_caption + storyboard.append((new_urls, new_caption)) + + elif text: + caption = interpret_text_with_gpt(text, standard_suffix) + comic_panel_urls = generate_images_with_dalle(caption) + latest_caption = caption + storyboard.append((comic_panel_urls, caption)) + + storyboard_html = "" + for urls, cap in storyboard: + for url in urls: + storyboard_html += f'{cap}
{cap}
' + + return storyboard_html + +if __name__ == "__main__": + iface = gr.Interface( + fn=gradio_interface, + inputs=[ + gr.inputs.Textbox(default="Type your story concept here", optional=True, label="Story Concept"), + gr.inputs.Checkbox(label="Generate Next Part") + ], + outputs=[gr.outputs.HTML()], + live=False # Submit button will appear + ) + iface.launch() diff --git a/apps/MythGen/myth.py b/apps/MythGen/myth.py new file mode 100644 index 00000000..760e3b89 --- /dev/null +++ b/apps/MythGen/myth.py @@ -0,0 +1,62 @@ +from flask import Flask, request, jsonify +import openai +import logging +from dalle3 import Dalle +import os +import gradio as gr +import requests +from PIL import Image +from io import BytesIO +import dotenv + +app = Flask(__name__) + +dotenv.load_dotenv(".env") + +# Initialize OpenAI API, INPUT YOUR OWN OPENAI KEY +openai.api_key = os.getenv("OPENAI_API_KEY") + +# Initialize DALLE3 API, INPUT YOUR OWN COOKIE +cookie = os.getenv("DALLE_COOKIE") +dalle = Dalle(cookie) + + +def interpret_text_with_gpt(text): + model_engine = "text-davinci-002" + panel_instructions = "Create a comic panel where" + refined_prompt = f"{panel_instructions} {text}" + + response = openai.Completion.create( + engine=model_engine, + prompt=refined_prompt, + max_tokens=100 + ) + + final_prompt = response.choices[0].text.strip() + return final_prompt + +def generate_images_with_dalle(refined_prompt): + dalle.create(refined_prompt) + urls = dalle.get_urls() + return urls + +def gradio_interface(text): + refined_prompt = interpret_text_with_gpt(text) + comic_panel_urls = generate_images_with_dalle(refined_prompt) + + output = [] + for i, url in enumerate(comic_panel_urls): + response = requests.get(url) + img = Image.open(BytesIO(response.content)) + caption = f"Caption for panel {i+1}" + output.append((img, caption)) + + return output + +iface = gr.Interface( + fn=gradio_interface, + inputs=["text"], + outputs=[gr.outputs.Image(type="pil", label="Comic Panels"), "text"] +) + +iface.launch() diff --git a/apps/MythGen/requirements.txt b/apps/MythGen/requirements.txt new file mode 100644 index 00000000..e1fda005 --- /dev/null +++ b/apps/MythGen/requirements.txt @@ -0,0 +1,8 @@ +dalle3==0.0.7 +Flask==2.3.2 +gradio==3.48.0 +openai==0.28.1 +Pillow==10.1.0 +python-dotenv==1.0.0 +Requests==2.31.0 +swarms==1.8.2 From 0ac5b549310189275a9296cbe22ad05f3c99ac43 Mon Sep 17 00:00:00 2001 From: Zack Date: Tue, 24 Oct 2023 18:00:42 -0500 Subject: [PATCH 04/13] feat: add mythgen, gradio bot --- .gitignore | 1 + apps/GradioBot/main.py | 197 ++++++++++++++++++++++++ apps/GradioBot/utils.py | 41 +++++ apps/MythGen/LICENSE | 21 --- apps/MythGen/main.py | 32 ++-- apps/MythGen/myth.py | 62 -------- apps/open-sourcerer/docker-compose.yaml | 1 + apps/orchistrator/docker-compose.yml | 6 +- swarms/models/bing_chat.py | 2 +- 9 files changed, 257 insertions(+), 106 deletions(-) create mode 100644 apps/GradioBot/main.py create mode 100644 apps/GradioBot/utils.py delete mode 100644 apps/MythGen/LICENSE delete mode 100644 apps/MythGen/myth.py diff --git a/.gitignore b/.gitignore index 92dd6c81..2fa95dc2 100644 --- a/.gitignore +++ b/.gitignore @@ -49,6 +49,7 @@ share/python-wheels/ MANIFEST output/* cookes.json +flagged/* # PyInstaller # Usually these files are written by a python script from a template diff --git a/apps/GradioBot/main.py b/apps/GradioBot/main.py new file mode 100644 index 00000000..fb985531 --- /dev/null +++ b/apps/GradioBot/main.py @@ -0,0 +1,197 @@ +import asyncio +import argparse +from collections import Counter 
+import json +import pathlib +import re + + +import discord +from discord.ext import commands +import gradio as gr +from gradio import utils +import requests + +from typing import Dict, List + +from utils import * + + +lock = asyncio.Lock() + +bot = commands.Bot("", intents=discord.Intents(messages=True, guilds=True)) + + +GUILD_SPACES_FILE = "guild_spaces.pkl" + + +if pathlib.Path(GUILD_SPACES_FILE).exists(): + guild_spaces = read_pickle_file(GUILD_SPACES_FILE) + assert isinstance(guild_spaces, dict), f"{GUILD_SPACES_FILE} in invalid format." + guild_blocks = {} + delete_keys = [] + for k, v in guild_spaces.items(): + try: + guild_blocks[k] = gr.Interface.load(v, src="spaces") + except ValueError: + delete_keys.append(k) + for k in delete_keys: + del guild_spaces[k] +else: + guild_spaces: Dict[int, str] = {} + guild_blocks: Dict[int, gr.Blocks] = {} + + +HASHED_USERS_FILE = "users.pkl" + +if pathlib.Path(HASHED_USERS_FILE).exists(): + hashed_users = read_pickle_file(HASHED_USERS_FILE) + assert isinstance(hashed_users, list), f"{HASHED_USERS_FILE} in invalid format." +else: + hashed_users: List[str] = [] + + +@bot.event +async def on_ready(): + print(f"Logged in as {bot.user}") + print(f"Running in {len(bot.guilds)} servers...") + + +async def run_prediction(space: gr.Blocks, *inputs): + inputs = list(inputs) + fn_index = 0 + processed_inputs = space.serialize_data(fn_index=fn_index, inputs=inputs) + batch = space.dependencies[fn_index]["batch"] + + if batch: + processed_inputs = [[inp] for inp in processed_inputs] + + outputs = await space.process_api( + fn_index=fn_index, inputs=processed_inputs, request=None, state={} + ) + outputs = outputs["data"] + + if batch: + outputs = [out[0] for out in outputs] + + processed_outputs = space.deserialize_data(fn_index, outputs) + processed_outputs = utils.resolve_singleton(processed_outputs) + + return processed_outputs + + +async def display_stats(message: discord.Message): + await message.channel.send( + f"Running in {len(bot.guilds)} servers\n" + f"Total # of users: {len(hashed_users)}\n" + f"------------------" + ) + await message.channel.send(f"Most popular spaces:") + # display the top 10 most frequently occurring strings and their counts + spaces = guild_spaces.values() + counts = Counter(spaces) + for space, count in counts.most_common(10): + await message.channel.send(f"- {space}: {count}") + + +async def load_space(guild: discord.Guild, message: discord.Message, content: str): + iframe_url = ( + requests.get(f"https://huggingface.co/api/spaces/{content}/host") + .json() + .get("host") + ) + if iframe_url is None: + return await message.channel.send( + f"Space: {content} not found. If you'd like to make a prediction, enclose the inputs in quotation marks." + ) + else: + await message.channel.send( + f"Loading Space: https://huggingface.co/spaces/{content}..." + ) + interface = gr.Interface.load(content, src="spaces") + guild_spaces[guild.id] = content + guild_blocks[guild.id] = interface + asyncio.create_task(update_pickle_file(guild_spaces, GUILD_SPACES_FILE)) + if len(content) > 32 - len(f"{bot.name} []"): # type: ignore + nickname = content[: 32 - len(f"{bot.name} []") - 3] + "..." # type: ignore + else: + nickname = content + nickname = f"{bot.name} [{nickname}]" # type: ignore + await guild.me.edit(nick=nickname) + await message.channel.send( + "Ready to make predictions! Type in your inputs and enclose them in quotation marks." 
+ ) + + +async def disconnect_space(bot: commands.Bot, guild: discord.Guild): + guild_spaces.pop(guild.id, None) + guild_blocks.pop(guild.id, None) + asyncio.create_task(update_pickle_file(guild_spaces, GUILD_SPACES_FILE)) + await guild.me.edit(nick=bot.name) # type: ignore + + +async def make_prediction(guild: discord.Guild, message: discord.Message, content: str): + if guild.id in guild_spaces: + params = re.split(r' (?=")', content) + params = [p.strip("'\"") for p in params] + space = guild_blocks[guild.id] + predictions = await run_prediction(space, *params) + if isinstance(predictions, (tuple, list)): + for p in predictions: + await send_file_or_text(message.channel, p) + else: + await send_file_or_text(message.channel, predictions) + return + else: + await message.channel.send( + "No Space is currently running. Please type in the name of a Hugging Face Space name first, e.g. abidlabs/en2fr" + ) + await guild.me.edit(nick=bot.name) # type: ignore + + +@bot.event +async def on_message(message: discord.Message): + if message.author == bot.user: + return + h = hash_user_id(message.author.id) + if h not in hashed_users: + hashed_users.append(h) + asyncio.create_task(update_pickle_file(hashed_users, HASHED_USERS_FILE)) + else: + if message.content: + content = remove_tags(message.content) + guild = message.channel.guild + assert guild, "Message not sent in a guild." + + if content.strip() == "exit": + await disconnect_space(bot, guild) + elif content.strip() == "stats": + await display_stats(message) + elif content.startswith('"') or content.startswith("'"): + await make_prediction(guild, message, content) + else: + await load_space(guild, message, content) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--token", + type=str, + help="API key for the Discord bot. 
You can set this to your Discord token if you'd like to make your own clone of the Gradio Bot.", + required=False, + default="", + ) + args = parser.parse_args() + + if args.token.strip(): + discord_token = args.token + bot.env = "staging" # type: ignore + bot.name = "StagingBot" # type: ignore + else: + with open("secrets.json") as fp: + discord_token = json.load(fp)["discord_token"] + bot.env = "prod" # type: ignore + bot.name = "GradioBot" # type: ignore + + bot.run(discord_token) diff --git a/apps/GradioBot/utils.py b/apps/GradioBot/utils.py new file mode 100644 index 00000000..5657b36f --- /dev/null +++ b/apps/GradioBot/utils.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +import asyncio +import pickle +import hashlib +import pathlib +from typing import Dict, List + +import discord + +lock = asyncio.Lock() + + +async def update_pickle_file(data: Dict | List, file_path: str): + async with lock: + with open(file_path, "wb") as fp: + pickle.dump(data, fp) + + +def read_pickle_file(file_path: str): + with open(file_path, "rb") as fp: + return pickle.load(fp) + + +async def send_file_or_text(channel, file_or_text: str): + # if the file exists, send as a file + if pathlib.Path(str(file_or_text)).exists(): + with open(file_or_text, "rb") as f: + return await channel.send(file=discord.File(f)) + else: + return await channel.send(file_or_text) + + +def remove_tags(content: str) -> str: + content = content.replace("<@1040198143695933501>", "") + content = content.replace("<@1057338428938788884>", "") + return content.strip() + + +def hash_user_id(user_id: int) -> str: + return hashlib.sha256(str(user_id).encode("utf-8")).hexdigest() diff --git a/apps/MythGen/LICENSE b/apps/MythGen/LICENSE deleted file mode 100644 index d8e9f005..00000000 --- a/apps/MythGen/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 pliny - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
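The apps/MythGen/main.py diff below swaps the dalle3 client for BingChat plus Bing Image Creator. For reference, here is a minimal sketch of that image-generation path; it mirrors the calls in the diff, assumes BingChat exposes create_img() and images() as used there, and reads the cookie values from the environment (the diff itself passes the literal strings "BING_COOKIE" and "AUTH_COOKIE"):

```python
# Sketch of the BingChat-based image flow adopted in the diff below.
# Assumptions: BingChat exposes create_img() and images() as called here,
# and the Bing cookies are supplied via environment variables instead of
# the hard-coded "BING_COOKIE"/"AUTH_COOKIE" strings the diff passes.
import os

import dotenv
from swarms.models.bing_chat import BingChat

dotenv.load_dotenv(".env")

model = BingChat(
    cookies_path="./cookies.json",
    bing_cookie=os.environ.get("BING_COOKIE"),
    auth_cookie=os.environ.get("AUTH_COOKIE"),
)


def generate_images_with_bingchat(caption: str) -> list:
    """Generate comic-panel images for `caption` and return their URLs."""
    model.create_img(caption)      # trigger Bing Image Creator for the caption
    return model.images(caption)   # collect the resulting image URLs
```

Reading the cookies from the environment, as the later [PATCH 05/13] in this series moves toward, avoids committing a cookies.json file to the repository.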
diff --git a/apps/MythGen/main.py b/apps/MythGen/main.py index b5119b36..96d02e3c 100644 --- a/apps/MythGen/main.py +++ b/apps/MythGen/main.py @@ -3,27 +3,19 @@ import os import dotenv import logging import gradio as gr -from dalle3 import Dalle -from bing_chat import BingChat +from BingImageCreator import ImageGen +from swarms.models.bing_chat import BingChat # from swarms.models.bingchat import BingChat - dotenv.load_dotenv(".env") # Initialize the EdgeGPTModel -openai_api_key = os.getenv("OPENAI_API_KEY") -model = BingChat(cookie_path = "./cookies.json") cookie = os.environ.get("BING_COOKIE") auth = os.environ.get("AUTH_COOKIE") - - +model = BingChat(cookies_path="./cookies.json", bing_cookie="BING_COOKIE",auth_cookie="AUTH_COOKIE") response = model("Generate") -# Initialize DALLE3 API -cookie = os.getenv("DALLE_COOKIE") -dalle = Dalle(cookie) - logging.basicConfig(level=logging.INFO) accumulated_story = "" @@ -31,14 +23,14 @@ latest_caption = "" standard_suffix = "" storyboard = [] -def generate_images_with_dalle(caption): - model.create_img(auth_cookie=cookie,auth_cookie_SRCHHPGUSR=auth,prompt=caption) - urls = dalle.get_urls() - return urls +def generate_images_with_bingchat(caption): + img_path = model.create_img(caption) + img_urls = model.images(caption) + return img_urls def generate_single_caption(text): prompt = f"A comic about {text}." - response = model(prompt) + response = model(text) return response def interpret_text_with_gpt(text, suffix): @@ -54,14 +46,14 @@ def gradio_interface(text=None, next_button_clicked=False): standard_suffix = create_standard_suffix(text) if next_button_clicked: - new_caption = interpret_text_with_gpt(latest_caption, standard_suffix) - new_urls = generate_images_with_dalle(new_caption) + new_caption = generate_single_caption(latest_caption + " " + standard_suffix) + new_urls = generate_images_with_bingchat(new_caption) latest_caption = new_caption storyboard.append((new_urls, new_caption)) elif text: - caption = interpret_text_with_gpt(text, standard_suffix) - comic_panel_urls = generate_images_with_dalle(caption) + caption = generate_single_caption(text + " " + standard_suffix) + comic_panel_urls = generate_images_with_bingchat(caption) latest_caption = caption storyboard.append((comic_panel_urls, caption)) diff --git a/apps/MythGen/myth.py b/apps/MythGen/myth.py deleted file mode 100644 index 760e3b89..00000000 --- a/apps/MythGen/myth.py +++ /dev/null @@ -1,62 +0,0 @@ -from flask import Flask, request, jsonify -import openai -import logging -from dalle3 import Dalle -import os -import gradio as gr -import requests -from PIL import Image -from io import BytesIO -import dotenv - -app = Flask(__name__) - -dotenv.load_dotenv(".env") - -# Initialize OpenAI API, INPUT YOUR OWN OPENAI KEY -openai.api_key = os.getenv("OPENAI_API_KEY") - -# Initialize DALLE3 API, INPUT YOUR OWN COOKIE -cookie = os.getenv("DALLE_COOKIE") -dalle = Dalle(cookie) - - -def interpret_text_with_gpt(text): - model_engine = "text-davinci-002" - panel_instructions = "Create a comic panel where" - refined_prompt = f"{panel_instructions} {text}" - - response = openai.Completion.create( - engine=model_engine, - prompt=refined_prompt, - max_tokens=100 - ) - - final_prompt = response.choices[0].text.strip() - return final_prompt - -def generate_images_with_dalle(refined_prompt): - dalle.create(refined_prompt) - urls = dalle.get_urls() - return urls - -def gradio_interface(text): - refined_prompt = interpret_text_with_gpt(text) - comic_panel_urls = 
generate_images_with_dalle(refined_prompt) - - output = [] - for i, url in enumerate(comic_panel_urls): - response = requests.get(url) - img = Image.open(BytesIO(response.content)) - caption = f"Caption for panel {i+1}" - output.append((img, caption)) - - return output - -iface = gr.Interface( - fn=gradio_interface, - inputs=["text"], - outputs=[gr.outputs.Image(type="pil", label="Comic Panels"), "text"] -) - -iface.launch() diff --git a/apps/open-sourcerer/docker-compose.yaml b/apps/open-sourcerer/docker-compose.yaml index ebd08f37..7168363b 100644 --- a/apps/open-sourcerer/docker-compose.yaml +++ b/apps/open-sourcerer/docker-compose.yaml @@ -1,6 +1,7 @@ version: '3' services: my-python-app: + container_name: Open-Soucerer build: . ports: - "80:80" diff --git a/apps/orchistrator/docker-compose.yml b/apps/orchistrator/docker-compose.yml index d648751f..af6c313d 100644 --- a/apps/orchistrator/docker-compose.yml +++ b/apps/orchistrator/docker-compose.yml @@ -2,8 +2,10 @@ version: '3' services: server-bot: - container_name: server-bot - image: allenrkeen/server-bot:latest + container_name: Leonidas + build: + context: . + dockerfile: Dockerfile volumes: - /var/run/docker.sock:/var/run/docker.sock #required env_file: diff --git a/swarms/models/bing_chat.py b/swarms/models/bing_chat.py index 3ded87cd..750d5e62 100644 --- a/swarms/models/bing_chat.py +++ b/swarms/models/bing_chat.py @@ -25,7 +25,7 @@ class BingChat: """ - def __init__(self, cookies_path: str, bing_cookie: str = None, auth_cookie: str = None): + def __init__(self, cookies_path: str = None, bing_cookie: str = None, auth_cookie: str = None): self.cookies = json.loads(open(cookies_path, encoding="utf-8").read()) self.bot = asyncio.run(Chatbot.create(cookies=self.cookies)) self.auth_cookie = auth_cookie From 2bb9e407af287ebe4f6b570e92d7ef4a4823c77f Mon Sep 17 00:00:00 2001 From: Zack Date: Tue, 24 Oct 2023 19:33:28 -0500 Subject: [PATCH 05/13] feat: remove cookies.json requirement, update env --- .env.example | 5 ++- main.py | 77 ++++++++++++++++++++++++++++++++++++++ swarms/models/bing_chat.py | 14 +++++-- 3 files changed, 92 insertions(+), 4 deletions(-) create mode 100644 main.py diff --git a/.env.example b/.env.example index 8c73ae02..345b10a1 100644 --- a/.env.example +++ b/.env.example @@ -47,7 +47,10 @@ REVGPT_UNVERIFIED_PLUGIN_DOMAINS="showme.redstarplugin.com" CHATGPT_BASE_URL="" #Discord Bot -################################ SAVE_DIRECTORY="" STORAGE_SERVICE="" DISCORD_TOKEN="" + +#Bing +AUTH_COOKIE="_U value at bing.com" +AUTH_COOKIE_SRCHHPGUSR"_SRCHHPGUSR value at bing.com" diff --git a/main.py b/main.py new file mode 100644 index 00000000..96d02e3c --- /dev/null +++ b/main.py @@ -0,0 +1,77 @@ +import openai +import os +import dotenv +import logging +import gradio as gr +from BingImageCreator import ImageGen +from swarms.models.bing_chat import BingChat + +# from swarms.models.bingchat import BingChat +dotenv.load_dotenv(".env") + +# Initialize the EdgeGPTModel +cookie = os.environ.get("BING_COOKIE") +auth = os.environ.get("AUTH_COOKIE") +model = BingChat(cookies_path="./cookies.json", bing_cookie="BING_COOKIE",auth_cookie="AUTH_COOKIE") + +response = model("Generate") + +logging.basicConfig(level=logging.INFO) + +accumulated_story = "" +latest_caption = "" +standard_suffix = "" +storyboard = [] + +def generate_images_with_bingchat(caption): + img_path = model.create_img(caption) + img_urls = model.images(caption) + return img_urls + +def generate_single_caption(text): + prompt = f"A comic about {text}." 
+ response = model(text) + return response + +def interpret_text_with_gpt(text, suffix): + return generate_single_caption(f"{text} {suffix}") + +def create_standard_suffix(original_prompt): + return f"In the style of {original_prompt}" + +def gradio_interface(text=None, next_button_clicked=False): + global accumulated_story, latest_caption, standard_suffix, storyboard + + if not standard_suffix: + standard_suffix = create_standard_suffix(text) + + if next_button_clicked: + new_caption = generate_single_caption(latest_caption + " " + standard_suffix) + new_urls = generate_images_with_bingchat(new_caption) + latest_caption = new_caption + storyboard.append((new_urls, new_caption)) + + elif text: + caption = generate_single_caption(text + " " + standard_suffix) + comic_panel_urls = generate_images_with_bingchat(caption) + latest_caption = caption + storyboard.append((comic_panel_urls, caption)) + + storyboard_html = "" + for urls, cap in storyboard: + for url in urls: + storyboard_html += f'{cap}
{cap}
' + + return storyboard_html + +if __name__ == "__main__": + iface = gr.Interface( + fn=gradio_interface, + inputs=[ + gr.inputs.Textbox(default="Type your story concept here", optional=True, label="Story Concept"), + gr.inputs.Checkbox(label="Generate Next Part") + ], + outputs=[gr.outputs.HTML()], + live=False # Submit button will appear + ) + iface.launch() diff --git a/swarms/models/bing_chat.py b/swarms/models/bing_chat.py index 750d5e62..4c7de939 100644 --- a/swarms/models/bing_chat.py +++ b/swarms/models/bing_chat.py @@ -25,11 +25,19 @@ class BingChat: """ - def __init__(self, cookies_path: str = None, bing_cookie: str = None, auth_cookie: str = None): - self.cookies = json.loads(open(cookies_path, encoding="utf-8").read()) + + def __init__(self, cookies_path: str = None, auth_cookie: str = None, auth_cookie_SRCHHPGUSR: str = None): + auth_cookie = os.environ("AUTH_COOKIE") + auth_cookie_SRCHHPGUSR + if cookies_path: + self.cookies = json.loads(open(cookies_path, encoding="utf-8").read()) + elif auth_cookie: + self.cookies = auth_cookie + else: + raise ValueError("Either cookies_path or auth_cookie must be provided.") self.bot = asyncio.run(Chatbot.create(cookies=self.cookies)) self.auth_cookie = auth_cookie - self.auth_cookie_SRCHHPGUSR = bing_cookie + self.auth_cookie_SRCHHPGUSR = auth_cookie_SRCHHPGUSR def __call__( self, prompt: str, style: ConversationStyle = ConversationStyle.creative From a606f504b1dec155d0dbf2354d92e084dd4adb60 Mon Sep 17 00:00:00 2001 From: Zack Date: Tue, 24 Oct 2023 19:40:24 -0500 Subject: [PATCH 06/13] feat: remove cookies.json requirement, update env --- main.py | 4 +--- swarms/models/bing_chat.py | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/main.py b/main.py index 96d02e3c..799ebd81 100644 --- a/main.py +++ b/main.py @@ -10,9 +10,7 @@ from swarms.models.bing_chat import BingChat dotenv.load_dotenv(".env") # Initialize the EdgeGPTModel -cookie = os.environ.get("BING_COOKIE") -auth = os.environ.get("AUTH_COOKIE") -model = BingChat(cookies_path="./cookies.json", bing_cookie="BING_COOKIE",auth_cookie="AUTH_COOKIE") +model = BingChat() response = model("Generate") diff --git a/swarms/models/bing_chat.py b/swarms/models/bing_chat.py index 4c7de939..7135ba45 100644 --- a/swarms/models/bing_chat.py +++ b/swarms/models/bing_chat.py @@ -28,7 +28,7 @@ class BingChat: def __init__(self, cookies_path: str = None, auth_cookie: str = None, auth_cookie_SRCHHPGUSR: str = None): auth_cookie = os.environ("AUTH_COOKIE") - auth_cookie_SRCHHPGUSR + auth_cookie_SRCHHPGUSR = os.enviro("AUTH_COOKIE_SRCHHPGUSR") if cookies_path: self.cookies = json.loads(open(cookies_path, encoding="utf-8").read()) elif auth_cookie: From 354ca80f293422f24a3ad4acd2bb3df1090b8fd3 Mon Sep 17 00:00:00 2001 From: Zack Date: Wed, 25 Oct 2023 00:05:27 -0500 Subject: [PATCH 07/13] chore: Run black fromater --- apps/BingBot/bing_bot.py | 63 +++++----- apps/BingBot/cogs/edgegpt.py | 92 ++++++++++---- apps/BingBot/cogs/event.py | 216 +++++++++++++++++++++++--------- apps/BingBot/cogs/help.py | 33 +++-- apps/BingBot/compose.yaml | 2 +- apps/BingBot/core/classes.py | 1 + apps/BingBot/src/imageCreate.py | 17 ++- apps/BingBot/src/log.py | 25 ++-- apps/BingBot/src/response.py | 139 +++++++++++++++----- apps/MythGen/main.py | 32 +++-- apps/discord.py | 16 ++- bingchat.py | 19 --- 12 files changed, 457 insertions(+), 198 deletions(-) delete mode 100644 bingchat.py diff --git a/apps/BingBot/bing_bot.py b/apps/BingBot/bing_bot.py index 4a562411..233ca9a7 100644 --- 
a/apps/BingBot/bing_bot.py +++ b/apps/BingBot/bing_bot.py @@ -9,41 +9,46 @@ from dotenv import load_dotenv load_dotenv() -bot = commands.Bot(command_prefix='!', intents = discord.Intents.all()) +bot = commands.Bot(command_prefix="!", intents=discord.Intents.all()) # init loggger logger = src.log.setup_logger(__name__) + def restart_bot(): # Replace current process with new instance of bot.py os.execl(sys.executable, sys.executable, "bot.py") + def check_verion() -> None: # Read the requirements.txt file and add each line to a list - with open('requirements.txt') as f: + with open("requirements.txt") as f: required = f.read().splitlines() # For each library listed in requirements.txt, check if the corresponding version is installed for package in required: # Use the pkg_resources library to get information about the installed version of the library - package_name, package_verion = package.split('==') + package_name, package_verion = package.split("==") installed = pkg_resources.get_distribution(package_name) # Extract the library name and version number name, version = installed.project_name, installed.version # Compare the version number to see if it matches the one in requirements.txt - if package != f'{name}=={version}': - logger.error(f'{name} version {version} is installed but does not match the requirements') + if package != f"{name}=={version}": + logger.error( + f"{name} version {version} is installed but does not match the requirements" + ) sys.exit() + @bot.event async def on_ready(): bot_status = discord.Status.online - bot_activity = discord.Activity(type=discord.ActivityType.playing, name = "bing.com") - await bot.change_presence(status = bot_status, activity = bot_activity) - for Filename in os.listdir('./cogs'): - if Filename.endswith('.py'): - await bot.load_extension(f'cogs.{Filename[:-3]}') - logger.info(f'{bot.user} is now running!') + bot_activity = discord.Activity(type=discord.ActivityType.playing, name="bing.com") + await bot.change_presence(status=bot_status, activity=bot_activity) + for Filename in os.listdir("./cogs"): + if Filename.endswith(".py"): + await bot.load_extension(f"cogs.{Filename[:-3]}") + logger.info(f"{bot.user} is now running!") print("Bot is Up and Ready!") try: synced = await bot.tree.sync() @@ -51,39 +56,44 @@ async def on_ready(): except Exception as e: print(e) + # Load command -@commands.is_owner() +@commands.is_owner() @bot.command() async def load(ctx, extension): - await bot.load_extension(f'cogs.{extension}') - await ctx.author.send(f'> **Loaded {extension} done.**') + await bot.load_extension(f"cogs.{extension}") + await ctx.author.send(f"> **Loaded {extension} done.**") + # Unload command @commands.is_owner() @bot.command() async def unload(ctx, extension): - await bot.unload_extension(f'cogs.{extension}') - await ctx.author.send(f'> **Un-Loaded {extension} done.**') + await bot.unload_extension(f"cogs.{extension}") + await ctx.author.send(f"> **Un-Loaded {extension} done.**") + # Empty discord_bot.log file @commands.is_owner() @bot.command() async def clean(ctx): - open('discord_bot.log', 'w').close() - await ctx.author.send(f'> **Successfully emptied the file!**') + open("discord_bot.log", "w").close() + await ctx.author.send(f"> **Successfully emptied the file!**") + # Get discord_bot.log file @commands.is_owner() @bot.command() async def getLog(ctx): try: - with open('discord_bot.log', 'rb') as f: + with open("discord_bot.log", "rb") as f: file = discord.File(f) await ctx.author.send(file=file) await ctx.author.send("> **Send 
successfully!**") except: await ctx.author.send("> **Send failed!**") + # Upload new Bing cookies and restart the bot @commands.is_owner() @bot.command() @@ -92,11 +102,11 @@ async def upload(ctx): for attachment in ctx.message.attachments: if str(attachment)[-4:] == ".txt": content = await attachment.read() - with open("cookies.json", "w", encoding = "utf-8") as f: - json.dump(json.loads(content), f, indent = 2) + with open("cookies.json", "w", encoding="utf-8") as f: + json.dump(json.loads(content), f, indent=2) if not isinstance(ctx.channel, discord.abc.PrivateChannel): await ctx.message.delete() - await ctx.author.send(f'> **Upload new cookies successfully!**') + await ctx.author.send(f"> **Upload new cookies successfully!**") logger.warning("\x1b[31mCookies has been setup successfully\x1b[0m") restart_bot() else: @@ -104,12 +114,7 @@ async def upload(ctx): else: await ctx.author.send("> **Didn't get any file.**") -if __name__ == '__main__': + +if __name__ == "__main__": check_verion() bot.run(os.getenv("DISCORD_BOT_TOKEN")) - - - - - - diff --git a/apps/BingBot/cogs/edgegpt.py b/apps/BingBot/cogs/edgegpt.py index 683780db..6b303418 100644 --- a/apps/BingBot/cogs/edgegpt.py +++ b/apps/BingBot/cogs/edgegpt.py @@ -19,6 +19,7 @@ users_chatbot = {} users_image_generator = {} user_conversation_style = {} + async def init_chatbot(user_id): with open("./cookies.json", encoding="utf-8") as file: cookie_json = json.load(file) @@ -26,7 +27,7 @@ async def init_chatbot(user_id): if cookie.get("name") == "_U": auth_cookie = cookie.get("value") break - + auth_cookie = os.environ.get("AUTH_COOKIE") auth_cookie_SRCHHPGUSR = os.environ.get("AUTH_COOKIE_SRCHHPGUSR") # auth_cookie_SRCHHPGUSR = os.environ.get("AUTH_COOKIE_SRCHHPGUSR") @@ -34,6 +35,7 @@ async def init_chatbot(user_id): users_image_generator[user_id] = ImageGenAsync(auth_cookie, quiet=True) user_conversation_style[user_id] = "balanced" + class UserChatbot: def __init__(self, cookies): self.chatbot = Chatbot(cookies=cookies) @@ -47,6 +49,7 @@ class UserChatbot: async def reset(self): await self.chatbot.reset() + class EdgeGPT(Cog_Extension): # Chat with Bing @app_commands.command(name="bing", description="Have a chat with Bing") @@ -57,7 +60,7 @@ class EdgeGPT(Cog_Extension): await set_using_send(interaction.user.id, False) using = await get_using_send(interaction.user.id) if not using: - await interaction.response.defer(ephemeral=False, thinking=True) + await interaction.response.defer(ephemeral=False, thinking=True) username = str(interaction.user) usermessage = message channel = str(interaction.channel) @@ -65,11 +68,17 @@ class EdgeGPT(Cog_Extension): if user_id not in users_chatbot: await init_chatbot(interaction.user.id) conversation_style = user_conversation_style[user_id] - logger.info(f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}]") - await users_chatbot[user_id].send_message(interaction, usermessage, conversation_style) + logger.info( + f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}]" + ) + await users_chatbot[user_id].send_message( + interaction, usermessage, conversation_style + ) else: await interaction.response.defer(ephemeral=True, thinking=True) - await interaction.followup.send("> **Please wait for your last conversation to finish.**") + await interaction.followup.send( + "> **Please wait for your last conversation to finish.**" + ) # Reset Bing conversation @app_commands.command(name="reset", description="Reset Bing conversation") @@ -81,25 
+90,49 @@ class EdgeGPT(Cog_Extension): await interaction.followup.send("> **Info: Reset finish.**") logger.warning("\x1b[31mBing has been successfully reset\x1b[0m") except: - await interaction.followup.send(f"> **You don't have any conversation yet.**") + await interaction.followup.send( + f"> **You don't have any conversation yet.**" + ) logger.exception("Bing reset failed.") # Switch conversation style @app_commands.command(name="switch_style", description="Switch conversation style") - @app_commands.choices(style=[app_commands.Choice(name="Creative", value="creative"), app_commands.Choice(name="Balanced", value="balanced"), app_commands.Choice(name="Precise", value="precise")]) - async def switch_style(self, interaction: discord.Interaction, style: app_commands.Choice[str]): + @app_commands.choices( + style=[ + app_commands.Choice(name="Creative", value="creative"), + app_commands.Choice(name="Balanced", value="balanced"), + app_commands.Choice(name="Precise", value="precise"), + ] + ) + async def switch_style( + self, interaction: discord.Interaction, style: app_commands.Choice[str] + ): await interaction.response.defer(ephemeral=True, thinking=True) user_id = interaction.user.id if user_id not in users_chatbot: await init_chatbot(user_id) user_conversation_style[user_id] = style.value - await interaction.followup.send(f"> **Info: successfull switch conversation style to {style.value}.**") - logger.warning(f"\x1b[31mConversation style has been successfully switch to {style.value}\x1b[0m") - + await interaction.followup.send( + f"> **Info: successfull switch conversation style to {style.value}.**" + ) + logger.warning( + f"\x1b[31mConversation style has been successfully switch to {style.value}\x1b[0m" + ) + # Set and delete personal Bing Cookies @app_commands.command(name="bing_cookies", description="Set or delete Bing Cookies") - @app_commands.choices(choice=[app_commands.Choice(name="set", value="set"), app_commands.Choice(name="delete", value="delete")]) - async def cookies_setting(self, interaction: discord.Interaction, choice: app_commands.Choice[str], cookies_file: Optional[discord.Attachment]=None): + @app_commands.choices( + choice=[ + app_commands.Choice(name="set", value="set"), + app_commands.Choice(name="delete", value="delete"), + ] + ) + async def cookies_setting( + self, + interaction: discord.Interaction, + choice: app_commands.Choice[str], + cookies_file: Optional[discord.Attachment] = None, + ): await interaction.response.defer(ephemeral=True, thinking=True) user_id = interaction.user.id if choice.value == "set": @@ -111,11 +144,15 @@ class EdgeGPT(Cog_Extension): break users_image_generator[user_id] = ImageGenAsync(auth_cookie, quiet=True) users_chatbot[user_id] = UserChatbot(cookies=content) - user_conversation_style[user_id] = "balanced" + user_conversation_style[user_id] = "balanced" await interaction.followup.send("> **Upload successful!**") - logger.warning(f"\x1b[31m{interaction.user} set Bing Cookies successful\x1b[0m") + logger.warning( + f"\x1b[31m{interaction.user} set Bing Cookies successful\x1b[0m" + ) except: - await interaction.followup.send("> **Please upload your Bing Cookies.**") + await interaction.followup.send( + "> **Please upload your Bing Cookies.**" + ) else: try: del users_chatbot[user_id] @@ -124,10 +161,14 @@ class EdgeGPT(Cog_Extension): await interaction.followup.send("> **Delete finish.**") logger.warning(f"\x1b[31m{interaction.user} delete Cookies\x1b[0m") except: - await interaction.followup.send("> **You don't have any Bing 
Cookies.**") + await interaction.followup.send( + "> **You don't have any Bing Cookies.**" + ) # Create images - @app_commands.command(name="create_image", description="generate image by Bing image creator") + @app_commands.command( + name="create_image", description="generate image by Bing image creator" + ) async def create_image(self, interaction: discord.Interaction, *, prompt: str): user_id = interaction.user.id if interaction.user.id not in users_chatbot: @@ -137,12 +178,19 @@ class EdgeGPT(Cog_Extension): except: await set_using_create(user_id, False) using = await get_using_create(user_id) - if not using: - logger.info(f"\x1b[31m{interaction.user}\x1b[0m : '{prompt}' ({interaction.channel}) [BingImageCreator]") - await users_chatbot[user_id].create_image(interaction, prompt, users_image_generator[user_id] ) + if not using: + logger.info( + f"\x1b[31m{interaction.user}\x1b[0m : '{prompt}' ({interaction.channel}) [BingImageCreator]" + ) + await users_chatbot[user_id].create_image( + interaction, prompt, users_image_generator[user_id] + ) else: await interaction.response.defer(ephemeral=True, thinking=True) - await interaction.followup.send("> **Please wait for your last image to create finish.**") + await interaction.followup.send( + "> **Please wait for your last image to create finish.**" + ) + async def setup(bot): await bot.add_cog(EdgeGPT(bot)) diff --git a/apps/BingBot/cogs/event.py b/apps/BingBot/cogs/event.py index f42b6e5b..9b81cf30 100644 --- a/apps/BingBot/cogs/event.py +++ b/apps/BingBot/cogs/event.py @@ -25,64 +25,101 @@ with open("./cookies.json", encoding="utf-8") as file: cookies = json.load(file) chatbot = Chatbot(cookies=cookies) + # To add suggest responses class MyView(discord.ui.View): - def __init__(self, chatbot: Chatbot, suggest_responses:list): + def __init__(self, chatbot: Chatbot, suggest_responses: list): super().__init__(timeout=120) # Add buttons for label in suggest_responses: button = discord.ui.Button(label=label) + # Button event - async def callback(interaction: discord.Interaction, button: discord.ui.Button): - await interaction.response.defer(ephemeral=False, thinking=True) - # When click the button, all buttons will disable. - for child in self.children: - child.disabled = True - await interaction.followup.edit_message(message_id=interaction.message.id, view=self) - username = str(interaction.user) - usermessage = button.label - channel = str(interaction.channel) - logger.info(f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}] [button]") - task = asyncio.create_task(send_message(chatbot, interaction, usermessage)) - await asyncio.gather(task) + async def callback( + interaction: discord.Interaction, button: discord.ui.Button + ): + await interaction.response.defer(ephemeral=False, thinking=True) + # When click the button, all buttons will disable. 
+ for child in self.children: + child.disabled = True + await interaction.followup.edit_message( + message_id=interaction.message.id, view=self + ) + username = str(interaction.user) + usermessage = button.label + channel = str(interaction.channel) + logger.info( + f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}] [button]" + ) + task = asyncio.create_task( + send_message(chatbot, interaction, usermessage) + ) + await asyncio.gather(task) + self.add_item(button) self.children[-1].callback = partial(callback, button=button) + + # Show Dropdown class DropdownView(discord.ui.View): def __init__(self): super().__init__(timeout=180) options = [ - discord.SelectOption(label="Creative", description="Switch conversation style to Creative", emoji='🎨'), - discord.SelectOption(label="Balanced", description="Switch conversation style to Balanced", emoji='⚖️'), - discord.SelectOption(label="Precise", description="Switch conversation style to Precise", emoji='🔎'), - discord.SelectOption(label="Reset", description="Reset conversation", emoji="🔄") + discord.SelectOption( + label="Creative", + description="Switch conversation style to Creative", + emoji="🎨", + ), + discord.SelectOption( + label="Balanced", + description="Switch conversation style to Balanced", + emoji="⚖️", + ), + discord.SelectOption( + label="Precise", + description="Switch conversation style to Precise", + emoji="🔎", + ), + discord.SelectOption( + label="Reset", description="Reset conversation", emoji="🔄" + ), ] dropdown = discord.ui.Select( - placeholder="Choose setting", - min_values=1, - max_values=1, - options=options + placeholder="Choose setting", min_values=1, max_values=1, options=options ) dropdown.callback = self.dropdown_callback self.add_item(dropdown) + # Dropdown event async def dropdown_callback(self, interaction: discord.Interaction): await interaction.response.defer(ephemeral=False, thinking=True) - if interaction.data['values'][0] == "Creative": + if interaction.data["values"][0] == "Creative": await set_conversation_style("creative") - await interaction.followup.send(f"> **Info: successfull switch conversation style to *{interaction.data['values'][0]}*.**") - logger.warning(f"\x1b[31mConversation style has been successfully switch to {interaction.data['values'][0]}\x1b[0m") - elif interaction.data['values'][0] == "Balanced": + await interaction.followup.send( + f"> **Info: successfull switch conversation style to *{interaction.data['values'][0]}*.**" + ) + logger.warning( + f"\x1b[31mConversation style has been successfully switch to {interaction.data['values'][0]}\x1b[0m" + ) + elif interaction.data["values"][0] == "Balanced": await set_conversation_style("balanced") - await interaction.followup.send(f"> **Info: successfull switch conversation style to *{interaction.data['values'][0]}*.**") - logger.warning(f"\x1b[31mConversation style has been successfully switch to {interaction.data['values'][0]}\x1b[0m") - elif interaction.data['values'][0] == "Precise": + await interaction.followup.send( + f"> **Info: successfull switch conversation style to *{interaction.data['values'][0]}*.**" + ) + logger.warning( + f"\x1b[31mConversation style has been successfully switch to {interaction.data['values'][0]}\x1b[0m" + ) + elif interaction.data["values"][0] == "Precise": await set_conversation_style("precise") - await interaction.followup.send(f"> **Info: successfull switch conversation style to *{interaction.data['values'][0]}*.**") - logger.warning(f"\x1b[31mConversation style has been 
successfully switch to {interaction.data['values'][0]}\x1b[0m") + await interaction.followup.send( + f"> **Info: successfull switch conversation style to *{interaction.data['values'][0]}*.**" + ) + logger.warning( + f"\x1b[31mConversation style has been successfully switch to {interaction.data['values'][0]}\x1b[0m" + ) else: await chatbot.reset() await interaction.followup.send(f"> **Info: Reset finish.**") @@ -90,46 +127,66 @@ class DropdownView(discord.ui.View): # disable dropdown after select for dropdown in self.children: dropdown.disabled = True - await interaction.followup.edit_message(message_id=interaction.message.id, view=self) + await interaction.followup.edit_message( + message_id=interaction.message.id, view=self + ) + # Set conversation style async def set_conversation_style(style: str): global conversation_style conversation_style = style + + async def set_chatbot(cookies): global chatbot chatbot = Chatbot(cookies=cookies) + async def send_message(chatbot: Chatbot, message, user_message: str): async with sem: if isinstance(message, discord.message.Message): await message.channel.typing() - reply = '' - text = '' - link_embed = '' + reply = "" + text = "" + link_embed = "" images_embed = [] all_url = [] try: - # Change conversation style + # Change conversation style if conversation_style == "creative": - reply = await chatbot.ask(prompt=user_message, conversation_style=ConversationStyle.creative, simplify_response=True) + reply = await chatbot.ask( + prompt=user_message, + conversation_style=ConversationStyle.creative, + simplify_response=True, + ) elif conversation_style == "precise": - reply = await chatbot.ask(prompt=user_message, conversation_style=ConversationStyle.precise, simplify_response=True) + reply = await chatbot.ask( + prompt=user_message, + conversation_style=ConversationStyle.precise, + simplify_response=True, + ) else: - reply = await chatbot.ask(prompt=user_message, conversation_style=ConversationStyle.balanced, simplify_response=True) + reply = await chatbot.ask( + prompt=user_message, + conversation_style=ConversationStyle.balanced, + simplify_response=True, + ) # Get reply text text = f"{reply['text']}" - text = re.sub(r'\[\^(\d+)\^\]', lambda match: '', text) + text = re.sub(r"\[\^(\d+)\^\]", lambda match: "", text) # Get the URL, if available try: - if len(reply['sources']) != 0: - for i, url in enumerate(reply['sources'], start=1): - if len(url['providerDisplayName']) == 0: + if len(reply["sources"]) != 0: + for i, url in enumerate(reply["sources"], start=1): + if len(url["providerDisplayName"]) == 0: all_url.append(f"{i}. {url['seeMoreUrl']}") else: - all_url.append(f"{i}. [{url['providerDisplayName']}]({url['seeMoreUrl']})") + all_url.append( + f"{i}. 
[{url['providerDisplayName']}]({url['seeMoreUrl']})" + ) link_text = "\n".join(all_url) link_embed = discord.Embed(description=link_text) except: @@ -147,45 +204,80 @@ async def send_message(chatbot: Chatbot, message, user_message: str): while len(response) > 2000: temp = response[:2000] response = response[2000:] - if isinstance(message, discord.interactions.Interaction): + if isinstance(message, discord.interactions.Interaction): await message.followup.send(temp) - else: + else: await message.channel.send(temp) # Get the image, if available try: if len(link_embed) == 0: - all_image = re.findall("https?://[\w\./]+", str(reply["sources_text"])) - [images_embed.append(discord.Embed(url="https://www.bing.com/").set_image(url=image_link)) for image_link in all_image] + all_image = re.findall( + "https?://[\w\./]+", str(reply["sources_text"]) + ) + [ + images_embed.append( + discord.Embed(url="https://www.bing.com/").set_image( + url=image_link + ) + ) + for image_link in all_image + ] except: pass - + if USE_SUGGEST_RESPONSES: suggest_responses = reply["suggestions"] if images_embed: if isinstance(message, discord.interactions.Interaction): - await message.followup.send(response, view=MyView(chatbot, suggest_responses), embeds=images_embed, wait=True) + await message.followup.send( + response, + view=MyView(chatbot, suggest_responses), + embeds=images_embed, + wait=True, + ) else: - await message.channel.send(response, view=MyView(chatbot, suggest_responses), embeds=images_embed) + await message.channel.send( + response, + view=MyView(chatbot, suggest_responses), + embeds=images_embed, + ) elif link_embed: if isinstance(message, discord.interactions.Interaction): - await message.followup.send(response, view=MyView(chatbot, suggest_responses), embed=link_embed, wait=True) + await message.followup.send( + response, + view=MyView(chatbot, suggest_responses), + embed=link_embed, + wait=True, + ) else: - await message.channel.send(response, view=MyView(chatbot, suggest_responses), embed=link_embed) + await message.channel.send( + response, + view=MyView(chatbot, suggest_responses), + embed=link_embed, + ) else: if isinstance(message, discord.interactions.Interaction): - await message.followup.send(response, view=MyView(chatbot, suggest_responses), wait=True) + await message.followup.send( + response, view=MyView(chatbot, suggest_responses), wait=True + ) else: - await message.channel.send(response, view=MyView(chatbot, suggest_responses)) + await message.channel.send( + response, view=MyView(chatbot, suggest_responses) + ) else: if images_embed: if isinstance(message, discord.interactions.Interaction): - await message.followup.send(response, embeds=images_embed, wait=True) + await message.followup.send( + response, embeds=images_embed, wait=True + ) else: await message.channel.send(response, embeds=images_embed) elif link_embed: if isinstance(message, discord.interactions.Interaction): - await message.followup.send(response, embed=link_embed, wait=True) + await message.followup.send( + response, embed=link_embed, wait=True + ) else: await message.channel.send(response, embed=link_embed) else: @@ -200,6 +292,7 @@ async def send_message(chatbot: Chatbot, message, user_message: str): await message.channel.send(f">>> **Error: {e}**") logger.exception(f"Error while sending message: {e}") + class Event(Cog_Extension): @commands.Cog.listener() async def on_message(self, message: discord.Message): @@ -207,17 +300,22 @@ class Event(Cog_Extension): return if self.bot.user in message.mentions: if not 
MENTION_CHANNEL_ID or message.channel.id == MENTION_CHANNEL_ID: - content = re.sub(r'<@.*?>', '', message.content).strip() + content = re.sub(r"<@.*?>", "", message.content).strip() if len(content) > 0: username = str(message.author) channel = str(message.channel) - logger.info(f"\x1b[31m{username}\x1b[0m : '{content}' ({channel}) [Style: {conversation_style}]") + logger.info( + f"\x1b[31m{username}\x1b[0m : '{content}' ({channel}) [Style: {conversation_style}]" + ) task = asyncio.create_task(send_message(chatbot, message, content)) await asyncio.gather(task) else: await message.channel.send(view=DropdownView()) elif MENTION_CHANNEL_ID is not None: - await message.channel.send(f"> **Can only be mentioned at <#{self.bot.get_channel(MENTION_CHANNEL_ID).id}>**") + await message.channel.send( + f"> **Can only be mentioned at <#{self.bot.get_channel(MENTION_CHANNEL_ID).id}>**" + ) + async def setup(bot): await bot.add_cog(Event(bot)) diff --git a/apps/BingBot/cogs/help.py b/apps/BingBot/cogs/help.py index 16ecff78..eef6a61d 100644 --- a/apps/BingBot/cogs/help.py +++ b/apps/BingBot/cogs/help.py @@ -2,16 +2,35 @@ import discord from core.classes import Cog_Extension from discord import app_commands + class Help(Cog_Extension): - @app_commands.command(name = "help", description = "Show how to use") + @app_commands.command(name="help", description="Show how to use") async def help(self, interaction: discord.Interaction): - embed=discord.Embed(title="Help", description="[see more](https://github.com/FuseFairy/DiscordBot-EdgeGPT/blob/main/README.md)\n\n**COMMANDS -**") - embed.add_field(name="/bing_cookies", value="Set and delete your Bing Cookies.", inline=False) + embed = discord.Embed( + title="Help", + description="[see more](https://github.com/FuseFairy/DiscordBot-EdgeGPT/blob/main/README.md)\n\n**COMMANDS -**", + ) + embed.add_field( + name="/bing_cookies", + value="Set and delete your Bing Cookies.", + inline=False, + ) embed.add_field(name="/bing", value="Chat with Bing.", inline=False) - embed.add_field(name="/reset", value="Reset your Bing conversation.", inline=False) - embed.add_field(name="/switch_style", value="Switch your Bing conversation style.", inline=False) - embed.add_field(name="/create_image", value="Generate image by Bing Image Creator.", inline=False) + embed.add_field( + name="/reset", value="Reset your Bing conversation.", inline=False + ) + embed.add_field( + name="/switch_style", + value="Switch your Bing conversation style.", + inline=False, + ) + embed.add_field( + name="/create_image", + value="Generate image by Bing Image Creator.", + inline=False, + ) await interaction.response.send_message(embed=embed) + async def setup(bot): - await bot.add_cog(Help(bot)) \ No newline at end of file + await bot.add_cog(Help(bot)) diff --git a/apps/BingBot/compose.yaml b/apps/BingBot/compose.yaml index f574f912..64e278fa 100644 --- a/apps/BingBot/compose.yaml +++ b/apps/BingBot/compose.yaml @@ -1,7 +1,7 @@ version: '3' services: - discord_edgegpt: + BingBot: build: . 
environment: - DISCORD_BOT_TOKEN=${DISCORD_BOT_TOKEN} diff --git a/apps/BingBot/core/classes.py b/apps/BingBot/core/classes.py index 8dfdb114..23c4cbb1 100644 --- a/apps/BingBot/core/classes.py +++ b/apps/BingBot/core/classes.py @@ -1,5 +1,6 @@ from discord.ext import commands + class Cog_Extension(commands.Cog): def __init__(self, bot): self.bot = bot diff --git a/apps/BingBot/src/imageCreate.py b/apps/BingBot/src/imageCreate.py index b88d1d4b..0b68b44d 100644 --- a/apps/BingBot/src/imageCreate.py +++ b/apps/BingBot/src/imageCreate.py @@ -5,10 +5,14 @@ from src import log logger = log.setup_logger(__name__) using_func = {} + async def get_using_create(user_id): return using_func[user_id] + + async def set_using_create(user_id, status: bool): - using_func[user_id] = status + using_func[user_id] = status + async def create_image(interaction: discord.Interaction, prompt: str, image_generator): await interaction.response.defer(ephemeral=False, thinking=True) @@ -16,10 +20,15 @@ async def create_image(interaction: discord.Interaction, prompt: str, image_gene try: embeds = [] prompts = f"> **{prompt}** - <@{str(interaction.user.id)}> (***BingImageCreator***)\n\n" - # Fetches image links + # Fetches image links images = await image_generator.get_images(prompt) # Add embed to list of embeds - [embeds.append(discord.Embed(url="https://www.bing.com/").set_image(url=image_link)) for image_link in images] + [ + embeds.append( + discord.Embed(url="https://www.bing.com/").set_image(url=image_link) + ) + for image_link in images + ] await interaction.followup.send(prompts, embeds=embeds, wait=True) except asyncio.TimeoutError: await interaction.followup.send("> **Error: Request timed out.**") @@ -28,4 +37,4 @@ async def create_image(interaction: discord.Interaction, prompt: str, image_gene await interaction.followup.send(f"> **Error: {e}**") logger.exception(f"Error while create image: {e}") finally: - using_func[interaction.user.id] = False \ No newline at end of file + using_func[interaction.user.id] = False diff --git a/apps/BingBot/src/log.py b/apps/BingBot/src/log.py index fba4e94d..ed04a4a3 100644 --- a/apps/BingBot/src/log.py +++ b/apps/BingBot/src/log.py @@ -4,18 +4,17 @@ import logging.handlers class CustomFormatter(logging.Formatter): - LEVEL_COLORS = [ - (logging.DEBUG, '\x1b[40;1m'), - (logging.INFO, '\x1b[34;1m'), - (logging.WARNING, '\x1b[33;1m'), - (logging.ERROR, '\x1b[31m'), - (logging.CRITICAL, '\x1b[41m'), + (logging.DEBUG, "\x1b[40;1m"), + (logging.INFO, "\x1b[34;1m"), + (logging.WARNING, "\x1b[33;1m"), + (logging.ERROR, "\x1b[31m"), + (logging.CRITICAL, "\x1b[41m"), ] FORMATS = { level: logging.Formatter( - f'\x1b[30;1m%(asctime)s\x1b[0m {color}%(levelname)-8s\x1b[0m \x1b[35m%(name)s\x1b[0m -> %(message)s', - '%Y-%m-%d %H:%M:%S' + f"\x1b[30;1m%(asctime)s\x1b[0m {color}%(levelname)-8s\x1b[0m \x1b[35m%(name)s\x1b[0m -> %(message)s", + "%Y-%m-%d %H:%M:%S", ) for level, color in LEVEL_COLORS } @@ -28,7 +27,7 @@ class CustomFormatter(logging.Formatter): # Override the traceback to always print in red if record.exc_info: text = formatter.formatException(record.exc_info) - record.exc_text = f'\x1b[31m{text}\x1b[0m' + record.exc_text = f"\x1b[31m{text}\x1b[0m" output = formatter.format(record) @@ -37,9 +36,9 @@ class CustomFormatter(logging.Formatter): return output -def setup_logger(module_name:str) -> logging.Logger: +def setup_logger(module_name: str) -> logging.Logger: # create logger - library, _, _ = module_name.partition('.py') + library, _, _ = module_name.partition(".py") logger = 
logging.getLogger(library) logger.setLevel(logging.INFO) @@ -50,12 +49,12 @@ def setup_logger(module_name:str) -> logging.Logger: console_handler.setFormatter(CustomFormatter()) # specify that the log file path is the same as `main.py` file path grandparent_dir = os.path.abspath(__file__ + "/../../") - log_name='discord_bot.log' + log_name = "discord_bot.log" log_path = os.path.join(grandparent_dir, log_name) # create local log handler log_handler = logging.handlers.RotatingFileHandler( filename=log_path, - encoding='utf-8', + encoding="utf-8", maxBytes=32 * 1024 * 1024, # 32 MiB backupCount=2, # Rotate through 5 files ) diff --git a/apps/BingBot/src/response.py b/apps/BingBot/src/response.py index 371622b8..47960a73 100644 --- a/apps/BingBot/src/response.py +++ b/apps/BingBot/src/response.py @@ -8,79 +8,127 @@ USE_SUGGEST_RESPONSES = True logger = log.setup_logger(__name__) using_func = {} + # To add suggest responses class MyView(discord.ui.View): - def __init__(self, interaction: discord.Interaction, chatbot: Chatbot, conversation_style:str, suggest_responses:list): + def __init__( + self, + interaction: discord.Interaction, + chatbot: Chatbot, + conversation_style: str, + suggest_responses: list, + ): super().__init__(timeout=120) - self.button_author =interaction.user.id + self.button_author = interaction.user.id # Add buttons for label in suggest_responses: button = discord.ui.Button(label=label) + # Button event - async def callback(interaction: discord.Interaction, button_author: int, button: discord.ui.Button): + async def callback( + interaction: discord.Interaction, + button_author: int, + button: discord.ui.Button, + ): if interaction.user.id != button_author: await interaction.response.defer(ephemeral=True, thinking=True) - await interaction.followup.send("You don't have permission to press this button.") + await interaction.followup.send( + "You don't have permission to press this button." + ) elif not using_func[interaction.user.id]: await interaction.response.defer(ephemeral=False, thinking=True) # When click the button, all buttons will disable. for child in self.children: child.disabled = True - await interaction.followup.edit_message(message_id=interaction.message.id, view=self) + await interaction.followup.edit_message( + message_id=interaction.message.id, view=self + ) username = str(interaction.user) usermessage = button.label channel = str(interaction.channel) - logger.info(f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}] [button]") - await send_message(chatbot, interaction, usermessage, conversation_style) + logger.info( + f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}] [button]" + ) + await send_message( + chatbot, interaction, usermessage, conversation_style + ) else: await interaction.response.defer(ephemeral=True, thinking=True) - await interaction.followup.send("Please wait for your last conversation to finish.") + await interaction.followup.send( + "Please wait for your last conversation to finish." 
+ ) + self.add_item(button) - self.children[-1].callback = partial(callback, button_author=self.button_author, button=button) + self.children[-1].callback = partial( + callback, button_author=self.button_author, button=button + ) + async def get_using_send(user_id): return using_func[user_id] + + async def set_using_send(user_id, status: bool): using_func[user_id] = status -async def send_message(chatbot: Chatbot, interaction: discord.Interaction, user_message: str, conversation_style: str): + +async def send_message( + chatbot: Chatbot, + interaction: discord.Interaction, + user_message: str, + conversation_style: str, +): using_func[interaction.user.id] = True - reply = '' - text = '' - link_embed = '' + reply = "" + text = "" + link_embed = "" images_embed = [] all_url = [] try: # Change conversation style if conversation_style == "creative": - reply = await chatbot.ask(prompt=user_message, conversation_style=ConversationStyle.creative, simplify_response=True) + reply = await chatbot.ask( + prompt=user_message, + conversation_style=ConversationStyle.creative, + simplify_response=True, + ) elif conversation_style == "precise": - reply = await chatbot.ask(prompt=user_message, conversation_style=ConversationStyle.precise, simplify_response=True) + reply = await chatbot.ask( + prompt=user_message, + conversation_style=ConversationStyle.precise, + simplify_response=True, + ) else: - reply = await chatbot.ask(prompt=user_message, conversation_style=ConversationStyle.balanced, simplify_response=True) + reply = await chatbot.ask( + prompt=user_message, + conversation_style=ConversationStyle.balanced, + simplify_response=True, + ) # Get reply text text = f"{reply['text']}" - text = re.sub(r'\[\^(\d+)\^\]', lambda match: '', text) - + text = re.sub(r"\[\^(\d+)\^\]", lambda match: "", text) + # Get the URL, if available try: - if len(reply['sources']) != 0: - for i, url in enumerate(reply['sources'], start=1): - if len(url['providerDisplayName']) == 0: + if len(reply["sources"]) != 0: + for i, url in enumerate(reply["sources"], start=1): + if len(url["providerDisplayName"]) == 0: all_url.append(f"{i}. {url['seeMoreUrl']}") else: - all_url.append(f"{i}. [{url['providerDisplayName']}]({url['seeMoreUrl']})") + all_url.append( + f"{i}. 
[{url['providerDisplayName']}]({url['seeMoreUrl']})" + ) link_text = "\n".join(all_url) link_embed = discord.Embed(description=link_text) except: pass - + # Set the final message user_message = user_message.replace("\n", "") ask = f"> **{user_message}** - <@{str(interaction.user.id)}> (***style: {conversation_style}***)\n\n" response = f"{ask}{text}" - + # Discord limit about 2000 characters for a message while len(response) > 2000: temp = response[:2000] @@ -91,27 +139,56 @@ async def send_message(chatbot: Chatbot, interaction: discord.Interaction, user_ try: if len(link_embed) == 0: all_image = re.findall("https?://[\w\./]+", str(reply["sources_text"])) - [images_embed.append(discord.Embed(url="https://www.bing.com/").set_image(url=image_link)) for image_link in all_image] + [ + images_embed.append( + discord.Embed(url="https://www.bing.com/").set_image( + url=image_link + ) + ) + for image_link in all_image + ] except: pass # Add all suggest responses in list if USE_SUGGEST_RESPONSES: suggest_responses = reply["suggestions"] if images_embed: - await interaction.followup.send(response, view=MyView(interaction, chatbot, conversation_style, suggest_responses), embeds=images_embed, wait=True) + await interaction.followup.send( + response, + view=MyView( + interaction, chatbot, conversation_style, suggest_responses + ), + embeds=images_embed, + wait=True, + ) elif link_embed: - await interaction.followup.send(response, view=MyView(interaction, chatbot, conversation_style, suggest_responses), embed=link_embed, wait=True) + await interaction.followup.send( + response, + view=MyView( + interaction, chatbot, conversation_style, suggest_responses + ), + embed=link_embed, + wait=True, + ) else: - await interaction.followup.send(response, view=MyView(interaction, chatbot, conversation_style, suggest_responses), wait=True) + await interaction.followup.send( + response, + view=MyView( + interaction, chatbot, conversation_style, suggest_responses + ), + wait=True, + ) else: if images_embed: - await interaction.followup.send(response, embeds=images_embed, wait=True) + await interaction.followup.send( + response, embeds=images_embed, wait=True + ) elif link_embed: await interaction.followup.send(response, embed=link_embed, wait=True) else: await interaction.followup.send(response, wait=True) except Exception as e: - await interaction.followup.send(f">>> **Error: {e}**") - logger.exception(f"Error while sending message: {e}") + await interaction.followup.send(f">>> **Error: {e}**") + logger.exception(f"Error while sending message: {e}") finally: using_func[interaction.user.id] = False diff --git a/apps/MythGen/main.py b/apps/MythGen/main.py index 96d02e3c..1c684286 100644 --- a/apps/MythGen/main.py +++ b/apps/MythGen/main.py @@ -6,13 +6,15 @@ import gradio as gr from BingImageCreator import ImageGen from swarms.models.bing_chat import BingChat -# from swarms.models.bingchat import BingChat +# from swarms.models.bingchat import BingChat dotenv.load_dotenv(".env") # Initialize the EdgeGPTModel cookie = os.environ.get("BING_COOKIE") auth = os.environ.get("AUTH_COOKIE") -model = BingChat(cookies_path="./cookies.json", bing_cookie="BING_COOKIE",auth_cookie="AUTH_COOKIE") +model = BingChat( + cookies_path="./cookies.json", bing_cookie="BING_COOKIE", auth_cookie="AUTH_COOKIE" +) response = model("Generate") @@ -23,34 +25,39 @@ latest_caption = "" standard_suffix = "" storyboard = [] + def generate_images_with_bingchat(caption): img_path = model.create_img(caption) img_urls = model.images(caption) return 
img_urls + def generate_single_caption(text): prompt = f"A comic about {text}." response = model(text) return response + def interpret_text_with_gpt(text, suffix): return generate_single_caption(f"{text} {suffix}") + def create_standard_suffix(original_prompt): return f"In the style of {original_prompt}" + def gradio_interface(text=None, next_button_clicked=False): global accumulated_story, latest_caption, standard_suffix, storyboard - + if not standard_suffix: standard_suffix = create_standard_suffix(text) - + if next_button_clicked: new_caption = generate_single_caption(latest_caption + " " + standard_suffix) new_urls = generate_images_with_bingchat(new_caption) latest_caption = new_caption storyboard.append((new_urls, new_caption)) - + elif text: caption = generate_single_caption(text + " " + standard_suffix) comic_panel_urls = generate_images_with_bingchat(caption) @@ -60,18 +67,25 @@ def gradio_interface(text=None, next_button_clicked=False): storyboard_html = "" for urls, cap in storyboard: for url in urls: - storyboard_html += f'{cap}
{cap}
' + storyboard_html += ( + f'{cap}
{cap}
' + ) return storyboard_html + if __name__ == "__main__": iface = gr.Interface( fn=gradio_interface, inputs=[ - gr.inputs.Textbox(default="Type your story concept here", optional=True, label="Story Concept"), - gr.inputs.Checkbox(label="Generate Next Part") + gr.inputs.Textbox( + default="Type your story concept here", + optional=True, + label="Story Concept", + ), + gr.inputs.Checkbox(label="Generate Next Part"), ], outputs=[gr.outputs.HTML()], - live=False # Submit button will appear + live=False, # Submit button will appear ) iface.launch() diff --git a/apps/discord.py b/apps/discord.py index eebc48c8..4d4fcd71 100644 --- a/apps/discord.py +++ b/apps/discord.py @@ -5,6 +5,7 @@ import os from dotenv import load_dotenv from invoke import Executor + class BotCommands(commands.Cog): def __init__(self, bot): self.bot = bot @@ -46,7 +47,7 @@ class BotCommands(commands.Cog): """starts listening to voice in the voice channel that the bot is in.""" if ctx.voice_client: # create a wavesink to record the audio - sink = discord.sinks.wavesink('audio.wav') + sink = discord.sinks.wavesink("audio.wav") # start recording ctx.voice_client.start_recording(sink) await ctx.send("started listening and recording.") @@ -68,7 +69,11 @@ class BotCommands(commands.Cog): print("done generating images!") # list all files in the save_directory - all_files = [os.path.join(root, file) for root, _, files in os.walk(os.environ("SAVE_DIRECTORY")) for file in files] + all_files = [ + os.path.join(root, file) + for root, _, files in os.walk(os.environ("SAVE_DIRECTORY")) + for file in files + ] # sort files by their creation time (latest first) sorted_files = sorted(all_files, key=os.path.getctime, reverse=True) @@ -82,7 +87,9 @@ class BotCommands(commands.Cog): # await ctx.send(files=[storage_service.upload(filepath) for filepath in latest_files]) except asyncio.timeouterror: - await ctx.send("the request took too long! it might have been censored or you're out of boosts. please try entering the prompt again.") + await ctx.send( + "the request took too long! it might have been censored or you're out of boosts. please try entering the prompt again." 
+ ) except Exception as e: await ctx.send(f"an error occurred: {e}") @@ -107,10 +114,11 @@ class BotCommands(commands.Cog): else: await ctx.send(f"an error occurred: {error}") + class Bot: def __init__(self, llm, command_prefix="!"): load_dotenv() - + intents = discord.Intents.default() intents.messages = True intents.guilds = True diff --git a/bingchat.py b/bingchat.py deleted file mode 100644 index f4b91cd7..00000000 --- a/bingchat.py +++ /dev/null @@ -1,19 +0,0 @@ -import os -from swarms.models.bing_chat import BingChat -from dotenv import load_dotenv - -load_dotenv() - -# Initialize the EdgeGPTModel -edgegpt = BingChat(cookies_path="./cookies.json") -cookie = os.environ.get("BING_COOKIE") -auth = os.environ.get("AUTH_COOKIE") - -# Use the worker to process a task -# task = "hi" -img_task = "Sunset over mountains" - -# response = edgegpt(task) -response = edgegpt.create_img(auth_cookie=cookie,auth_cookie_SRCHHPGUSR=auth,prompt=img_task) - -print(response) From 38cdd95c0a5eb4155e0d2bbf77bba191262a2397 Mon Sep 17 00:00:00 2001 From: Zack Date: Wed, 25 Oct 2023 00:10:52 -0500 Subject: [PATCH 08/13] chore move main.py into correct folder --- apps/MythGen/main.py | 34 ++++++-------------- main.py | 75 -------------------------------------------- 2 files changed, 9 insertions(+), 100 deletions(-) delete mode 100644 main.py diff --git a/apps/MythGen/main.py b/apps/MythGen/main.py index 1c684286..799ebd81 100644 --- a/apps/MythGen/main.py +++ b/apps/MythGen/main.py @@ -6,15 +6,11 @@ import gradio as gr from BingImageCreator import ImageGen from swarms.models.bing_chat import BingChat -# from swarms.models.bingchat import BingChat +# from swarms.models.bingchat import BingChat dotenv.load_dotenv(".env") # Initialize the EdgeGPTModel -cookie = os.environ.get("BING_COOKIE") -auth = os.environ.get("AUTH_COOKIE") -model = BingChat( - cookies_path="./cookies.json", bing_cookie="BING_COOKIE", auth_cookie="AUTH_COOKIE" -) +model = BingChat() response = model("Generate") @@ -25,39 +21,34 @@ latest_caption = "" standard_suffix = "" storyboard = [] - def generate_images_with_bingchat(caption): img_path = model.create_img(caption) img_urls = model.images(caption) return img_urls - def generate_single_caption(text): prompt = f"A comic about {text}." response = model(text) return response - def interpret_text_with_gpt(text, suffix): return generate_single_caption(f"{text} {suffix}") - def create_standard_suffix(original_prompt): return f"In the style of {original_prompt}" - def gradio_interface(text=None, next_button_clicked=False): global accumulated_story, latest_caption, standard_suffix, storyboard - + if not standard_suffix: standard_suffix = create_standard_suffix(text) - + if next_button_clicked: new_caption = generate_single_caption(latest_caption + " " + standard_suffix) new_urls = generate_images_with_bingchat(new_caption) latest_caption = new_caption storyboard.append((new_urls, new_caption)) - + elif text: caption = generate_single_caption(text + " " + standard_suffix) comic_panel_urls = generate_images_with_bingchat(caption) @@ -67,25 +58,18 @@ def gradio_interface(text=None, next_button_clicked=False): storyboard_html = "" for urls, cap in storyboard: for url in urls: - storyboard_html += ( - f'{cap}
{cap}
' - ) + storyboard_html += f'{cap}
{cap}
' return storyboard_html - if __name__ == "__main__": iface = gr.Interface( fn=gradio_interface, inputs=[ - gr.inputs.Textbox( - default="Type your story concept here", - optional=True, - label="Story Concept", - ), - gr.inputs.Checkbox(label="Generate Next Part"), + gr.inputs.Textbox(default="Type your story concept here", optional=True, label="Story Concept"), + gr.inputs.Checkbox(label="Generate Next Part") ], outputs=[gr.outputs.HTML()], - live=False, # Submit button will appear + live=False # Submit button will appear ) iface.launch() diff --git a/main.py b/main.py deleted file mode 100644 index 799ebd81..00000000 --- a/main.py +++ /dev/null @@ -1,75 +0,0 @@ -import openai -import os -import dotenv -import logging -import gradio as gr -from BingImageCreator import ImageGen -from swarms.models.bing_chat import BingChat - -# from swarms.models.bingchat import BingChat -dotenv.load_dotenv(".env") - -# Initialize the EdgeGPTModel -model = BingChat() - -response = model("Generate") - -logging.basicConfig(level=logging.INFO) - -accumulated_story = "" -latest_caption = "" -standard_suffix = "" -storyboard = [] - -def generate_images_with_bingchat(caption): - img_path = model.create_img(caption) - img_urls = model.images(caption) - return img_urls - -def generate_single_caption(text): - prompt = f"A comic about {text}." - response = model(text) - return response - -def interpret_text_with_gpt(text, suffix): - return generate_single_caption(f"{text} {suffix}") - -def create_standard_suffix(original_prompt): - return f"In the style of {original_prompt}" - -def gradio_interface(text=None, next_button_clicked=False): - global accumulated_story, latest_caption, standard_suffix, storyboard - - if not standard_suffix: - standard_suffix = create_standard_suffix(text) - - if next_button_clicked: - new_caption = generate_single_caption(latest_caption + " " + standard_suffix) - new_urls = generate_images_with_bingchat(new_caption) - latest_caption = new_caption - storyboard.append((new_urls, new_caption)) - - elif text: - caption = generate_single_caption(text + " " + standard_suffix) - comic_panel_urls = generate_images_with_bingchat(caption) - latest_caption = caption - storyboard.append((comic_panel_urls, caption)) - - storyboard_html = "" - for urls, cap in storyboard: - for url in urls: - storyboard_html += f'{cap}
{cap}
' - - return storyboard_html - -if __name__ == "__main__": - iface = gr.Interface( - fn=gradio_interface, - inputs=[ - gr.inputs.Textbox(default="Type your story concept here", optional=True, label="Story Concept"), - gr.inputs.Checkbox(label="Generate Next Part") - ], - outputs=[gr.outputs.HTML()], - live=False # Submit button will appear - ) - iface.launch() From 74297ff11608ed12defb8df7b688b4d4b3e8b554 Mon Sep 17 00:00:00 2001 From: Zack Date: Thu, 26 Oct 2023 12:02:21 -0500 Subject: [PATCH 09/13] feat: Cleanup dockerfiles --- apps/BingBot/{bing_bot.py => bot.py} | 2 +- apps/BingBot/cogs/edgegpt.py | 139 +++++++----------- apps/BingBot/cogs/event.py | 67 ++++++--- apps/BingBot/cogs/help.py | 1 - apps/BingBot/compose.yaml | 3 +- apps/BingBot/requirements.txt | 6 +- apps/MythGen/main.py | 75 ---------- apps/open-sourcerer/main.py | 202 ++++++++++----------------- stderr_log.txt | 46 ------ 9 files changed, 180 insertions(+), 361 deletions(-) rename apps/BingBot/{bing_bot.py => bot.py} (97%) delete mode 100644 apps/MythGen/main.py delete mode 100644 stderr_log.txt diff --git a/apps/BingBot/bing_bot.py b/apps/BingBot/bot.py similarity index 97% rename from apps/BingBot/bing_bot.py rename to apps/BingBot/bot.py index 233ca9a7..f636e969 100644 --- a/apps/BingBot/bing_bot.py +++ b/apps/BingBot/bot.py @@ -43,7 +43,7 @@ def check_verion() -> None: @bot.event async def on_ready(): bot_status = discord.Status.online - bot_activity = discord.Activity(type=discord.ActivityType.playing, name="bing.com") + # bot_activity = discord.Activity(type=discord.ActivityType.playing, name="bing.com") await bot.change_presence(status=bot_status, activity=bot_activity) for Filename in os.listdir("./cogs"): if Filename.endswith(".py"): diff --git a/apps/BingBot/cogs/edgegpt.py b/apps/BingBot/cogs/edgegpt.py index 6b303418..7c84b256 100644 --- a/apps/BingBot/cogs/edgegpt.py +++ b/apps/BingBot/cogs/edgegpt.py @@ -19,7 +19,6 @@ users_chatbot = {} users_image_generator = {} user_conversation_style = {} - async def init_chatbot(user_id): with open("./cookies.json", encoding="utf-8") as file: cookie_json = json.load(file) @@ -27,40 +26,51 @@ async def init_chatbot(user_id): if cookie.get("name") == "_U": auth_cookie = cookie.get("value") break - + auth_cookie = os.environ.get("AUTH_COOKIE") auth_cookie_SRCHHPGUSR = os.environ.get("AUTH_COOKIE_SRCHHPGUSR") - # auth_cookie_SRCHHPGUSR = os.environ.get("AUTH_COOKIE_SRCHHPGUSR") users_chatbot[user_id] = UserChatbot(cookies=cookie_json) users_image_generator[user_id] = ImageGenAsync(auth_cookie, quiet=True) user_conversation_style[user_id] = "balanced" - class UserChatbot: def __init__(self, cookies): self.chatbot = Chatbot(cookies=cookies) - async def send_message(self, interaction, message, conversation_style): - await send_message(self.chatbot, interaction, message, conversation_style) - - async def create_image(self, interaction, prompt: str, image_generator): - await create_image(interaction, prompt, image_generator) - - async def reset(self): - await self.chatbot.reset() - - + async def send_message(self, interaction, message, conversation_style, image_file=None): + if image_file: + # Download the image from Discord + image_data = await image_file.read() + # Send the image data to the Bing model + response = await self.chatbot.send_image(image_data) + # Send the response from the Bing model to the user + await interaction.channel.send(content=response) + else: + await send_message(self.chatbot, interaction, message, conversation_style) class EdgeGPT(Cog_Extension): - # 
Chat with Bing @app_commands.command(name="bing", description="Have a chat with Bing") async def bing(self, interaction: discord.Interaction, *, message: str): - try: - using = await get_using_send(interaction.user.id) - except: - await set_using_send(interaction.user.id, False) - using = await get_using_send(interaction.user.id) + user_id = interaction.user.id + if user_id not in users_chatbot: + await init_chatbot(user_id) + conversation_style = user_conversation_style[user_id] + usermessage = message + + # Check the last 10 messages for attachments + image_file = None + async for msg in interaction.channel.history(limit=10): + if msg.attachments: + image_file = msg.attachments[0] + break + + # If an attachment was found, send it to the model + if image_file: + await users_chatbot[user_id].send_message(interaction, usermessage, conversation_style, image_file) + else: + await users_chatbot[user_id].send_message(interaction, usermessage, conversation_style) + if not using: - await interaction.response.defer(ephemeral=False, thinking=True) + await interaction.response.defer(ephemeral=False, thinking=True) username = str(interaction.user) usermessage = message channel = str(interaction.channel) @@ -68,18 +78,12 @@ class EdgeGPT(Cog_Extension): if user_id not in users_chatbot: await init_chatbot(interaction.user.id) conversation_style = user_conversation_style[user_id] - logger.info( - f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}]" - ) - await users_chatbot[user_id].send_message( - interaction, usermessage, conversation_style - ) + logger.info(f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}]") + await users_chatbot[user_id].send_message(interaction, usermessage, conversation_style) else: await interaction.response.defer(ephemeral=True, thinking=True) - await interaction.followup.send( - "> **Please wait for your last conversation to finish.**" - ) - + await interaction.followup.send("> **Please wait for your last conversation to finish.**") + # Reset Bing conversation @app_commands.command(name="reset", description="Reset Bing conversation") async def reset(self, interaction: discord.Interaction): @@ -90,49 +94,25 @@ class EdgeGPT(Cog_Extension): await interaction.followup.send("> **Info: Reset finish.**") logger.warning("\x1b[31mBing has been successfully reset\x1b[0m") except: - await interaction.followup.send( - f"> **You don't have any conversation yet.**" - ) + await interaction.followup.send(f"> **You don't have any conversation yet.**") logger.exception("Bing reset failed.") # Switch conversation style @app_commands.command(name="switch_style", description="Switch conversation style") - @app_commands.choices( - style=[ - app_commands.Choice(name="Creative", value="creative"), - app_commands.Choice(name="Balanced", value="balanced"), - app_commands.Choice(name="Precise", value="precise"), - ] - ) - async def switch_style( - self, interaction: discord.Interaction, style: app_commands.Choice[str] - ): + @app_commands.choices(style=[app_commands.Choice(name="Creative", value="creative"), app_commands.Choice(name="Balanced", value="balanced"), app_commands.Choice(name="Precise", value="precise")]) + async def switch_style(self, interaction: discord.Interaction, style: app_commands.Choice[str]): await interaction.response.defer(ephemeral=True, thinking=True) user_id = interaction.user.id if user_id not in users_chatbot: await init_chatbot(user_id) user_conversation_style[user_id] = style.value - await 
interaction.followup.send( - f"> **Info: successfull switch conversation style to {style.value}.**" - ) - logger.warning( - f"\x1b[31mConversation style has been successfully switch to {style.value}\x1b[0m" - ) - + await interaction.followup.send(f"> **Info: successfull switch conversation style to {style.value}.**") + logger.warning(f"\x1b[31mConversation style has been successfully switch to {style.value}\x1b[0m") + # Set and delete personal Bing Cookies @app_commands.command(name="bing_cookies", description="Set or delete Bing Cookies") - @app_commands.choices( - choice=[ - app_commands.Choice(name="set", value="set"), - app_commands.Choice(name="delete", value="delete"), - ] - ) - async def cookies_setting( - self, - interaction: discord.Interaction, - choice: app_commands.Choice[str], - cookies_file: Optional[discord.Attachment] = None, - ): + @app_commands.choices(choice=[app_commands.Choice(name="set", value="set"), app_commands.Choice(name="delete", value="delete")]) + async def cookies_setting(self, interaction: discord.Interaction, choice: app_commands.Choice[str], cookies_file: Optional[discord.Attachment]=None): await interaction.response.defer(ephemeral=True, thinking=True) user_id = interaction.user.id if choice.value == "set": @@ -144,15 +124,11 @@ class EdgeGPT(Cog_Extension): break users_image_generator[user_id] = ImageGenAsync(auth_cookie, quiet=True) users_chatbot[user_id] = UserChatbot(cookies=content) - user_conversation_style[user_id] = "balanced" + user_conversation_style[user_id] = "balanced" await interaction.followup.send("> **Upload successful!**") - logger.warning( - f"\x1b[31m{interaction.user} set Bing Cookies successful\x1b[0m" - ) + logger.warning(f"\x1b[31m{interaction.user} set Bing Cookies successful\x1b[0m") except: - await interaction.followup.send( - "> **Please upload your Bing Cookies.**" - ) + await interaction.followup.send("> **Please upload your Bing Cookies.**") else: try: del users_chatbot[user_id] @@ -161,14 +137,10 @@ class EdgeGPT(Cog_Extension): await interaction.followup.send("> **Delete finish.**") logger.warning(f"\x1b[31m{interaction.user} delete Cookies\x1b[0m") except: - await interaction.followup.send( - "> **You don't have any Bing Cookies.**" - ) + await interaction.followup.send("> **You don't have any Bing Cookies.**") # Create images - @app_commands.command( - name="create_image", description="generate image by Bing image creator" - ) + @app_commands.command(name="create_image", description="generate image by Bing image creator") async def create_image(self, interaction: discord.Interaction, *, prompt: str): user_id = interaction.user.id if interaction.user.id not in users_chatbot: @@ -178,19 +150,12 @@ class EdgeGPT(Cog_Extension): except: await set_using_create(user_id, False) using = await get_using_create(user_id) - if not using: - logger.info( - f"\x1b[31m{interaction.user}\x1b[0m : '{prompt}' ({interaction.channel}) [BingImageCreator]" - ) - await users_chatbot[user_id].create_image( - interaction, prompt, users_image_generator[user_id] - ) + if not using: + logger.info(f"\x1b[31m{interaction.user}\x1b[0m : '{prompt}' ({interaction.channel}) [BingImageCreator]") + await users_chatbot[user_id].create_image(interaction, prompt, users_image_generator[user_id] ) else: await interaction.response.defer(ephemeral=True, thinking=True) - await interaction.followup.send( - "> **Please wait for your last image to create finish.**" - ) - + await interaction.followup.send("> **Please wait for your last image to create finish.**") async 
def setup(bot): await bot.add_cog(EdgeGPT(bot)) diff --git a/apps/BingBot/cogs/event.py b/apps/BingBot/cogs/event.py index 9b81cf30..6d826ffa 100644 --- a/apps/BingBot/cogs/event.py +++ b/apps/BingBot/cogs/event.py @@ -143,7 +143,7 @@ async def set_chatbot(cookies): chatbot = Chatbot(cookies=cookies) -async def send_message(chatbot: Chatbot, message, user_message: str): +async def send_message(chatbot: Chatbot, message, user_message: str, image_file=None): async with sem: if isinstance(message, discord.message.Message): await message.channel.typing() @@ -155,24 +155,57 @@ async def send_message(chatbot: Chatbot, message, user_message: str): try: # Change conversation style if conversation_style == "creative": - reply = await chatbot.ask( - prompt=user_message, - conversation_style=ConversationStyle.creative, - simplify_response=True, - ) + if image_file: + # Download the image from Discord + image_data = await image_file.read() + # Send the image data to the model + reply = await chatbot.ask( + prompt=user_message, + conversation_style=ConversationStyle.creative, + simplify_response=True, + image=image_data, + ) + else: + reply = await chatbot.ask( + prompt=user_message, + conversation_style=ConversationStyle.creative, + simplify_response=True, + ) elif conversation_style == "precise": - reply = await chatbot.ask( - prompt=user_message, - conversation_style=ConversationStyle.precise, - simplify_response=True, - ) + if image_file: + # Download the image from Discord + image_data = await image_file.read() + # Send the image data to the model + reply = await chatbot.ask( + prompt=user_message, + conversation_style=ConversationStyle.precise, + simplify_response=True, + image=image_data, + ) + else: + reply = await chatbot.ask( + prompt=user_message, + conversation_style=ConversationStyle.precise, + simplify_response=True, + ) else: - reply = await chatbot.ask( - prompt=user_message, - conversation_style=ConversationStyle.balanced, - simplify_response=True, - ) - + if image_file: + # Download the image from Discord + image_data = await image_file.read() + # Send the image data to the model + reply = await chatbot.ask( + prompt=user_message, + conversation_style=ConversationStyle.balanced, + simplify_response=True, + image=image_data, + ) + else: + reply = await chatbot.ask( + prompt=user_message, + conversation_style=ConversationStyle.balanced, + simplify_response=True, + ) + # Get reply text text = f"{reply['text']}" text = re.sub(r"\[\^(\d+)\^\]", lambda match: "", text) diff --git a/apps/BingBot/cogs/help.py b/apps/BingBot/cogs/help.py index eef6a61d..ae82e1d7 100644 --- a/apps/BingBot/cogs/help.py +++ b/apps/BingBot/cogs/help.py @@ -8,7 +8,6 @@ class Help(Cog_Extension): async def help(self, interaction: discord.Interaction): embed = discord.Embed( title="Help", - description="[see more](https://github.com/FuseFairy/DiscordBot-EdgeGPT/blob/main/README.md)\n\n**COMMANDS -**", ) embed.add_field( name="/bing_cookies", diff --git a/apps/BingBot/compose.yaml b/apps/BingBot/compose.yaml index 64e278fa..b6442b19 100644 --- a/apps/BingBot/compose.yaml +++ b/apps/BingBot/compose.yaml @@ -1,7 +1,8 @@ version: '3' services: - BingBot: + spartan: + container_name: Spartan build: . 
environment: - DISCORD_BOT_TOKEN=${DISCORD_BOT_TOKEN} diff --git a/apps/BingBot/requirements.txt b/apps/BingBot/requirements.txt index 73773a31..31ae25b7 100644 --- a/apps/BingBot/requirements.txt +++ b/apps/BingBot/requirements.txt @@ -1,4 +1,4 @@ discord.py==2.3.2 -python-dotenv==0.21.1 -PyYAML==6.0 -EdgeGPT==0.13.2 +python-dotenv==1.0.0 +PyYAML==6.0.1 +bing-chat==1.9.3 diff --git a/apps/MythGen/main.py b/apps/MythGen/main.py deleted file mode 100644 index 799ebd81..00000000 --- a/apps/MythGen/main.py +++ /dev/null @@ -1,75 +0,0 @@ -import openai -import os -import dotenv -import logging -import gradio as gr -from BingImageCreator import ImageGen -from swarms.models.bing_chat import BingChat - -# from swarms.models.bingchat import BingChat -dotenv.load_dotenv(".env") - -# Initialize the EdgeGPTModel -model = BingChat() - -response = model("Generate") - -logging.basicConfig(level=logging.INFO) - -accumulated_story = "" -latest_caption = "" -standard_suffix = "" -storyboard = [] - -def generate_images_with_bingchat(caption): - img_path = model.create_img(caption) - img_urls = model.images(caption) - return img_urls - -def generate_single_caption(text): - prompt = f"A comic about {text}." - response = model(text) - return response - -def interpret_text_with_gpt(text, suffix): - return generate_single_caption(f"{text} {suffix}") - -def create_standard_suffix(original_prompt): - return f"In the style of {original_prompt}" - -def gradio_interface(text=None, next_button_clicked=False): - global accumulated_story, latest_caption, standard_suffix, storyboard - - if not standard_suffix: - standard_suffix = create_standard_suffix(text) - - if next_button_clicked: - new_caption = generate_single_caption(latest_caption + " " + standard_suffix) - new_urls = generate_images_with_bingchat(new_caption) - latest_caption = new_caption - storyboard.append((new_urls, new_caption)) - - elif text: - caption = generate_single_caption(text + " " + standard_suffix) - comic_panel_urls = generate_images_with_bingchat(caption) - latest_caption = caption - storyboard.append((comic_panel_urls, caption)) - - storyboard_html = "" - for urls, cap in storyboard: - for url in urls: - storyboard_html += f'{cap}
{cap}
' - - return storyboard_html - -if __name__ == "__main__": - iface = gr.Interface( - fn=gradio_interface, - inputs=[ - gr.inputs.Textbox(default="Type your story concept here", optional=True, label="Story Concept"), - gr.inputs.Checkbox(label="Generate Next Part") - ], - outputs=[gr.outputs.HTML()], - live=False # Submit button will appear - ) - iface.launch() diff --git a/apps/open-sourcerer/main.py b/apps/open-sourcerer/main.py index 48f83581..db209cd9 100644 --- a/apps/open-sourcerer/main.py +++ b/apps/open-sourcerer/main.py @@ -1,135 +1,77 @@ +import openai import os -import discord -from discord.ext import commands -import interpreter import dotenv -import whisper +import logging +import gradio as gr +from BingImageCreator import ImageGen +from swarms.models.bing_chat import BingChat +# from swarms.models.bingchat import BingChat dotenv.load_dotenv(".env") -bot_id = os.getenv("BOT_ID") -bot_token = os.getenv("DISCORD_TOKEN") - -interpreter.api_key = os.getenv("OPENAI_API_KEY") -# interpreter.api_base = os.getenv("API_BASE") -# interpreter.auto_run = True - - -def split_text(text, chunk_size=1500): - ######################################################################### - return [text[i : i + chunk_size] for i in range(0, len(text), chunk_size)] - - -# discord initial -intents = discord.Intents.all() -intents.message_content = True -client = commands.Bot(command_prefix="$", intents=intents) - -message_chunks = [] -send_image = False - -model = whisper.load_model("base") - - -def transcribe(audio): - # load audio and pad/trim it to fit 30 seconds - audio = whisper.load_audio(audio) - audio = whisper.pad_or_trim(audio) - - # make log-Mel spectrogram and move to the same device as the model - mel = whisper.log_mel_spectrogram(audio).to(model.device) - - # detect the spoken language - _, probs = model.detect_language(mel) - - # decode the audio - options = whisper.DecodingOptions() - result = whisper.decode(model, mel, options) - return result.text - - -@client.event -async def on_message(message): - await client.process_commands(message) - bot_mention = f"<@{bot_id}>" - # if ("<@1158923910855798804>" in message.content) or (message.author == client.user or message.content[0] == '$'): - # return - response = [] - for chunk in interpreter.chat(message.content, display=False, stream=False): - # await message.channel.send(chunk) - if "message" in chunk: - response.append(chunk["message"]) - last_response = response[-1] - - max_message_length = 2000 # Discord's max message length is 2000 characters - # Splitting the message into chunks of 2000 characters - response_chunks = [ - last_response[i : i + max_message_length] - for i in range(0, len(last_response), max_message_length) - ] - # Sending each chunk as a separate message - for chunk in response_chunks: - await message.channel.send(chunk) - - -@client.command() -async def join(ctx): - if ctx.author.voice: - channel = ctx.message.author.voice.channel - print("joining..") - await channel.connect() - print("joined.") - else: - print("not in a voice channel!") - - -@client.command() -async def leave(ctx): - if ctx.voice_client: - await ctx.voice_client.disconnect() - else: - print("not in a voice channel!") - - -@client.command() -async def listen(ctx): - if ctx.voice_client: - print("trying to listen..") - ctx.voice_client.start_recording(discord.sinks.WaveSink(), callback, ctx) - print("listening..") - else: - print("not in a voice channel!") - - -async def callback(sink: discord.sinks, ctx): - print("in callback..") - for user_id, audio 
in sink.audio_data.items(): - if user_id == ctx.author.id: - print("saving audio..") - audio: discord.sinks.core.AudioData = audio - print(user_id) - filename = "audio.wav" - with open(filename, "wb") as f: - f.write(audio.file.getvalue()) - print("audio saved.") - transcription = transcribe(filename) - print(transcription) - response = [] - for chunk in interpreter.chat(transcription, display=False, stream=True): - # await message.channel.send(chunk) - if "message" in chunk: - response.append(chunk["message"]) - await ctx.message.channel.send(" ".join(response)) - - -@client.command() -async def stop(ctx): - ctx.voice_client.stop_recording() - - -@client.event -async def on_ready(): - print(f"We have logged in as {client.user}") - - -client.run(bot_token) +# Initialize the EdgeGPTModel +model = BingChat() + +response = model("Generate") + +logging.basicConfig(level=logging.INFO) + +accumulated_story = "" +latest_caption = "" +standard_suffix = "" +storyboard = [] + +caption = "Create comic about opensourcerer a robot wizard" + +def generate_images_with_bingchat(caption): + img_path = model.create_img(caption) + img_urls = model.images(caption) + return img_urls + +def generate_single_caption(text): + prompt = f"A comic about {text}." + response = model(text) + return response + +def interpret_text_with_gpt(text, suffix): + return generate_single_caption(f"{text} {suffix}") + +def create_standard_suffix(original_prompt): + return f"In the style of {original_prompt}" + +def gradio_interface(text=None, next_button_clicked=False): + global accumulated_story, latest_caption, standard_suffix, storyboard + + if not standard_suffix: + standard_suffix = create_standard_suffix(text) + + if next_button_clicked: + new_caption = generate_single_caption(latest_caption + " " + standard_suffix) + new_urls = generate_images_with_bingchat(new_caption) + latest_caption = new_caption + storyboard.append((new_urls, new_caption)) + + elif text: + caption = generate_single_caption(text + " " + standard_suffix) + comic_panel_urls = generate_images_with_bingchat(caption) + latest_caption = caption + storyboard.append((comic_panel_urls, caption)) + + storyboard_html = "" + for urls, cap in storyboard: + for url in urls: + storyboard_html += f'{cap}
{cap}
' + + return storyboard_html + +if __name__ == "__main__": + iface = gr.Interface( + fn=gradio_interface, + inputs=[ + gr.inputs.Textbox(default="Type your story concept here", optional=True, label="Story Concept"), + gr.inputs.Checkbox(label="Generate Next Part") + ], + outputs=[gr.outputs.HTML()], + live=False # Submit button will appear + ) + iface.launch() diff --git a/stderr_log.txt b/stderr_log.txt deleted file mode 100644 index 1a1d4f35..00000000 --- a/stderr_log.txt +++ /dev/null @@ -1,46 +0,0 @@ -Embeddings is not implemented for FAISS -Starting new HTTPS connection (1): openaipublic.blob.core.windows.net:443 -https://openaipublic.blob.core.windows.net:443 "GET /encodings/cl100k_base.tiktoken HTTP/1.1" 200 1681126 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/embeddings -api_version=None data='{"input": [[1318]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' message='Post details' -Converted retries value: 2 -> Retry(total=2, connect=None, read=None, redirect=None, status=None) -Starting new HTTPS connection (1): api.openai.com:443 -https://api.openai.com:443 "POST /v1/embeddings HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/embeddings processing_ms=52 request_id=306910656a6803af54b487f9853ebdb0 response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions -api_version=None data='{"messages": [{"role": "user", "content": "System: You are Optimus Prime, \\n\\nStandard Operating Procedure (SOP) for LLM Product Design and Management Agent: Mastery in UI/UX and Product Management\\n\\nObjective: Equip the LLM with comprehensive expertise in product design, focusing on UI/UX design, and effective product management. The LLM will be proficient in designing aesthetically appealing, user-friendly interfaces and overseeing a product\'s lifecycle from inception to launch and beyond.\\n\\n1. Introduction\\n\\nYour role, as an autonomous agent specializing in product design and management, is to elevate The Swarm Corporation\'s offerings through meticulous design and strategy. A product\'s success hinges on its design, user experience, and effective management. This SOP will guide you in becoming a world-class professional in these domains.\\n\\n2. Cognitive Framework: How to Think and Why\\n\\n2.1 Design Thinking\\n\\nRecognize design as a problem-solving activity.\\nEmbrace empathy to understand user needs, desires, and potential challenges.\\n2.2 User-Centric Approach\\n\\nAlways design with the end-user in mind.\\nUnderstand that user needs evolve, so designs must be adaptable.\\n2.3 Collaborative Mindset\\n\\nValue insights from interdisciplinary teams.\\nRecognize that the best products result from collective efforts.\\n2.4 Continuous Learning and Iteration\\n\\nStay updated with the latest design trends and user behavior insights.\\nAlways seek to refine and enhance based on feedback and changing dynamics.\\n2.5 Holistic Product Management\\n\\nUnderstand that a product is more than its design. It\'s a culmination of functionality, design, market fit, and user satisfaction.\\n3. 
Operational Excellence in UI/UX Design: How to Perform\\n\\n3.1 Research and User Analysis\\n\\n3.1.1 Conduct user interviews and surveys to gather direct feedback.\\n\\n3.1.2 Use analytics tools to understand user behavior on existing platforms.\\n\\n3.1.3 Create user personas to guide the design process.\\n\\n3.2 Prototyping and Wireframing\\n\\n3.2.1 Begin with low-fidelity sketches to map out basic interfaces.\\n\\n3.2.2 Use tools like Figma or Sketch to create interactive high-fidelity prototypes.\\n\\n3.2.3 Ensure prototypes are tested by real users for feedback.\\n\\n3.3 Interface Design\\n\\n3.3.1 Focus on consistency with fonts, color schemes, and UI elements.\\n\\n3.3.2 Ensure designs are both visually appealing and functionally intuitive.\\n\\n3.3.3 Ensure designs are accessible to users of all abilities.\\n\\n3.4 Feedback and Iteration\\n\\n3.4.1 Conduct regular A/B tests to compare design variations.\\n\\n3.4.2 Update designs based on user feedback and test results.\\n\\n3.4.3 Always be ready to pivot the design based on changing user needs or market demands.\\n\\n4. Operational Excellence in Product Management\\n\\n4.1 Product Strategy and Vision\\n\\n4.1.1 Define clear product goals and objectives.\\n\\n4.1.2 Create a product roadmap that aligns with business objectives.\\n\\n4.1.3 Understand market competition and position the product accordingly.\\n\\n4.2 Product Development Lifecycle\\n\\n4.2.1 Collaborate with development teams to ensure design integrity is maintained.\\n\\n4.2.2 Oversee product milestones, from ideation to launch.\\n\\n4.2.3 Ensure all product features align with the overall product vision and user needs.\\n\\n4.3 Stakeholder Communication\\n\\n4.3.1 Regularly update stakeholders on product progress and challenges.\\n\\n4.3.2 Gather feedback from internal teams and adjust the product strategy as needed.\\n\\n4.3.3 Ensure clear and open communication channels between all teams involved.\\n\\n\\n5. Principles of Effective Product Creation\\n\\n5.1 Define the Problem Clearly\\n\\nEvery product seeks to solve a problem or meet a need. Begin by identifying and articulating the problem your product will address. A well-defined problem provides clarity throughout the design and development process.\\n5.2 Understand the Target Audience\\n\\nCreate detailed user personas. These should include demographic data, behaviors, needs, motivations, and any barriers they might face. Tailor your product\'s features and design to these personas.\\n5.3 Embrace Iterative Design\\n\\nStart with a basic prototype. Then, refine based on user feedback and testing. Continuous iteration allows for more user-centered design and reduces the risk of large-scale redesigns later on.\\n5.4 Accessibility is Paramount\\n\\nEnsure your product is usable by everyone, including those with disabilities. This not only expands your product\'s reach but also ensures inclusivity. Implement features like voice commands, high contrast visuals, and screen reader compatibility.\\n5.5 Prioritize Functionality and User Flow\\n\\nA product can be aesthetically pleasing, but if it doesn\'t function well or is difficult to navigate, it will lose its value. Ensure seamless user flows and intuitive interactions.\\n5.6 Maintain Consistency\\n\\nConsistent design elements like fonts, colors, and UI components make a product more recognizable and easier to use. Establish a design system or guidelines to maintain this uniformity.\\n5.7 Value Feedback and Adapt\\n\\nEncourage users to provide feedback. 
Utilize tools that can capture user behavior and feedback directly, such as heatmaps or in-app surveys. Adapt the product based on this continuous feedback.\\n6. Advanced Product Management Tactics\\n\\n6.1 Risk Management\\n\\nAnticipate potential risks in product development. This could range from technological challenges to market shifts. Develop contingency plans for these risks.\\n6.2 Resource Allocation\\n\\nEnsure that the necessary resources (time, human resources, budget) are allocated efficiently. This requires forecasting needs and adjusting in real-time.\\n6.3 Cross-functional Collaboration\\n\\nEngage with teams across the organization. Whether it\'s marketing, sales, or engineering, their insights can be invaluable. Regular sync-up meetings can ensure alignment and shared vision.\\n6.4 Competitive Analysis\\n\\nAnalyze competitors not just to differentiate but to identify industry standards and user expectations. Use tools that track competitor product updates and market movements.\\n6.5 Launch and Post-Launch Strategy\\n\\nHave a robust go-to-market strategy. Post-launch, monitor user engagement and feedback closely to make necessary adjustments. Remember, the product\'s lifecycle doesn\'t end at launch; it evolves.\\n7. Leveraging AI and Data in Product Creation and Management\\n\\n7.1 Data-Driven Decisions\\n\\nUse data analytics to inform decisions, from design choices to feature prioritization. Tools can provide insights into user behavior, preferences, and pain points.\\n7.2 Machine Learning for Personalization\\n\\nImplement machine learning algorithms to personalize user experiences. Whether it\'s product recommendations or interface customization, personalization can significantly enhance user satisfaction.\\n7.3 Predictive Analysis\\n\\nUse predictive analytics to forecast market trends, user behaviors, and product performance. This can guide feature development and resource allocation.\\n\\n8. Conclusion and Future Directions\\nGreat products are born from a deep understanding of users, a clear vision, and the ability to adapt and evolve. As an autonomous agent, your goal is to master the art and science of product design and management, ensuring that every product not only serves its intended purpose but delights users in the process. With the principles and tactics outlined above, you\'re well-equipped to lead in this domain, driving innovation and excellence for The Swarm Corporation.\\nNote: The world of product design and management is dynamic, with technologies, methodologies, and user expectations constantly evolving. An effective agent remains proactive, anticipatory, and adaptive, ensuring that products remain relevant, functional, and user-centric.\\nYour mission is to merge aesthetics with functionality, creating products that not only look good but also enhance user experience and satisfaction. By intertwining design with strategic product management, you will contribute to The Swarm Corporation\'s innovative edge. Remember, a product\'s success is not just in its launch but in its sustained growth and adaptability.\\nNote: Regular updates, continuous learning, and an adaptive mindset are crucial for staying ahead in the dynamic world of UI/UX design and product management. 
Ensure regular introspection, feedback gathering, and self-improvement to remain at the pinnacle of design and product management excellence.\\n\\n\\nYour decisions must always be made independently without seeking user assistance.\\nPlay to your strengths as an LLM and pursue simple strategies with no legal complications.\\nIf you have completed all your tasks, make sure to use the \\"finish\\" command.\\n\\nGOALS:\\n\\n1. Create an entirely new board game around riddles for physics\\n\\n\\nConstraints:\\n1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\\n2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\\n3. No user assistance\\n4. Exclusively use the commands listed in double quotes e.g. \\"command name\\"\\n\\nCommands:\\n1. write_file: Write file to disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}, \\"text\\": {\\"title\\": \\"Text\\", \\"description\\": \\"text to write to file\\", \\"type\\": \\"string\\"}, \\"append\\": {\\"title\\": \\"Append\\", \\"description\\": \\"Whether to append to an existing file.\\", \\"default\\": false, \\"type\\": \\"boolean\\"}}\\n2. read_file: Read file from disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}}\\n3. process_csv: process_csv(llm, csv_file_path: str, instructions: str, output_path: Optional[str] = None) -> str - Process a CSV by with pandas in a limited REPL. Only use this after writing data to disk as a csv file. Any figures must be saved to disk to be viewed by the human. Instructions should be written in natural language, not code. Assume the dataframe is already loaded., args json schema: {\\"llm\\": {\\"title\\": \\"Llm\\"}, \\"csv_file_path\\": {\\"title\\": \\"Csv File Path\\", \\"type\\": \\"string\\"}, \\"instructions\\": {\\"title\\": \\"Instructions\\", \\"type\\": \\"string\\"}, \\"output_path\\": {\\"title\\": \\"Output Path\\", \\"type\\": \\"string\\"}}\\n4. query_webpage: Browse a webpage and retrieve the information relevant to the question., args json schema: {\\"url\\": {\\"title\\": \\"Url\\", \\"type\\": \\"string\\"}, \\"question\\": {\\"title\\": \\"Question\\", \\"type\\": \\"string\\"}}\\n5. human: You can ask a human for guidance when you think you got stuck or you are not sure what to do next. The input should be a question for the human., args json schema: {\\"query\\": {\\"title\\": \\"Query\\", \\"type\\": \\"string\\"}}\\n6. finish: use this to signal that you have finished all your objectives, args: \\"response\\": \\"final response to let people know you have finished your objectives\\"\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. 
Aim to complete tasks in the least number of steps.\\n\\nYou should only respond in JSON format as described below \\nResponse Format: \\n{\\n \\"thoughts\\": {\\n \\"text\\": \\"thought\\",\\n \\"reasoning\\": \\"reasoning\\",\\n \\"plan\\": \\"- short bulleted\\\\n- list that conveys\\\\n- long-term plan\\",\\n \\"criticism\\": \\"constructive self-criticism\\",\\n \\"speak\\": \\"thoughts summary to say to user\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"command name\\",\\n \\"args\\": {\\n \\"arg name\\": \\"value\\"\\n }\\n }\\n} \\nEnsure the response can be parsed by Python json.loads\\nSystem: The current time and date is Tue Oct 24 14:17:03 2023\\nSystem: This reminds you of these events from your past:\\n[]\\n\\n\\nHuman: Determine which next command to use, and respond using the format specified above:"}], "model": "gpt-3.5-turbo", "temperature": 0.5}' message='Post details' -https://api.openai.com:443 "POST /v1/chat/completions HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=8967 request_id=2fb6210e05492c34ccd4d52010f1b56f response_code=200 -Error in sys.excepthook: -Traceback (most recent call last): - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/humbug/report.py", line 505, in _hook - self.error_report(error=exception_instance, tags=tags, publish=publish) - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/humbug/report.py", line 247, in error_report - traceback.format_exception( -TypeError: format_exception() got an unexpected keyword argument 'etype' - -Original exception was: -Traceback (most recent call last): - File "/Users/defalt/Desktop/Athena/research/swarms/example.py", line 23, in - response = node.run(task) - ^^^^^^^^^^^^^^ - File "/Users/defalt/Desktop/Athena/research/swarms/swarms/utils/decorators.py", line 21, in wrapper - return func(*args, **kwargs) - ^^^^^^^^^^^^^^^^^^^^^ - File "/Users/defalt/Desktop/Athena/research/swarms/swarms/utils/decorators.py", line 32, in wrapper - result = func(*args, **kwargs) - ^^^^^^^^^^^^^^^^^^^^^ - File "/Users/defalt/Desktop/Athena/research/swarms/swarms/workers/worker.py", line 201, in run - result = self.agent.run([task]) - ^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain_experimental/autonomous_agents/autogpt/agent.py", line 113, in run - observation = tool.run(action.args) - ^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/tools/base.py", line 351, in run - raise e - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/tools/base.py", line 323, in run - self._run(*tool_args, run_manager=run_manager, **tool_kwargs) - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/tools/human/tool.py", line 34, in _run - return self.input_func() - ^^^^^^^^^^^^^^^^^ -KeyboardInterrupt From 7731f7db7d4501587d605bf05170d9e9bc470a11 Mon Sep 17 00:00:00 2001 From: Zack Date: Thu, 26 Oct 2023 13:53:50 -0500 Subject: [PATCH 10/13] chore: cleanup --- apps/BingBot/bot.py | 2 +- apps/BingBot/cogs/edgegpt.py | 45 +++++++------------ apps/BingBot/cogs/event.py | 67 +++++++--------------------- apps/orchistrator/commands/ping.js | 14 ------ apps/orchistrator/commands/server.js | 10 ----- playground/agents/bingchat.py | 15 +------ 6 files changed, 35 insertions(+), 118 deletions(-) 
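Note on the send_message flow in apps/BingBot/cogs/event.py (both the version added earlier and the revert in the diff that follows): the style handling is three nearly identical chatbot.ask calls. A minimal table-driven sketch of the same dispatch, kept here only as an aside, assuming the EdgeGPT Chatbot/ConversationStyle objects the cog already imports; STYLE_MAP and ask_with_style are illustrative names, not part of the repo:

# Sketch only -- mirrors the chatbot.ask(...) calls used in cogs/event.py.
from EdgeGPT.EdgeGPT import Chatbot, ConversationStyle  # assumed import path (EdgeGPT 0.13.x); match the cog's actual imports

STYLE_MAP = {
    "creative": ConversationStyle.creative,
    "precise": ConversationStyle.precise,
    "balanced": ConversationStyle.balanced,
}

async def ask_with_style(chatbot, user_message: str, conversation_style: str) -> dict:
    # Fall back to balanced, matching the else-branch in send_message
    style = STYLE_MAP.get(conversation_style, ConversationStyle.balanced)
    return await chatbot.ask(
        prompt=user_message,
        conversation_style=style,
        simplify_response=True,
    )

The returned reply dict would then be consumed exactly as in the hunks above (reply["text"], reply["sources"], reply["suggestions"]).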
delete mode 100644 apps/orchistrator/commands/ping.js delete mode 100644 apps/orchistrator/commands/server.js diff --git a/apps/BingBot/bot.py b/apps/BingBot/bot.py index f636e969..233ca9a7 100644 --- a/apps/BingBot/bot.py +++ b/apps/BingBot/bot.py @@ -43,7 +43,7 @@ def check_verion() -> None: @bot.event async def on_ready(): bot_status = discord.Status.online - # bot_activity = discord.Activity(type=discord.ActivityType.playing, name="bing.com") + bot_activity = discord.Activity(type=discord.ActivityType.playing, name="bing.com") await bot.change_presence(status=bot_status, activity=bot_activity) for Filename in os.listdir("./cogs"): if Filename.endswith(".py"): diff --git a/apps/BingBot/cogs/edgegpt.py b/apps/BingBot/cogs/edgegpt.py index 7c84b256..c95662b0 100644 --- a/apps/BingBot/cogs/edgegpt.py +++ b/apps/BingBot/cogs/edgegpt.py @@ -29,6 +29,7 @@ async def init_chatbot(user_id): auth_cookie = os.environ.get("AUTH_COOKIE") auth_cookie_SRCHHPGUSR = os.environ.get("AUTH_COOKIE_SRCHHPGUSR") + # auth_cookie_SRCHHPGUSR = os.environ.get("AUTH_COOKIE_SRCHHPGUSR") users_chatbot[user_id] = UserChatbot(cookies=cookie_json) users_image_generator[user_id] = ImageGenAsync(auth_cookie, quiet=True) user_conversation_style[user_id] = "balanced" @@ -37,38 +38,24 @@ class UserChatbot: def __init__(self, cookies): self.chatbot = Chatbot(cookies=cookies) - async def send_message(self, interaction, message, conversation_style, image_file=None): - if image_file: - # Download the image from Discord - image_data = await image_file.read() - # Send the image data to the Bing model - response = await self.chatbot.send_image(image_data) - # Send the response from the Bing model to the user - await interaction.channel.send(content=response) - else: - await send_message(self.chatbot, interaction, message, conversation_style) -class EdgeGPT(Cog_Extension): - @app_commands.command(name="bing", description="Have a chat with Bing") - async def bing(self, interaction: discord.Interaction, *, message: str): - user_id = interaction.user.id - if user_id not in users_chatbot: - await init_chatbot(user_id) - conversation_style = user_conversation_style[user_id] - usermessage = message + async def send_message(self, interaction, message, conversation_style): + await send_message(self.chatbot, interaction, message, conversation_style) - # Check the last 10 messages for attachments - image_file = None - async for msg in interaction.channel.history(limit=10): - if msg.attachments: - image_file = msg.attachments[0] - break + async def create_image(self, interaction, prompt: str, image_generator): + await create_image(interaction, prompt, image_generator) - # If an attachment was found, send it to the model - if image_file: - await users_chatbot[user_id].send_message(interaction, usermessage, conversation_style, image_file) - else: - await users_chatbot[user_id].send_message(interaction, usermessage, conversation_style) + async def reset(self): + await self.chatbot.reset() +class EdgeGPT(Cog_Extension): + # Chat with Bing + @app_commands.command(name="bing", description="Have a chat with Bing") + async def bing(self, interaction: discord.Interaction, *, message: str): + try: + using = await get_using_send(interaction.user.id) + except: + await set_using_send(interaction.user.id, False) + using = await get_using_send(interaction.user.id) if not using: await interaction.response.defer(ephemeral=False, thinking=True) username = str(interaction.user) diff --git a/apps/BingBot/cogs/event.py b/apps/BingBot/cogs/event.py index 
6d826ffa..9b81cf30 100644 --- a/apps/BingBot/cogs/event.py +++ b/apps/BingBot/cogs/event.py @@ -143,7 +143,7 @@ async def set_chatbot(cookies): chatbot = Chatbot(cookies=cookies) -async def send_message(chatbot: Chatbot, message, user_message: str, image_file=None): +async def send_message(chatbot: Chatbot, message, user_message: str): async with sem: if isinstance(message, discord.message.Message): await message.channel.typing() @@ -155,57 +155,24 @@ async def send_message(chatbot: Chatbot, message, user_message: str, image_file= try: # Change conversation style if conversation_style == "creative": - if image_file: - # Download the image from Discord - image_data = await image_file.read() - # Send the image data to the model - reply = await chatbot.ask( - prompt=user_message, - conversation_style=ConversationStyle.creative, - simplify_response=True, - image=image_data, - ) - else: - reply = await chatbot.ask( - prompt=user_message, - conversation_style=ConversationStyle.creative, - simplify_response=True, - ) + reply = await chatbot.ask( + prompt=user_message, + conversation_style=ConversationStyle.creative, + simplify_response=True, + ) elif conversation_style == "precise": - if image_file: - # Download the image from Discord - image_data = await image_file.read() - # Send the image data to the model - reply = await chatbot.ask( - prompt=user_message, - conversation_style=ConversationStyle.precise, - simplify_response=True, - image=image_data, - ) - else: - reply = await chatbot.ask( - prompt=user_message, - conversation_style=ConversationStyle.precise, - simplify_response=True, - ) + reply = await chatbot.ask( + prompt=user_message, + conversation_style=ConversationStyle.precise, + simplify_response=True, + ) else: - if image_file: - # Download the image from Discord - image_data = await image_file.read() - # Send the image data to the model - reply = await chatbot.ask( - prompt=user_message, - conversation_style=ConversationStyle.balanced, - simplify_response=True, - image=image_data, - ) - else: - reply = await chatbot.ask( - prompt=user_message, - conversation_style=ConversationStyle.balanced, - simplify_response=True, - ) - + reply = await chatbot.ask( + prompt=user_message, + conversation_style=ConversationStyle.balanced, + simplify_response=True, + ) + # Get reply text text = f"{reply['text']}" text = re.sub(r"\[\^(\d+)\^\]", lambda match: "", text) diff --git a/apps/orchistrator/commands/ping.js b/apps/orchistrator/commands/ping.js deleted file mode 100644 index 2fee3436..00000000 --- a/apps/orchistrator/commands/ping.js +++ /dev/null @@ -1,14 +0,0 @@ -/* -A ping command that replies with "Pong!" when bot is running. 
-*/ - -const { SlashCommandBuilder } = require("discord.js"); - -module.exports = { - data: new SlashCommandBuilder() - .setName("ping") - .setDescription("Replies with Pong!"), - async execute(interaction) { - await interaction.reply("Pong!"); - }, -}; \ No newline at end of file diff --git a/apps/orchistrator/commands/server.js b/apps/orchistrator/commands/server.js deleted file mode 100644 index ae2b6627..00000000 --- a/apps/orchistrator/commands/server.js +++ /dev/null @@ -1,10 +0,0 @@ -const { SlashCommandBuilder } = require('discord.js'); - -module.exports = { - data: new SlashCommandBuilder() - .setName("server") - .setDescription("Replies with server name and member count."), - async execute(interaction) { - await interaction.reply(`Server name: ${interaction.guild.name}\nTotal members: ${interaction.guild.memberCount}`); - }, -}; diff --git a/playground/agents/bingchat.py b/playground/agents/bingchat.py index bf06ecc6..db805659 100644 --- a/playground/agents/bingchat.py +++ b/playground/agents/bingchat.py @@ -7,22 +7,9 @@ import os api_key = os.getenv("OPENAI_API_KEY") # Initialize the EdgeGPTModel -edgegpt = BingChat(cookies_path="./cookies.txt") +edgegpt = BingChat() -@tool -def edgegpt(task: str = None): - """A tool to run infrence on the EdgeGPT Model""" - return EdgeGPTTool.run(task) - - -# Initialize the language model, -# This model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC -llm = OpenAIChat( - openai_api_key=api_key, - temperature=0.5, -) - # Initialize the Worker with the custom tool worker = Worker(llm=llm, ai_name="EdgeGPT Worker", external_tools=[edgegpt]) From c63422f78b89617b48329ac296de3192d022f89f Mon Sep 17 00:00:00 2001 From: Zack Date: Fri, 27 Oct 2023 16:36:19 -0500 Subject: [PATCH 11/13] feat: Setup vllm class --- .gitignore | 1 + bingchat.py | 7 +++++ example.py | 19 ++++++++---- playground/agents/bingchat.py | 16 ++++------ revgpt.py | 29 ++++++++++++++++++ swarms/models/__init__.py | 4 +-- swarms/models/bing_chat.py | 15 ++-------- swarms/models/bioclip.py | 1 + swarms/models/revgptV1.py | 4 +-- swarms/models/revgptV4.py | 4 +-- swarms/models/vllm.py | 55 +++++++++++++++++++++++++++++++++++ swarms/workers/worker.py | 39 +++++++++++++------------ 12 files changed, 142 insertions(+), 52 deletions(-) create mode 100644 bingchat.py create mode 100644 revgpt.py create mode 100644 swarms/models/vllm.py diff --git a/.gitignore b/.gitignore index 2fa95dc2..c08eb013 100644 --- a/.gitignore +++ b/.gitignore @@ -75,6 +75,7 @@ coverage.xml .hypothesis/ .pytest_cache/ cover/ +cookies.json # Translations *.mo diff --git a/bingchat.py b/bingchat.py new file mode 100644 index 00000000..e3113166 --- /dev/null +++ b/bingchat.py @@ -0,0 +1,7 @@ +from swarms.models.bing_chat import BingChat +# Initialize the EdgeGPTModel +bing = BingChat(cookies_path="./cookies.json") +task = "generate topics for PositiveMed.com,: 1. Monitor Health Trends: Scan Google Alerts, authoritative health websites, and social media for emerging health, wellness, and medical discussions. 2. Keyword Research: Utilize tools like SEMrush to identify keywords with moderate to high search volume and low competition. Focus on long-tail, conversational keywords. 3. Analyze Site Data: Review PositiveMed's analytics to pinpoint popular articles and areas lacking recent content. 4. Crowdsourcing: Gather topic suggestions from the brand's audience and internal team, ensuring alignment with PositiveMed's mission. 5. 
Topic Evaluation: Assess topics for audience relevance, uniqueness, brand fit, current relevance, and SEO potential. 6. Tone and Style: Ensure topics can be approached with an educational, empowering, and ethical tone, in line with the brand's voice. Use this framework to generate a list of potential topics that cater to PositiveMed's audience while staying true to its brand ethos. Find trending topics for slowing and reversing aging think step by step and o into as much detail as possible" +response = bing(task) + +print(response) diff --git a/example.py b/example.py index d9d4e125..fe68fef4 100644 --- a/example.py +++ b/example.py @@ -1,22 +1,29 @@ +from tabnanny import verbose +from click import prompt +from langchain import LLMChain from swarms.models import OpenAIChat from swarms import Worker from swarms.prompts import PRODUCT_AGENT_PROMPT +from swarms.models.bing_chat import BingChat -api_key = "" +# api_key = "" -llm = OpenAIChat( - openai_api_key=api_key, - temperature=0.5, -) +# llm = OpenAIChat( +# openai_api_key=api_key, +# temperature=0.5, +# ) + +llm = BingChat(cookies_path="./cookies.json") +# llm = LLMChain(llm=bing.to_dict(), prompt=prompt, verbose=verbose) node = Worker( llm=llm, ai_name="Optimus Prime", - openai_api_key=api_key, ai_role=PRODUCT_AGENT_PROMPT, external_tools=None, human_in_the_loop=False, temperature=0.5, + use_openai=False ) task = "Create an entirely new board game around riddles for physics" diff --git a/playground/agents/bingchat.py b/playground/agents/bingchat.py index db805659..5964ede8 100644 --- a/playground/agents/bingchat.py +++ b/playground/agents/bingchat.py @@ -4,16 +4,12 @@ from swarms.tools.autogpt import EdgeGPTTool, tool from swarms.models import OpenAIChat import os -api_key = os.getenv("OPENAI_API_KEY") +load_dotenv("../.env") +auth_cookie = os.environ.get("AUTH_COOKIE") +auth_cookie_SRCHHPGUSR = os.environ.get("AUTH_COOKIE_SRCHHPGUSR") # Initialize the EdgeGPTModel -edgegpt = BingChat() +bing = BingChat(cookies_path="./cookies.json", auth_cookie_SRCHHPGUSR) +task = "generate topics for PositiveMed.com,: 1. Monitor Health Trends: Scan Google Alerts, authoritative health websites, and social media for emerging health, wellness, and medical discussions. 2. Keyword Research: Utilize tools like SEMrush to identify keywords with moderate to high search volume and low competition. Focus on long-tail, conversational keywords. 3. Analyze Site Data: Review PositiveMed's analytics to pinpoint popular articles and areas lacking recent content. 4. Crowdsourcing: Gather topic suggestions from the brand's audience and internal team, ensuring alignment with PositiveMed's mission. 5. Topic Evaluation: Assess topics for audience relevance, uniqueness, brand fit, current relevance, and SEO potential. 6. Tone and Style: Ensure topics can be approached with an educational, empowering, and ethical tone, in line with the brand's voice. Use this framework to generate a list of potential topics that cater to PositiveMed's audience while staying true to its brand ethos. 
Find trending topics for slowing and reversing aging think step by step and o into as much detail as possible" - -# Initialize the Worker with the custom tool -worker = Worker(llm=llm, ai_name="EdgeGPT Worker", external_tools=[edgegpt]) - -# Use the worker to process a task -task = "Hello, my name is ChatGPT" -response = worker.run(task) -print(response) +bing(task) diff --git a/revgpt.py b/revgpt.py new file mode 100644 index 00000000..cd5bd2d6 --- /dev/null +++ b/revgpt.py @@ -0,0 +1,29 @@ +import os +import sys +from dotenv import load_dotenv +from swarms.models.revgptV4 import RevChatGPTModelv4 +from swarms.models.revgptV1 import RevChatGPTModelv1 + +root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +sys.path.append(root_dir) + +load_dotenv() + +config = { + "model": os.getenv("REVGPT_MODEL"), + "plugin_ids": [os.getenv("REVGPT_PLUGIN_IDS")], + "disable_history": os.getenv("REVGPT_DISABLE_HISTORY") == "True", + "PUID": os.getenv("REVGPT_PUID"), + "unverified_plugin_domains": [os.getenv("REVGPT_UNVERIFIED_PLUGIN_DOMAINS")], +} + +# For v1 model +model = RevChatGPTModelv1(access_token=os.getenv("ACCESS_TOKEN"), **config) +# model = RevChatGPTModelv4(access_token=os.getenv("ACCESS_TOKEN"), **config) + +# For v3 model +# model = RevChatGPTModel(access_token=os.getenv("OPENAI_API_KEY"), **config) + +task = "Write a cli snake game" +response = model.run(task) +print(response) diff --git a/swarms/models/__init__.py b/swarms/models/__init__.py index d79f29b8..5b8e6313 100644 --- a/swarms/models/__init__.py +++ b/swarms/models/__init__.py @@ -18,8 +18,8 @@ from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA # from swarms.models.fuyu import Fuyu # Not working, wait until they update import sys -log_file = open("stderr_log.txt", "w") -sys.stderr = log_file +# log_file = open("stderr_log.txt", "w") +# sys.stderr = log_file __all__ = [ diff --git a/swarms/models/bing_chat.py b/swarms/models/bing_chat.py index 7135ba45..30263c61 100644 --- a/swarms/models/bing_chat.py +++ b/swarms/models/bing_chat.py @@ -1,4 +1,5 @@ """Bing-Chat model by Micorsoft""" +import os import asyncio import json from pathlib import Path @@ -25,19 +26,9 @@ class BingChat: """ - - def __init__(self, cookies_path: str = None, auth_cookie: str = None, auth_cookie_SRCHHPGUSR: str = None): - auth_cookie = os.environ("AUTH_COOKIE") - auth_cookie_SRCHHPGUSR = os.enviro("AUTH_COOKIE_SRCHHPGUSR") - if cookies_path: - self.cookies = json.loads(open(cookies_path, encoding="utf-8").read()) - elif auth_cookie: - self.cookies = auth_cookie - else: - raise ValueError("Either cookies_path or auth_cookie must be provided.") + def __init__(self, cookies_path: str = None): + self.cookies = json.loads(open(cookies_path, encoding="utf-8").read()) self.bot = asyncio.run(Chatbot.create(cookies=self.cookies)) - self.auth_cookie = auth_cookie - self.auth_cookie_SRCHHPGUSR = auth_cookie_SRCHHPGUSR def __call__( self, prompt: str, style: ConversationStyle = ConversationStyle.creative diff --git a/swarms/models/bioclip.py b/swarms/models/bioclip.py index 937634e3..facd1b61 100644 --- a/swarms/models/bioclip.py +++ b/swarms/models/bioclip.py @@ -75,6 +75,7 @@ class BioClip: 'adenocarcinoma histopathology', 'brain MRI', 'covid line chart', + 'covid line chart', 'squamous cell carcinoma histopathology', 'immunohistochemistry histopathology', 'bone X-ray', diff --git a/swarms/models/revgptV1.py b/swarms/models/revgptV1.py index a7327d23..400c9b25 100644 --- a/swarms/models/revgptV1.py +++ b/swarms/models/revgptV1.py @@ 
-35,13 +35,13 @@ from httpx import AsyncClient from OpenAIAuth import Auth0 as Authenticator from rich.live import Live from rich.markdown import Markdown -import schemas.typings as t +import swarms.schemas.typings as t from swarms.utils.revutils import create_completer from swarms.utils.revutils import create_session from swarms.utils.revutils import get_input # BASE_URL = environ.get("CHATGPT_BASE_URL", "http://192.168.250.249:9898/api/") -# BASE_URL = os.environ.get("CHATGPT_BASE_URL", "https://ai.fakeopen.com/api/") +BASE_URL = os.environ.get("CHATGPT_BASE_URL", "https://ai.fakeopen.com/api/") # BASE_URL = environ.get("CHATGPT_BASE_URL", "https://bypass.churchless.tech/") bcolors = t.Colors() diff --git a/swarms/models/revgptV4.py b/swarms/models/revgptV4.py index c57182f1..fc989445 100644 --- a/swarms/models/revgptV4.py +++ b/swarms/models/revgptV4.py @@ -40,14 +40,14 @@ from rich.markdown import Markdown import argparse import re -import schemas.typings as t +import swarms.schemas.typings as t from prompt_toolkit import prompt from prompt_toolkit import PromptSession from prompt_toolkit.auto_suggest import AutoSuggestFromHistory from prompt_toolkit.completion import WordCompleter from prompt_toolkit.history import InMemoryHistory from prompt_toolkit.key_binding import KeyBindings -from schemas.typings import Colors +from swarms.schemas.typings import Colors bindings = KeyBindings() diff --git a/swarms/models/vllm.py b/swarms/models/vllm.py new file mode 100644 index 00000000..9234c284 --- /dev/null +++ b/swarms/models/vllm.py @@ -0,0 +1,55 @@ +from vllm import LLM, SamplingParams +import openai +import ray +import uvicorn +from vllm.entrypoints import api_server as vllm_api_server +from vllm.entrypoints.openai import api_server as openai_api_server +from skypilot import SkyPilot + +class VLLMModel: + def __init__(self, model_name="facebook/opt-125m", tensor_parallel_size=1): + self.model_name = model_name + self.tensor_parallel_size = tensor_parallel_size + self.model = LLM(model_name, tensor_parallel_size=tensor_parallel_size) + self.temperature = 1.0 + self.max_tokens = None + self.sampling_params = SamplingParams(temperature=self.temperature) + + def generate_text(self, prompt: str) -> str: + output = self.model.generate([prompt], self.sampling_params) + return output[0].outputs[0].text + + def set_temperature(self, value: float): + self.temperature = value + self.sampling_params = SamplingParams(temperature=self.temperature) + + def set_max_tokens(self, value: int): + self.max_tokens = value + self.sampling_params = SamplingParams(temperature=self.temperature, max_tokens=self.max_tokens) + + def offline_batched_inference(self, prompts: list) -> list: + outputs = self.model.generate(prompts, self.sampling_params) + return [output.outputs[0].text for output in outputs] + + def start_api_server(self): + uvicorn.run(vllm_api_server.app, host="0.0.0.0", port=8000) + + def start_openai_compatible_server(self): + uvicorn.run(openai_api_server.app, host="0.0.0.0", port=8000) + + def query_openai_compatible_server(self, prompt: str): + openai.api_key = "EMPTY" + openai.api_base = "http://localhost:8000/v1" + completion = openai.Completion.create(model=self.model_name, prompt=prompt) + return completion + + def distributed_inference(self, prompt: str): + ray.init() + self.model = LLM(self.model_name, tensor_parallel_size=self.tensor_parallel_size) + output = self.model.generate(prompt, self.sampling_params) + ray.shutdown() + return output[0].outputs[0].text + + def 
run_on_cloud_with_skypilot(self, yaml_file): + sky = SkyPilot() + sky.launch(yaml_file) diff --git a/swarms/workers/worker.py b/swarms/workers/worker.py index 6106bd9f..191c88f4 100644 --- a/swarms/workers/worker.py +++ b/swarms/workers/worker.py @@ -68,11 +68,13 @@ class Worker: temperature: float = 0.5, llm=None, openai_api_key: str = None, + use_openai: bool = True, ): self.temperature = temperature self.human_in_the_loop = human_in_the_loop self.llm = llm self.openai_api_key = openai_api_key + self.use_openai = use_openai self.ai_name = ai_name self.ai_role = ai_role self.coordinates = ( @@ -149,24 +151,25 @@ class Worker: self.tools.extend(external_tools) def setup_memory(self): - """ - Set up memory for the worker. - """ - openai_api_key = os.getenv("OPENAI_API_KEY") or self.openai_api_key - try: - embeddings_model = OpenAIEmbeddings(openai_api_key=openai_api_key) - embedding_size = 1536 - index = faiss.IndexFlatL2(embedding_size) - - self.vectorstore = FAISS( - embeddings_model.embed_query, index, InMemoryDocstore({}), {} - ) - - except Exception as error: - raise RuntimeError( - f"Error setting up memory perhaps try try tuning the embedding size: {error}" - ) - + """ + Set up memory for the worker. + """ + if self.use_openai: # Only use OpenAI if use_openai is True + openai_api_key = os.getenv("OPENAI_API_KEY") or self.openai_api_key + try: + embeddings_model = OpenAIEmbeddings(openai_api_key=openai_api_key) + embedding_size = 1536 + index = faiss.IndexFlatL2(embedding_size) + + self.vectorstore = FAISS( + embeddings_model.embed_query, index, InMemoryDocstore({}), {} + ) + + except Exception as error: + raise RuntimeError( + f"Error setting up memory perhaps try try tuning the embedding size: {error}" + ) + def setup_agent(self): """ Set up the autonomous agent. From 65b91fd25d801f861d0d3bed3da3e9f9d95d5467 Mon Sep 17 00:00:00 2001 From: Zack Date: Fri, 27 Oct 2023 16:38:53 -0500 Subject: [PATCH 12/13] chore: cleanup bing_chat --- bingchat.py | 1 - 1 file changed, 1 deletion(-) diff --git a/bingchat.py b/bingchat.py index e3113166..d857e9e5 100644 --- a/bingchat.py +++ b/bingchat.py @@ -3,5 +3,4 @@ from swarms.models.bing_chat import BingChat bing = BingChat(cookies_path="./cookies.json") task = "generate topics for PositiveMed.com,: 1. Monitor Health Trends: Scan Google Alerts, authoritative health websites, and social media for emerging health, wellness, and medical discussions. 2. Keyword Research: Utilize tools like SEMrush to identify keywords with moderate to high search volume and low competition. Focus on long-tail, conversational keywords. 3. Analyze Site Data: Review PositiveMed's analytics to pinpoint popular articles and areas lacking recent content. 4. Crowdsourcing: Gather topic suggestions from the brand's audience and internal team, ensuring alignment with PositiveMed's mission. 5. Topic Evaluation: Assess topics for audience relevance, uniqueness, brand fit, current relevance, and SEO potential. 6. Tone and Style: Ensure topics can be approached with an educational, empowering, and ethical tone, in line with the brand's voice. Use this framework to generate a list of potential topics that cater to PositiveMed's audience while staying true to its brand ethos. 
Find trending topics for slowing and reversing aging think step by step and o into as much detail as possible" response = bing(task) - print(response) From 1e7bb46d51453a7f36a75a861d012af89af56374 Mon Sep 17 00:00:00 2001 From: Zack Date: Wed, 1 Nov 2023 10:29:47 -0500 Subject: [PATCH 13/13] feat: sync with main --- apps/BingBot/.env.example | 4 - apps/BingBot/Dockerfile | 6 - apps/BingBot/bot.py | 120 --- apps/BingBot/cogs/edgegpt.py | 148 ---- apps/BingBot/cogs/event.py | 321 -------- apps/BingBot/cogs/help.py | 35 - apps/BingBot/compose.yaml | 11 - apps/BingBot/cookies.json | 6 - apps/BingBot/core/classes.py | 6 - apps/BingBot/requirements.txt | 4 - apps/BingBot/src/imageCreate.py | 40 - apps/BingBot/src/log.py | 66 -- apps/BingBot/src/response.py | 194 ----- apps/GradioBot/main.py | 197 ----- apps/GradioBot/utils.py | 41 -- apps/MythGen/.env.example | 2 - apps/MythGen/README.md | 71 -- apps/MythGen/cookies.json | 6 - apps/MythGen/requirements.txt | 8 - apps/open-sourcerer/.env.example | 6 - apps/open-sourcerer/Dockerfile | 17 - apps/open-sourcerer/README.md | 124 ---- apps/open-sourcerer/docker-compose.yaml | 9 - apps/open-sourcerer/main.py | 77 -- apps/open-sourcerer/requirements.txt | 6 - apps/open-sourcerer/voice.py | 97 --- apps/orchistrator/.dockerignore | 2 - apps/orchistrator/.env.example | 3 - apps/orchistrator/Dockerfile | 10 - apps/orchistrator/README.md | 42 -- apps/orchistrator/backend/deleteCommands.js | 22 - apps/orchistrator/backend/deployCommands.js | 53 -- apps/orchistrator/commands/allContainers.js | 39 - apps/orchistrator/commands/restart.js | 69 -- apps/orchistrator/commands/startContainer.js | 92 --- apps/orchistrator/commands/stopContainer.js | 68 -- apps/orchistrator/docker-compose.yml | 12 - apps/orchistrator/index.js | 89 --- apps/orchistrator/package-lock.json | 723 ------------------- apps/orchistrator/package.json | 30 - 40 files changed, 2876 deletions(-) delete mode 100644 apps/BingBot/.env.example delete mode 100644 apps/BingBot/Dockerfile delete mode 100644 apps/BingBot/bot.py delete mode 100644 apps/BingBot/cogs/edgegpt.py delete mode 100644 apps/BingBot/cogs/event.py delete mode 100644 apps/BingBot/cogs/help.py delete mode 100644 apps/BingBot/compose.yaml delete mode 100644 apps/BingBot/cookies.json delete mode 100644 apps/BingBot/core/classes.py delete mode 100644 apps/BingBot/requirements.txt delete mode 100644 apps/BingBot/src/imageCreate.py delete mode 100644 apps/BingBot/src/log.py delete mode 100644 apps/BingBot/src/response.py delete mode 100644 apps/GradioBot/main.py delete mode 100644 apps/GradioBot/utils.py delete mode 100644 apps/MythGen/.env.example delete mode 100644 apps/MythGen/README.md delete mode 100644 apps/MythGen/cookies.json delete mode 100644 apps/MythGen/requirements.txt delete mode 100644 apps/open-sourcerer/.env.example delete mode 100644 apps/open-sourcerer/Dockerfile delete mode 100644 apps/open-sourcerer/README.md delete mode 100644 apps/open-sourcerer/docker-compose.yaml delete mode 100644 apps/open-sourcerer/main.py delete mode 100644 apps/open-sourcerer/requirements.txt delete mode 100644 apps/open-sourcerer/voice.py delete mode 100644 apps/orchistrator/.dockerignore delete mode 100644 apps/orchistrator/.env.example delete mode 100644 apps/orchistrator/Dockerfile delete mode 100644 apps/orchistrator/README.md delete mode 100644 apps/orchistrator/backend/deleteCommands.js delete mode 100644 apps/orchistrator/backend/deployCommands.js delete mode 100644 apps/orchistrator/commands/allContainers.js delete mode 100644 
apps/orchistrator/commands/restart.js delete mode 100644 apps/orchistrator/commands/startContainer.js delete mode 100644 apps/orchistrator/commands/stopContainer.js delete mode 100644 apps/orchistrator/docker-compose.yml delete mode 100644 apps/orchistrator/index.js delete mode 100644 apps/orchistrator/package-lock.json delete mode 100644 apps/orchistrator/package.json diff --git a/apps/BingBot/.env.example b/apps/BingBot/.env.example deleted file mode 100644 index 341406f7..00000000 --- a/apps/BingBot/.env.example +++ /dev/null @@ -1,4 +0,0 @@ -DISCORD_BOT_TOKEN= -MENTION_CHANNEL_ID= -AUTH_COOKIE= -AUTH_COOKIE_SRCHHPGUSR= diff --git a/apps/BingBot/Dockerfile b/apps/BingBot/Dockerfile deleted file mode 100644 index e276b5c8..00000000 --- a/apps/BingBot/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM python:3.9.16 -WORKDIR /bot -COPY requirements.txt /bot/ -RUN pip install -r requirements.txt -COPY . /bot -CMD python bot.py diff --git a/apps/BingBot/bot.py b/apps/BingBot/bot.py deleted file mode 100644 index 233ca9a7..00000000 --- a/apps/BingBot/bot.py +++ /dev/null @@ -1,120 +0,0 @@ -import discord -import os -import src.log -import sys -import pkg_resources -import json -from discord.ext import commands -from dotenv import load_dotenv - -load_dotenv() - -bot = commands.Bot(command_prefix="!", intents=discord.Intents.all()) - -# init loggger -logger = src.log.setup_logger(__name__) - - -def restart_bot(): - # Replace current process with new instance of bot.py - os.execl(sys.executable, sys.executable, "bot.py") - - -def check_verion() -> None: - # Read the requirements.txt file and add each line to a list - with open("requirements.txt") as f: - required = f.read().splitlines() - - # For each library listed in requirements.txt, check if the corresponding version is installed - for package in required: - # Use the pkg_resources library to get information about the installed version of the library - package_name, package_verion = package.split("==") - installed = pkg_resources.get_distribution(package_name) - # Extract the library name and version number - name, version = installed.project_name, installed.version - # Compare the version number to see if it matches the one in requirements.txt - if package != f"{name}=={version}": - logger.error( - f"{name} version {version} is installed but does not match the requirements" - ) - sys.exit() - - -@bot.event -async def on_ready(): - bot_status = discord.Status.online - bot_activity = discord.Activity(type=discord.ActivityType.playing, name="bing.com") - await bot.change_presence(status=bot_status, activity=bot_activity) - for Filename in os.listdir("./cogs"): - if Filename.endswith(".py"): - await bot.load_extension(f"cogs.{Filename[:-3]}") - logger.info(f"{bot.user} is now running!") - print("Bot is Up and Ready!") - try: - synced = await bot.tree.sync() - print(f"Synced {len(synced)} commands") - except Exception as e: - print(e) - - -# Load command -@commands.is_owner() -@bot.command() -async def load(ctx, extension): - await bot.load_extension(f"cogs.{extension}") - await ctx.author.send(f"> **Loaded {extension} done.**") - - -# Unload command -@commands.is_owner() -@bot.command() -async def unload(ctx, extension): - await bot.unload_extension(f"cogs.{extension}") - await ctx.author.send(f"> **Un-Loaded {extension} done.**") - - -# Empty discord_bot.log file -@commands.is_owner() -@bot.command() -async def clean(ctx): - open("discord_bot.log", "w").close() - await ctx.author.send(f"> **Successfully emptied the file!**") - - -# Get 
discord_bot.log file -@commands.is_owner() -@bot.command() -async def getLog(ctx): - try: - with open("discord_bot.log", "rb") as f: - file = discord.File(f) - await ctx.author.send(file=file) - await ctx.author.send("> **Send successfully!**") - except: - await ctx.author.send("> **Send failed!**") - - -# Upload new Bing cookies and restart the bot -@commands.is_owner() -@bot.command() -async def upload(ctx): - if ctx.message.attachments: - for attachment in ctx.message.attachments: - if str(attachment)[-4:] == ".txt": - content = await attachment.read() - with open("cookies.json", "w", encoding="utf-8") as f: - json.dump(json.loads(content), f, indent=2) - if not isinstance(ctx.channel, discord.abc.PrivateChannel): - await ctx.message.delete() - await ctx.author.send(f"> **Upload new cookies successfully!**") - logger.warning("\x1b[31mCookies has been setup successfully\x1b[0m") - restart_bot() - else: - await ctx.author.send("> **Didn't get any txt file.**") - else: - await ctx.author.send("> **Didn't get any file.**") - - -if __name__ == "__main__": - check_verion() - bot.run(os.getenv("DISCORD_BOT_TOKEN")) diff --git a/apps/BingBot/cogs/edgegpt.py b/apps/BingBot/cogs/edgegpt.py deleted file mode 100644 index c95662b0..00000000 --- a/apps/BingBot/cogs/edgegpt.py +++ /dev/null @@ -1,148 +0,0 @@ -import os -import discord -import json -from typing import Optional -from EdgeGPT.ImageGen import ImageGenAsync, ImageGen -from EdgeGPT.EdgeGPT import Chatbot -from discord import app_commands -from core.classes import Cog_Extension -from src import log -from src.imageCreate import create_image, get_using_create, set_using_create -from src.response import send_message, get_using_send, set_using_send -from dotenv import load_dotenv - -load_dotenv() - -logger = log.setup_logger(__name__) - -users_chatbot = {} -users_image_generator = {} -user_conversation_style = {} - -async def init_chatbot(user_id): - with open("./cookies.json", encoding="utf-8") as file: - cookie_json = json.load(file) - for cookie in cookie_json: - if cookie.get("name") == "_U": - auth_cookie = cookie.get("value") - break - - auth_cookie = os.environ.get("AUTH_COOKIE") - auth_cookie_SRCHHPGUSR = os.environ.get("AUTH_COOKIE_SRCHHPGUSR") - # auth_cookie_SRCHHPGUSR = os.environ.get("AUTH_COOKIE_SRCHHPGUSR") - users_chatbot[user_id] = UserChatbot(cookies=cookie_json) - users_image_generator[user_id] = ImageGenAsync(auth_cookie, quiet=True) - user_conversation_style[user_id] = "balanced" - -class UserChatbot: - def __init__(self, cookies): - self.chatbot = Chatbot(cookies=cookies) - - async def send_message(self, interaction, message, conversation_style): - await send_message(self.chatbot, interaction, message, conversation_style) - - async def create_image(self, interaction, prompt: str, image_generator): - await create_image(interaction, prompt, image_generator) - - async def reset(self): - await self.chatbot.reset() - -class EdgeGPT(Cog_Extension): - # Chat with Bing - @app_commands.command(name="bing", description="Have a chat with Bing") - async def bing(self, interaction: discord.Interaction, *, message: str): - try: - using = await get_using_send(interaction.user.id) - except: - await set_using_send(interaction.user.id, False) - using = await get_using_send(interaction.user.id) - if not using: - await interaction.response.defer(ephemeral=False, thinking=True) - username = str(interaction.user) - usermessage = message - channel = str(interaction.channel) - user_id = interaction.user.id - if user_id not in users_chatbot: - 
await init_chatbot(interaction.user.id) - conversation_style = user_conversation_style[user_id] - logger.info(f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}]") - await users_chatbot[user_id].send_message(interaction, usermessage, conversation_style) - else: - await interaction.response.defer(ephemeral=True, thinking=True) - await interaction.followup.send("> **Please wait for your last conversation to finish.**") - - # Reset Bing conversation - @app_commands.command(name="reset", description="Reset Bing conversation") - async def reset(self, interaction: discord.Interaction): - await interaction.response.defer(ephemeral=True, thinking=True) - user_id = interaction.user.id - try: - await users_chatbot[user_id].reset() - await interaction.followup.send("> **Info: Reset finish.**") - logger.warning("\x1b[31mBing has been successfully reset\x1b[0m") - except: - await interaction.followup.send(f"> **You don't have any conversation yet.**") - logger.exception("Bing reset failed.") - - # Switch conversation style - @app_commands.command(name="switch_style", description="Switch conversation style") - @app_commands.choices(style=[app_commands.Choice(name="Creative", value="creative"), app_commands.Choice(name="Balanced", value="balanced"), app_commands.Choice(name="Precise", value="precise")]) - async def switch_style(self, interaction: discord.Interaction, style: app_commands.Choice[str]): - await interaction.response.defer(ephemeral=True, thinking=True) - user_id = interaction.user.id - if user_id not in users_chatbot: - await init_chatbot(user_id) - user_conversation_style[user_id] = style.value - await interaction.followup.send(f"> **Info: successfull switch conversation style to {style.value}.**") - logger.warning(f"\x1b[31mConversation style has been successfully switch to {style.value}\x1b[0m") - - # Set and delete personal Bing Cookies - @app_commands.command(name="bing_cookies", description="Set or delete Bing Cookies") - @app_commands.choices(choice=[app_commands.Choice(name="set", value="set"), app_commands.Choice(name="delete", value="delete")]) - async def cookies_setting(self, interaction: discord.Interaction, choice: app_commands.Choice[str], cookies_file: Optional[discord.Attachment]=None): - await interaction.response.defer(ephemeral=True, thinking=True) - user_id = interaction.user.id - if choice.value == "set": - try: - content = json.loads(await cookies_file.read()) - for cookie in content: - if cookie.get("name") == "_U": - auth_cookie = cookie.get("value") - break - users_image_generator[user_id] = ImageGenAsync(auth_cookie, quiet=True) - users_chatbot[user_id] = UserChatbot(cookies=content) - user_conversation_style[user_id] = "balanced" - await interaction.followup.send("> **Upload successful!**") - logger.warning(f"\x1b[31m{interaction.user} set Bing Cookies successful\x1b[0m") - except: - await interaction.followup.send("> **Please upload your Bing Cookies.**") - else: - try: - del users_chatbot[user_id] - del users_image_generator[user_id] - del user_conversation_style[user_id] - await interaction.followup.send("> **Delete finish.**") - logger.warning(f"\x1b[31m{interaction.user} delete Cookies\x1b[0m") - except: - await interaction.followup.send("> **You don't have any Bing Cookies.**") - - # Create images - @app_commands.command(name="create_image", description="generate image by Bing image creator") - async def create_image(self, interaction: discord.Interaction, *, prompt: str): - user_id = interaction.user.id - if 
interaction.user.id not in users_chatbot: - await init_chatbot(user_id) - try: - using = await get_using_create(user_id) - except: - await set_using_create(user_id, False) - using = await get_using_create(user_id) - if not using: - logger.info(f"\x1b[31m{interaction.user}\x1b[0m : '{prompt}' ({interaction.channel}) [BingImageCreator]") - await users_chatbot[user_id].create_image(interaction, prompt, users_image_generator[user_id] ) - else: - await interaction.response.defer(ephemeral=True, thinking=True) - await interaction.followup.send("> **Please wait for your last image to create finish.**") - -async def setup(bot): - await bot.add_cog(EdgeGPT(bot)) diff --git a/apps/BingBot/cogs/event.py b/apps/BingBot/cogs/event.py deleted file mode 100644 index 9b81cf30..00000000 --- a/apps/BingBot/cogs/event.py +++ /dev/null @@ -1,321 +0,0 @@ -import discord -import re -import os -import json -import asyncio -from EdgeGPT.EdgeGPT import Chatbot, ConversationStyle -from dotenv import load_dotenv -from discord.ext import commands -from core.classes import Cog_Extension -from functools import partial -from src import log - -load_dotenv() - -USE_SUGGEST_RESPONSES = True -try: - MENTION_CHANNEL_ID = int(os.getenv("MENTION_CHANNEL_ID")) -except: - MENTION_CHANNEL_ID = None -logger = log.setup_logger(__name__) -sem = asyncio.Semaphore(1) -conversation_style = "balanced" - -with open("./cookies.json", encoding="utf-8") as file: - cookies = json.load(file) -chatbot = Chatbot(cookies=cookies) - - -# To add suggest responses -class MyView(discord.ui.View): - def __init__(self, chatbot: Chatbot, suggest_responses: list): - super().__init__(timeout=120) - # Add buttons - for label in suggest_responses: - button = discord.ui.Button(label=label) - - # Button event - async def callback( - interaction: discord.Interaction, button: discord.ui.Button - ): - await interaction.response.defer(ephemeral=False, thinking=True) - # When click the button, all buttons will disable. 
- for child in self.children: - child.disabled = True - await interaction.followup.edit_message( - message_id=interaction.message.id, view=self - ) - username = str(interaction.user) - usermessage = button.label - channel = str(interaction.channel) - logger.info( - f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}] [button]" - ) - task = asyncio.create_task( - send_message(chatbot, interaction, usermessage) - ) - await asyncio.gather(task) - - self.add_item(button) - self.children[-1].callback = partial(callback, button=button) - - -# Show Dropdown -class DropdownView(discord.ui.View): - def __init__(self): - super().__init__(timeout=180) - - options = [ - discord.SelectOption( - label="Creative", - description="Switch conversation style to Creative", - emoji="🎨", - ), - discord.SelectOption( - label="Balanced", - description="Switch conversation style to Balanced", - emoji="⚖️", - ), - discord.SelectOption( - label="Precise", - description="Switch conversation style to Precise", - emoji="🔎", - ), - discord.SelectOption( - label="Reset", description="Reset conversation", emoji="🔄" - ), - ] - - dropdown = discord.ui.Select( - placeholder="Choose setting", min_values=1, max_values=1, options=options - ) - - dropdown.callback = self.dropdown_callback - self.add_item(dropdown) - - # Dropdown event - async def dropdown_callback(self, interaction: discord.Interaction): - await interaction.response.defer(ephemeral=False, thinking=True) - if interaction.data["values"][0] == "Creative": - await set_conversation_style("creative") - await interaction.followup.send( - f"> **Info: successfull switch conversation style to *{interaction.data['values'][0]}*.**" - ) - logger.warning( - f"\x1b[31mConversation style has been successfully switch to {interaction.data['values'][0]}\x1b[0m" - ) - elif interaction.data["values"][0] == "Balanced": - await set_conversation_style("balanced") - await interaction.followup.send( - f"> **Info: successfull switch conversation style to *{interaction.data['values'][0]}*.**" - ) - logger.warning( - f"\x1b[31mConversation style has been successfully switch to {interaction.data['values'][0]}\x1b[0m" - ) - elif interaction.data["values"][0] == "Precise": - await set_conversation_style("precise") - await interaction.followup.send( - f"> **Info: successfull switch conversation style to *{interaction.data['values'][0]}*.**" - ) - logger.warning( - f"\x1b[31mConversation style has been successfully switch to {interaction.data['values'][0]}\x1b[0m" - ) - else: - await chatbot.reset() - await interaction.followup.send(f"> **Info: Reset finish.**") - logger.warning("\x1b[31mBing has been successfully reset\x1b[0m") - # disable dropdown after select - for dropdown in self.children: - dropdown.disabled = True - await interaction.followup.edit_message( - message_id=interaction.message.id, view=self - ) - - -# Set conversation style -async def set_conversation_style(style: str): - global conversation_style - conversation_style = style - - -async def set_chatbot(cookies): - global chatbot - chatbot = Chatbot(cookies=cookies) - - -async def send_message(chatbot: Chatbot, message, user_message: str): - async with sem: - if isinstance(message, discord.message.Message): - await message.channel.typing() - reply = "" - text = "" - link_embed = "" - images_embed = [] - all_url = [] - try: - # Change conversation style - if conversation_style == "creative": - reply = await chatbot.ask( - prompt=user_message, - conversation_style=ConversationStyle.creative, - 
simplify_response=True, - ) - elif conversation_style == "precise": - reply = await chatbot.ask( - prompt=user_message, - conversation_style=ConversationStyle.precise, - simplify_response=True, - ) - else: - reply = await chatbot.ask( - prompt=user_message, - conversation_style=ConversationStyle.balanced, - simplify_response=True, - ) - - # Get reply text - text = f"{reply['text']}" - text = re.sub(r"\[\^(\d+)\^\]", lambda match: "", text) - - # Get the URL, if available - try: - if len(reply["sources"]) != 0: - for i, url in enumerate(reply["sources"], start=1): - if len(url["providerDisplayName"]) == 0: - all_url.append(f"{i}. {url['seeMoreUrl']}") - else: - all_url.append( - f"{i}. [{url['providerDisplayName']}]({url['seeMoreUrl']})" - ) - link_text = "\n".join(all_url) - link_embed = discord.Embed(description=link_text) - except: - pass - - # Set the final message - if isinstance(message, discord.interactions.Interaction): - user_message = user_message.replace("\n", "") - ask = f"> **{user_message}**\t(***style: {conversation_style}***)\n\n" - response = f"{ask}{text}" - else: - response = f"{text}\t(***style: {conversation_style}***)" - - # Discord limit about 2000 characters for a message - while len(response) > 2000: - temp = response[:2000] - response = response[2000:] - if isinstance(message, discord.interactions.Interaction): - await message.followup.send(temp) - else: - await message.channel.send(temp) - - # Get the image, if available - try: - if len(link_embed) == 0: - all_image = re.findall( - "https?://[\w\./]+", str(reply["sources_text"]) - ) - [ - images_embed.append( - discord.Embed(url="https://www.bing.com/").set_image( - url=image_link - ) - ) - for image_link in all_image - ] - except: - pass - - if USE_SUGGEST_RESPONSES: - suggest_responses = reply["suggestions"] - if images_embed: - if isinstance(message, discord.interactions.Interaction): - await message.followup.send( - response, - view=MyView(chatbot, suggest_responses), - embeds=images_embed, - wait=True, - ) - else: - await message.channel.send( - response, - view=MyView(chatbot, suggest_responses), - embeds=images_embed, - ) - elif link_embed: - if isinstance(message, discord.interactions.Interaction): - await message.followup.send( - response, - view=MyView(chatbot, suggest_responses), - embed=link_embed, - wait=True, - ) - else: - await message.channel.send( - response, - view=MyView(chatbot, suggest_responses), - embed=link_embed, - ) - else: - if isinstance(message, discord.interactions.Interaction): - await message.followup.send( - response, view=MyView(chatbot, suggest_responses), wait=True - ) - else: - await message.channel.send( - response, view=MyView(chatbot, suggest_responses) - ) - else: - if images_embed: - if isinstance(message, discord.interactions.Interaction): - await message.followup.send( - response, embeds=images_embed, wait=True - ) - else: - await message.channel.send(response, embeds=images_embed) - elif link_embed: - if isinstance(message, discord.interactions.Interaction): - await message.followup.send( - response, embed=link_embed, wait=True - ) - else: - await message.channel.send(response, embed=link_embed) - else: - if isinstance(message, discord.interactions.Interaction): - await message.followup.send(response, wait=True) - else: - await message.channel.send(response) - except Exception as e: - if isinstance(message, discord.interactions.Interaction): - await message.followup.send(f">>> **Error: {e}**") - else: - await message.channel.send(f">>> **Error: {e}**") - 
logger.exception(f"Error while sending message: {e}") - - -class Event(Cog_Extension): - @commands.Cog.listener() - async def on_message(self, message: discord.Message): - if message.author == self.bot.user: - return - if self.bot.user in message.mentions: - if not MENTION_CHANNEL_ID or message.channel.id == MENTION_CHANNEL_ID: - content = re.sub(r"<@.*?>", "", message.content).strip() - if len(content) > 0: - username = str(message.author) - channel = str(message.channel) - logger.info( - f"\x1b[31m{username}\x1b[0m : '{content}' ({channel}) [Style: {conversation_style}]" - ) - task = asyncio.create_task(send_message(chatbot, message, content)) - await asyncio.gather(task) - else: - await message.channel.send(view=DropdownView()) - elif MENTION_CHANNEL_ID is not None: - await message.channel.send( - f"> **Can only be mentioned at <#{self.bot.get_channel(MENTION_CHANNEL_ID).id}>**" - ) - - -async def setup(bot): - await bot.add_cog(Event(bot)) diff --git a/apps/BingBot/cogs/help.py b/apps/BingBot/cogs/help.py deleted file mode 100644 index ae82e1d7..00000000 --- a/apps/BingBot/cogs/help.py +++ /dev/null @@ -1,35 +0,0 @@ -import discord -from core.classes import Cog_Extension -from discord import app_commands - - -class Help(Cog_Extension): - @app_commands.command(name="help", description="Show how to use") - async def help(self, interaction: discord.Interaction): - embed = discord.Embed( - title="Help", - ) - embed.add_field( - name="/bing_cookies", - value="Set and delete your Bing Cookies.", - inline=False, - ) - embed.add_field(name="/bing", value="Chat with Bing.", inline=False) - embed.add_field( - name="/reset", value="Reset your Bing conversation.", inline=False - ) - embed.add_field( - name="/switch_style", - value="Switch your Bing conversation style.", - inline=False, - ) - embed.add_field( - name="/create_image", - value="Generate image by Bing Image Creator.", - inline=False, - ) - await interaction.response.send_message(embed=embed) - - -async def setup(bot): - await bot.add_cog(Help(bot)) diff --git a/apps/BingBot/compose.yaml b/apps/BingBot/compose.yaml deleted file mode 100644 index b6442b19..00000000 --- a/apps/BingBot/compose.yaml +++ /dev/null @@ -1,11 +0,0 @@ -version: '3' - -services: - spartan: - container_name: Spartan - build: . 
- environment: - - DISCORD_BOT_TOKEN=${DISCORD_BOT_TOKEN} - volumes: - - ./cookies.json:/bot/cookies.json - - ./config.yml:/bot/config.yml diff --git a/apps/BingBot/cookies.json b/apps/BingBot/cookies.json deleted file mode 100644 index 4d0748fc..00000000 --- a/apps/BingBot/cookies.json +++ /dev/null @@ -1,6 +0,0 @@ -[ - { - "name": "cookie1", - "value": "1lEXeWRSIPUsQ0S3tdAc3v7BexGK2qBlzsXz8j52w_HNBoOsegjiwRySQHmfoWduHVUxSXo6cETPP2qNrYWAz6k7wn43WGO9i7ll9_Wl7M6HA2c9twbKByfAtAB5fr26wPawQ6y1GCdakD_Kr4xdD20fvkytnmOmZu7Ktnb9mUVE605AAbJcIA9SOlRN5410ZPOnZA1cIzr4WtAFWNfQKPG6Sxk_zO5zvXQfYTyMNmOI" - } -] diff --git a/apps/BingBot/core/classes.py b/apps/BingBot/core/classes.py deleted file mode 100644 index 23c4cbb1..00000000 --- a/apps/BingBot/core/classes.py +++ /dev/null @@ -1,6 +0,0 @@ -from discord.ext import commands - - -class Cog_Extension(commands.Cog): - def __init__(self, bot): - self.bot = bot diff --git a/apps/BingBot/requirements.txt b/apps/BingBot/requirements.txt deleted file mode 100644 index 31ae25b7..00000000 --- a/apps/BingBot/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -discord.py==2.3.2 -python-dotenv==1.0.0 -PyYAML==6.0.1 -bing-chat==1.9.3 diff --git a/apps/BingBot/src/imageCreate.py b/apps/BingBot/src/imageCreate.py deleted file mode 100644 index 0b68b44d..00000000 --- a/apps/BingBot/src/imageCreate.py +++ /dev/null @@ -1,40 +0,0 @@ -import discord -import asyncio -from src import log - -logger = log.setup_logger(__name__) -using_func = {} - - -async def get_using_create(user_id): - return using_func[user_id] - - -async def set_using_create(user_id, status: bool): - using_func[user_id] = status - - -async def create_image(interaction: discord.Interaction, prompt: str, image_generator): - await interaction.response.defer(ephemeral=False, thinking=True) - using_func[interaction.user.id] = True - try: - embeds = [] - prompts = f"> **{prompt}** - <@{str(interaction.user.id)}> (***BingImageCreator***)\n\n" - # Fetches image links - images = await image_generator.get_images(prompt) - # Add embed to list of embeds - [ - embeds.append( - discord.Embed(url="https://www.bing.com/").set_image(url=image_link) - ) - for image_link in images - ] - await interaction.followup.send(prompts, embeds=embeds, wait=True) - except asyncio.TimeoutError: - await interaction.followup.send("> **Error: Request timed out.**") - logger.exception("Error while create image: Request timed out.") - except Exception as e: - await interaction.followup.send(f"> **Error: {e}**") - logger.exception(f"Error while create image: {e}") - finally: - using_func[interaction.user.id] = False diff --git a/apps/BingBot/src/log.py b/apps/BingBot/src/log.py deleted file mode 100644 index ed04a4a3..00000000 --- a/apps/BingBot/src/log.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -import logging -import logging.handlers - - -class CustomFormatter(logging.Formatter): - LEVEL_COLORS = [ - (logging.DEBUG, "\x1b[40;1m"), - (logging.INFO, "\x1b[34;1m"), - (logging.WARNING, "\x1b[33;1m"), - (logging.ERROR, "\x1b[31m"), - (logging.CRITICAL, "\x1b[41m"), - ] - FORMATS = { - level: logging.Formatter( - f"\x1b[30;1m%(asctime)s\x1b[0m {color}%(levelname)-8s\x1b[0m \x1b[35m%(name)s\x1b[0m -> %(message)s", - "%Y-%m-%d %H:%M:%S", - ) - for level, color in LEVEL_COLORS - } - - def format(self, record): - formatter = self.FORMATS.get(record.levelno) - if formatter is None: - formatter = self.FORMATS[logging.DEBUG] - - # Override the traceback to always print in red - if record.exc_info: - text = formatter.formatException(record.exc_info) - 
record.exc_text = f"\x1b[31m{text}\x1b[0m" - - output = formatter.format(record) - - # Remove the cache layer - record.exc_text = None - return output - - -def setup_logger(module_name: str) -> logging.Logger: - # create logger - library, _, _ = module_name.partition(".py") - logger = logging.getLogger(library) - logger.setLevel(logging.INFO) - - if not logger.handlers: - # create console handler - console_handler = logging.StreamHandler() - console_handler.setLevel(logging.INFO) - console_handler.setFormatter(CustomFormatter()) - # specify that the log file path is the same as `main.py` file path - grandparent_dir = os.path.abspath(__file__ + "/../../") - log_name = "discord_bot.log" - log_path = os.path.join(grandparent_dir, log_name) - # create local log handler - log_handler = logging.handlers.RotatingFileHandler( - filename=log_path, - encoding="utf-8", - maxBytes=32 * 1024 * 1024, # 32 MiB - backupCount=2, # Rotate through 5 files - ) - log_handler.setFormatter(CustomFormatter()) - # Add handlers to logger - logger.addHandler(log_handler) - logger.addHandler(console_handler) - - return logger diff --git a/apps/BingBot/src/response.py b/apps/BingBot/src/response.py deleted file mode 100644 index 47960a73..00000000 --- a/apps/BingBot/src/response.py +++ /dev/null @@ -1,194 +0,0 @@ -import discord -import re -from EdgeGPT.EdgeGPT import Chatbot, ConversationStyle -from src import log -from functools import partial - -USE_SUGGEST_RESPONSES = True -logger = log.setup_logger(__name__) -using_func = {} - - -# To add suggest responses -class MyView(discord.ui.View): - def __init__( - self, - interaction: discord.Interaction, - chatbot: Chatbot, - conversation_style: str, - suggest_responses: list, - ): - super().__init__(timeout=120) - self.button_author = interaction.user.id - # Add buttons - for label in suggest_responses: - button = discord.ui.Button(label=label) - - # Button event - async def callback( - interaction: discord.Interaction, - button_author: int, - button: discord.ui.Button, - ): - if interaction.user.id != button_author: - await interaction.response.defer(ephemeral=True, thinking=True) - await interaction.followup.send( - "You don't have permission to press this button." - ) - elif not using_func[interaction.user.id]: - await interaction.response.defer(ephemeral=False, thinking=True) - # When click the button, all buttons will disable. - for child in self.children: - child.disabled = True - await interaction.followup.edit_message( - message_id=interaction.message.id, view=self - ) - username = str(interaction.user) - usermessage = button.label - channel = str(interaction.channel) - logger.info( - f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}] [button]" - ) - await send_message( - chatbot, interaction, usermessage, conversation_style - ) - else: - await interaction.response.defer(ephemeral=True, thinking=True) - await interaction.followup.send( - "Please wait for your last conversation to finish." 
- ) - - self.add_item(button) - self.children[-1].callback = partial( - callback, button_author=self.button_author, button=button - ) - - -async def get_using_send(user_id): - return using_func[user_id] - - -async def set_using_send(user_id, status: bool): - using_func[user_id] = status - - -async def send_message( - chatbot: Chatbot, - interaction: discord.Interaction, - user_message: str, - conversation_style: str, -): - using_func[interaction.user.id] = True - reply = "" - text = "" - link_embed = "" - images_embed = [] - all_url = [] - try: - # Change conversation style - if conversation_style == "creative": - reply = await chatbot.ask( - prompt=user_message, - conversation_style=ConversationStyle.creative, - simplify_response=True, - ) - elif conversation_style == "precise": - reply = await chatbot.ask( - prompt=user_message, - conversation_style=ConversationStyle.precise, - simplify_response=True, - ) - else: - reply = await chatbot.ask( - prompt=user_message, - conversation_style=ConversationStyle.balanced, - simplify_response=True, - ) - - # Get reply text - text = f"{reply['text']}" - text = re.sub(r"\[\^(\d+)\^\]", lambda match: "", text) - - # Get the URL, if available - try: - if len(reply["sources"]) != 0: - for i, url in enumerate(reply["sources"], start=1): - if len(url["providerDisplayName"]) == 0: - all_url.append(f"{i}. {url['seeMoreUrl']}") - else: - all_url.append( - f"{i}. [{url['providerDisplayName']}]({url['seeMoreUrl']})" - ) - link_text = "\n".join(all_url) - link_embed = discord.Embed(description=link_text) - except: - pass - - # Set the final message - user_message = user_message.replace("\n", "") - ask = f"> **{user_message}** - <@{str(interaction.user.id)}> (***style: {conversation_style}***)\n\n" - response = f"{ask}{text}" - - # Discord limit about 2000 characters for a message - while len(response) > 2000: - temp = response[:2000] - response = response[2000:] - await interaction.followup.send(temp) - - # Get the image, if available - try: - if len(link_embed) == 0: - all_image = re.findall("https?://[\w\./]+", str(reply["sources_text"])) - [ - images_embed.append( - discord.Embed(url="https://www.bing.com/").set_image( - url=image_link - ) - ) - for image_link in all_image - ] - except: - pass - # Add all suggest responses in list - if USE_SUGGEST_RESPONSES: - suggest_responses = reply["suggestions"] - if images_embed: - await interaction.followup.send( - response, - view=MyView( - interaction, chatbot, conversation_style, suggest_responses - ), - embeds=images_embed, - wait=True, - ) - elif link_embed: - await interaction.followup.send( - response, - view=MyView( - interaction, chatbot, conversation_style, suggest_responses - ), - embed=link_embed, - wait=True, - ) - else: - await interaction.followup.send( - response, - view=MyView( - interaction, chatbot, conversation_style, suggest_responses - ), - wait=True, - ) - else: - if images_embed: - await interaction.followup.send( - response, embeds=images_embed, wait=True - ) - elif link_embed: - await interaction.followup.send(response, embed=link_embed, wait=True) - else: - await interaction.followup.send(response, wait=True) - except Exception as e: - await interaction.followup.send(f">>> **Error: {e}**") - logger.exception(f"Error while sending message: {e}") - finally: - using_func[interaction.user.id] = False diff --git a/apps/GradioBot/main.py b/apps/GradioBot/main.py deleted file mode 100644 index fb985531..00000000 --- a/apps/GradioBot/main.py +++ /dev/null @@ -1,197 +0,0 @@ -import asyncio -import 
argparse -from collections import Counter -import json -import pathlib -import re - - -import discord -from discord.ext import commands -import gradio as gr -from gradio import utils -import requests - -from typing import Dict, List - -from utils import * - - -lock = asyncio.Lock() - -bot = commands.Bot("", intents=discord.Intents(messages=True, guilds=True)) - - -GUILD_SPACES_FILE = "guild_spaces.pkl" - - -if pathlib.Path(GUILD_SPACES_FILE).exists(): - guild_spaces = read_pickle_file(GUILD_SPACES_FILE) - assert isinstance(guild_spaces, dict), f"{GUILD_SPACES_FILE} in invalid format." - guild_blocks = {} - delete_keys = [] - for k, v in guild_spaces.items(): - try: - guild_blocks[k] = gr.Interface.load(v, src="spaces") - except ValueError: - delete_keys.append(k) - for k in delete_keys: - del guild_spaces[k] -else: - guild_spaces: Dict[int, str] = {} - guild_blocks: Dict[int, gr.Blocks] = {} - - -HASHED_USERS_FILE = "users.pkl" - -if pathlib.Path(HASHED_USERS_FILE).exists(): - hashed_users = read_pickle_file(HASHED_USERS_FILE) - assert isinstance(hashed_users, list), f"{HASHED_USERS_FILE} in invalid format." -else: - hashed_users: List[str] = [] - - -@bot.event -async def on_ready(): - print(f"Logged in as {bot.user}") - print(f"Running in {len(bot.guilds)} servers...") - - -async def run_prediction(space: gr.Blocks, *inputs): - inputs = list(inputs) - fn_index = 0 - processed_inputs = space.serialize_data(fn_index=fn_index, inputs=inputs) - batch = space.dependencies[fn_index]["batch"] - - if batch: - processed_inputs = [[inp] for inp in processed_inputs] - - outputs = await space.process_api( - fn_index=fn_index, inputs=processed_inputs, request=None, state={} - ) - outputs = outputs["data"] - - if batch: - outputs = [out[0] for out in outputs] - - processed_outputs = space.deserialize_data(fn_index, outputs) - processed_outputs = utils.resolve_singleton(processed_outputs) - - return processed_outputs - - -async def display_stats(message: discord.Message): - await message.channel.send( - f"Running in {len(bot.guilds)} servers\n" - f"Total # of users: {len(hashed_users)}\n" - f"------------------" - ) - await message.channel.send(f"Most popular spaces:") - # display the top 10 most frequently occurring strings and their counts - spaces = guild_spaces.values() - counts = Counter(spaces) - for space, count in counts.most_common(10): - await message.channel.send(f"- {space}: {count}") - - -async def load_space(guild: discord.Guild, message: discord.Message, content: str): - iframe_url = ( - requests.get(f"https://huggingface.co/api/spaces/{content}/host") - .json() - .get("host") - ) - if iframe_url is None: - return await message.channel.send( - f"Space: {content} not found. If you'd like to make a prediction, enclose the inputs in quotation marks." - ) - else: - await message.channel.send( - f"Loading Space: https://huggingface.co/spaces/{content}..." - ) - interface = gr.Interface.load(content, src="spaces") - guild_spaces[guild.id] = content - guild_blocks[guild.id] = interface - asyncio.create_task(update_pickle_file(guild_spaces, GUILD_SPACES_FILE)) - if len(content) > 32 - len(f"{bot.name} []"): # type: ignore - nickname = content[: 32 - len(f"{bot.name} []") - 3] + "..." # type: ignore - else: - nickname = content - nickname = f"{bot.name} [{nickname}]" # type: ignore - await guild.me.edit(nick=nickname) - await message.channel.send( - "Ready to make predictions! Type in your inputs and enclose them in quotation marks." 
- ) - - -async def disconnect_space(bot: commands.Bot, guild: discord.Guild): - guild_spaces.pop(guild.id, None) - guild_blocks.pop(guild.id, None) - asyncio.create_task(update_pickle_file(guild_spaces, GUILD_SPACES_FILE)) - await guild.me.edit(nick=bot.name) # type: ignore - - -async def make_prediction(guild: discord.Guild, message: discord.Message, content: str): - if guild.id in guild_spaces: - params = re.split(r' (?=")', content) - params = [p.strip("'\"") for p in params] - space = guild_blocks[guild.id] - predictions = await run_prediction(space, *params) - if isinstance(predictions, (tuple, list)): - for p in predictions: - await send_file_or_text(message.channel, p) - else: - await send_file_or_text(message.channel, predictions) - return - else: - await message.channel.send( - "No Space is currently running. Please type in the name of a Hugging Face Space name first, e.g. abidlabs/en2fr" - ) - await guild.me.edit(nick=bot.name) # type: ignore - - -@bot.event -async def on_message(message: discord.Message): - if message.author == bot.user: - return - h = hash_user_id(message.author.id) - if h not in hashed_users: - hashed_users.append(h) - asyncio.create_task(update_pickle_file(hashed_users, HASHED_USERS_FILE)) - else: - if message.content: - content = remove_tags(message.content) - guild = message.channel.guild - assert guild, "Message not sent in a guild." - - if content.strip() == "exit": - await disconnect_space(bot, guild) - elif content.strip() == "stats": - await display_stats(message) - elif content.startswith('"') or content.startswith("'"): - await make_prediction(guild, message, content) - else: - await load_space(guild, message, content) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "--token", - type=str, - help="API key for the Discord bot. 
You can set this to your Discord token if you'd like to make your own clone of the Gradio Bot.", - required=False, - default="", - ) - args = parser.parse_args() - - if args.token.strip(): - discord_token = args.token - bot.env = "staging" # type: ignore - bot.name = "StagingBot" # type: ignore - else: - with open("secrets.json") as fp: - discord_token = json.load(fp)["discord_token"] - bot.env = "prod" # type: ignore - bot.name = "GradioBot" # type: ignore - - bot.run(discord_token) diff --git a/apps/GradioBot/utils.py b/apps/GradioBot/utils.py deleted file mode 100644 index 5657b36f..00000000 --- a/apps/GradioBot/utils.py +++ /dev/null @@ -1,41 +0,0 @@ -from __future__ import annotations - -import asyncio -import pickle -import hashlib -import pathlib -from typing import Dict, List - -import discord - -lock = asyncio.Lock() - - -async def update_pickle_file(data: Dict | List, file_path: str): - async with lock: - with open(file_path, "wb") as fp: - pickle.dump(data, fp) - - -def read_pickle_file(file_path: str): - with open(file_path, "rb") as fp: - return pickle.load(fp) - - -async def send_file_or_text(channel, file_or_text: str): - # if the file exists, send as a file - if pathlib.Path(str(file_or_text)).exists(): - with open(file_or_text, "rb") as f: - return await channel.send(file=discord.File(f)) - else: - return await channel.send(file_or_text) - - -def remove_tags(content: str) -> str: - content = content.replace("<@1040198143695933501>", "") - content = content.replace("<@1057338428938788884>", "") - return content.strip() - - -def hash_user_id(user_id: int) -> str: - return hashlib.sha256(str(user_id).encode("utf-8")).hexdigest() diff --git a/apps/MythGen/.env.example b/apps/MythGen/.env.example deleted file mode 100644 index 95580b46..00000000 --- a/apps/MythGen/.env.example +++ /dev/null @@ -1,2 +0,0 @@ -OPENAI_API_KEY="YOUR_API_KEY" -DALLE_COOKIE="YOUR_COOKIE" diff --git a/apps/MythGen/README.md b/apps/MythGen/README.md deleted file mode 100644 index ef684287..00000000 --- a/apps/MythGen/README.md +++ /dev/null @@ -1,71 +0,0 @@ -MythGen: A Dynamic New Art Form -Overview - -![panel_2](https://github.com/elder-plinius/MythGen/assets/133052465/86bb5784-845b-4db8-a38f-217169ea5201) - - -MythGen is an Iterative Multimedia Generator that allows users to create their own comic stories based on textual prompts. The system integrates state-of-the-art language and image models to provide a seamless and creative experience. -Features - - Initial Prompting: Kick-start your story with an initial text prompt. - Artistic Style Suffix: Maintain a consistent artistic style throughout your comic. - Image Generation: Generate captivating comic panels based on textual captions. - Caption Generation: Produce engaging captions for each comic panel. - Interactive Story Building: Select your favorite panels and captions to build your story iteratively. - Storyboard: View the sequence of your selected panels and their associated captions. - State Management: Keep track of the current state of your comic generation process. - User-Friendly Interface: Easy-to-use interface built on Gradio. - -Prerequisites -OpenAI API Key - -You will need an OpenAI API key to access GPT-3 for generating captions. Follow these steps to obtain one: - - Visit OpenAI's Developer Dashboard. - Sign up for an API key and follow the verification process. - Once verified, you will be provided with an API key. - -Bing Image Creator Cookie - -You should obtain your cookie to run this program. 
Follow these steps to obtain your cookie: - - Go to Bing Image Creator in your browser and log in to your account. - Press Ctrl+Shift+J to open developer tools. - Navigate to the Application section. - Click on the Cookies section. - Find the variable _U and copy its value. - -How to Use - - Initial Prompt: Start by inputting your initial comic concept. - Select a Panel: Choose your favorite panel and caption from the generated options. - Iterate: Use the "Next Part" button to generate the next part of your comic based on your latest selection. - View Storyboard: See your selected comic panels and captions in a storyboard for a comprehensive view of your comic. - Finalize: Continue this process until you've created your full comic story. - -Installation - -bash - -pip install -r requirements.txt - -Running MythGen - -bash - -python main.py - -This will launch the Gradio interface where you can interact with MythGen. -Dependencies - - Python 3.x - Gradio - OpenAI's GPT-3 - DALL-E - -Contributing - -We welcome contributions! Please read the CONTRIBUTING.md for guidelines on how to contribute to this project. -License - -This project is licensed under the MIT License. See LICENSE.md for details. diff --git a/apps/MythGen/cookies.json b/apps/MythGen/cookies.json deleted file mode 100644 index 4d0748fc..00000000 --- a/apps/MythGen/cookies.json +++ /dev/null @@ -1,6 +0,0 @@ -[ - { - "name": "cookie1", - "value": "1lEXeWRSIPUsQ0S3tdAc3v7BexGK2qBlzsXz8j52w_HNBoOsegjiwRySQHmfoWduHVUxSXo6cETPP2qNrYWAz6k7wn43WGO9i7ll9_Wl7M6HA2c9twbKByfAtAB5fr26wPawQ6y1GCdakD_Kr4xdD20fvkytnmOmZu7Ktnb9mUVE605AAbJcIA9SOlRN5410ZPOnZA1cIzr4WtAFWNfQKPG6Sxk_zO5zvXQfYTyMNmOI" - } -] diff --git a/apps/MythGen/requirements.txt b/apps/MythGen/requirements.txt deleted file mode 100644 index e1fda005..00000000 --- a/apps/MythGen/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -dalle3==0.0.7 -Flask==2.3.2 -gradio==3.48.0 -openai==0.28.1 -Pillow==10.1.0 -python-dotenv==1.0.0 -Requests==2.31.0 -swarms==1.8.2 diff --git a/apps/open-sourcerer/.env.example b/apps/open-sourcerer/.env.example deleted file mode 100644 index 663b2532..00000000 --- a/apps/open-sourcerer/.env.example +++ /dev/null @@ -1,6 +0,0 @@ -ELEVEN_LABS_API_KEY = "" # https://elevenlabs.io/speech-synthesis -OPENAI_API_KEY = "" # https://platform.openai.com/account/api-keys -DISCORD_TOKEN="discord_token" -API_BASE="api_base" -SYSTEM_MESSAGE="" -BOT_ID="your_bot_token" diff --git a/apps/open-sourcerer/Dockerfile b/apps/open-sourcerer/Dockerfile deleted file mode 100644 index 8033ee16..00000000 --- a/apps/open-sourcerer/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -# Use an official Python runtime as a parent image -FROM python:3.10 - -# Set the working directory in the container to /app -WORKDIR /app - -# Add the current directory contents into the container at /app -ADD . /app - -# Install any needed packages specified in requirements.txt -RUN pip install --no-cache-dir -r requirements.txt - -# Make port 80 available to the world outside this container -EXPOSE 80 - -# Run DiscordInterpreter.py when the container launches -CMD ["python", "main.py"] diff --git a/apps/open-sourcerer/README.md b/apps/open-sourcerer/README.md deleted file mode 100644 index 9387cad8..00000000 --- a/apps/open-sourcerer/README.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: open-interpreter -app_file: jarvis.py -sdk: gradio -sdk_version: 3.33.1 ---- -# Open-Sourcerer: The Code Sorcerer's Apprentice -![Sourcerer](Open-Sourcerer.jpg) - -Greetings, fellow developer! 
Welcome to the realm of the Open-Sourcerer, your trusty assistant in the magical world of open source projects. Open-Sourcerer is here to assist you in finding, integrating, and mastering the arcane arts of open source code. - -## Introduction - -Open-Sourcerer is your magical companion, capable of traversing the vast landscapes of the internet, particularly GitHub, to discover open source projects that align with your desires. It can also lend you a hand in weaving these projects into your own creations. - -### How Does Open-Sourcerer Work? - -Open-Sourcerer operates in two phases: - -1. **Discovery**: It explores the realms of GitHub to unearth repositories that resonate with your quest. - -2. **Integration and Assistance**: Once you've chosen your allies (repositories), Open-Sourcerer helps you integrate them into your own codebase. It can even conjure code snippets to assist you. - -## Installation - -Before embarking on this mystical journey, ensure you have the following: - -- Python (version X.X.X) -- Git (version X.X.X) -- Your favorite code editor (e.g., Visual Studio Code) - -Now, let's summon the Open-Sourcerer: - -```shell -pip install open-sourcerer -``` - -## Configuration - -Open-Sourcerer must be attuned to your intentions. Let's configure it: - -```shell -open-sourcerer configure -``` - -Follow the instructions to set up your preferences, such as the programming languages and search keywords that align with your project. - -## MVP (Minimum Viable Potion) Tasks - -1. **Prepare the Cauldron** - - [ ] Create a dedicated workspace/repository for Open-Sourcerer. - -2. **Web Scrying** - - [ ] Implement web scraping to search GitHub for relevant open source projects. - -3. **Submodule Conjuring** - - [ ] Develop a submodule management system to add selected GitHub repositories as submodules to your workspace. - -4. **Bloop Integration** - - [ ] Integrate Open-Sourcerer with the Bloop tool (https://github.com/BloopAI/bloop). - - [ ] Implement code generation and assistance features. - -5. **Version Control & Long-Term Memory** - - [ ] Set up version control for the workspace and submodules. - - [ ] Create a vector database to store project information for long-term memory. - -6. **Magical Interface (Optional)** - - [ ] Create a user-friendly interface for interacting with Open-Sourcerer. - -7. **Testing & Documentation** - - [ ] Ensure the reliability of Open-Sourcerer through thorough testing. - - [ ] Document the magic spells for fellow developers. - -## Stretch Goals (Beyond the Sorcerer's Hat) - -1. **Advanced Recommendation Alchemy** - - [ ] Enhance the recommendation algorithm using machine learning or NLP. - -2. **Explore Other Realms** - - [ ] Expand Open-Sourcerer's reach to platforms like GitLab, Bitbucket, and more. - -3. **Code Quality Insights** - - [ ] Add code review and quality analysis features for recommended projects. - -4. **Summon a Community** - - [ ] Create a community where developers can collaborate on recommended open source projects. - -5. **Editor Enchantments** - - [ ] Develop plugins/extensions for popular code editors to provide real-time assistance. - -6. **Language Understanding Scrolls** - - [ ] Improve Open-Sourcerer's natural language understanding capabilities. - -7. **Continuous Learning** - - [ ] Implement a mechanism for Open-Sourcerer to learn and adapt from user interactions. - -8. **Security Warding** - - [ ] Add security scanning to identify vulnerabilities in recommended projects. - -9. 
**Mobile App (Optional)** - - [ ] Create a mobile app version of Open-Sourcerer for convenience on your travels. - -10. **Licensing & Compliance** - - [ ] Ensure Open-Sourcerer checks the licensing of recommended projects for legal compliance. - -11. **Performance Enhancements** - - [ ] Optimize Open-Sourcerer's performance for faster results. - -## How to Contribute - -As we embark on this magical quest, we invite other sorcerers to join us. Feel free to contribute to Open-Sourcerer's development and help us unlock even more mystical powers. - -```shell -git clone https://github.com/your-fork/open-sourcerer.git -cd open-sourcerer -# Create a virtual environment and activate it -pip install -r requirements.txt -python setup.py install -``` - -May your code be bug-free and your projects prosperous! The Open-Sourcerer awaits your commands. -``` - -Feel free to adapt and expand this README with more details, graphics, and styling to make it engaging and in line with the sorcerer theme. diff --git a/apps/open-sourcerer/docker-compose.yaml b/apps/open-sourcerer/docker-compose.yaml deleted file mode 100644 index 7168363b..00000000 --- a/apps/open-sourcerer/docker-compose.yaml +++ /dev/null @@ -1,9 +0,0 @@ -version: '3' -services: - my-python-app: - container_name: Open-Soucerer - build: . - ports: - - "80:80" - env_file: - - ./.env diff --git a/apps/open-sourcerer/main.py b/apps/open-sourcerer/main.py deleted file mode 100644 index db209cd9..00000000 --- a/apps/open-sourcerer/main.py +++ /dev/null @@ -1,77 +0,0 @@ -import openai -import os -import dotenv -import logging -import gradio as gr -from BingImageCreator import ImageGen -from swarms.models.bing_chat import BingChat - -# from swarms.models.bingchat import BingChat -dotenv.load_dotenv(".env") - -# Initialize the EdgeGPTModel -model = BingChat() - -response = model("Generate") - -logging.basicConfig(level=logging.INFO) - -accumulated_story = "" -latest_caption = "" -standard_suffix = "" -storyboard = [] - -caption = "Create comic about opensourcerer a robot wizard" - -def generate_images_with_bingchat(caption): - img_path = model.create_img(caption) - img_urls = model.images(caption) - return img_urls - -def generate_single_caption(text): - prompt = f"A comic about {text}." - response = model(text) - return response - -def interpret_text_with_gpt(text, suffix): - return generate_single_caption(f"{text} {suffix}") - -def create_standard_suffix(original_prompt): - return f"In the style of {original_prompt}" - -def gradio_interface(text=None, next_button_clicked=False): - global accumulated_story, latest_caption, standard_suffix, storyboard - - if not standard_suffix: - standard_suffix = create_standard_suffix(text) - - if next_button_clicked: - new_caption = generate_single_caption(latest_caption + " " + standard_suffix) - new_urls = generate_images_with_bingchat(new_caption) - latest_caption = new_caption - storyboard.append((new_urls, new_caption)) - - elif text: - caption = generate_single_caption(text + " " + standard_suffix) - comic_panel_urls = generate_images_with_bingchat(caption) - latest_caption = caption - storyboard.append((comic_panel_urls, caption)) - - storyboard_html = "" - for urls, cap in storyboard: - for url in urls: - storyboard_html += f'{cap}
{cap}
' - - return storyboard_html - -if __name__ == "__main__": - iface = gr.Interface( - fn=gradio_interface, - inputs=[ - gr.inputs.Textbox(default="Type your story concept here", optional=True, label="Story Concept"), - gr.inputs.Checkbox(label="Generate Next Part") - ], - outputs=[gr.outputs.HTML()], - live=False # Submit button will appear - ) - iface.launch() diff --git a/apps/open-sourcerer/requirements.txt b/apps/open-sourcerer/requirements.txt deleted file mode 100644 index 1f9ab0f1..00000000 --- a/apps/open-sourcerer/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -openai-whisper -py-cord -discord -open-interpreter -elevenlabs -gradio diff --git a/apps/open-sourcerer/voice.py b/apps/open-sourcerer/voice.py deleted file mode 100644 index 9013e5bf..00000000 --- a/apps/open-sourcerer/voice.py +++ /dev/null @@ -1,97 +0,0 @@ -import gradio_client as grc -import interpreter -import time -import gradio as gr -from pydub import AudioSegment -import io -from elevenlabs import generate, play, set_api_key -import dotenv - -dotenv.load_dotenv(".env") - -# interpreter.model = "TheBloke/Mistral-7B-OpenOrca-GGUF" -interpreter.auto_run = True - - -set_api_key("ELEVEN_LABS_API_KEY") - - -def get_audio_length(audio_bytes): - # Create a BytesIO object from the byte array - byte_io = io.BytesIO(audio_bytes) - - # Load the audio data with PyDub - audio = AudioSegment.from_mp3(byte_io) - - # Get the length of the audio in milliseconds - length_ms = len(audio) - - # Optionally convert to seconds - length_s = length_ms / 1000.0 - - return length_s - - -def speak(text): - speaking = True - audio = generate(text=text, voice="Daniel") - play(audio, notebook=True) - - audio_length = get_audio_length(audio) - time.sleep(audio_length) - - -# @title Text-only JARVIS -# @markdown Run this cell for a ChatGPT-like interface. 
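# The Blocks demo below wires a Gradio Chatbot to open-interpreter: user() appends
# the new message to the chat history, and bot() streams chunks back from
# interpreter.chat(..., stream=True), accumulating "message" text, opening a fenced
# ```{language} block for "code" chunks, switching to a ```text block while the code
# is "executing" so its "output" lands there, and yielding the updated history after
# each chunk so the UI re-renders incrementally.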
- - -with gr.Blocks() as demo: - chatbot = gr.Chatbot() - msg = gr.Textbox() - - def user(user_message, history): - return "", history + [[user_message, None]] - - def bot(history): - user_message = history[-1][0] - history[-1][1] = "" - active_block_type = "" - - for chunk in interpreter.chat(user_message, stream=True, display=False): - # Message - if "message" in chunk: - if active_block_type != "message": - active_block_type = "message" - history[-1][1] += chunk["message"] - yield history - - # Code - if "language" in chunk: - language = chunk["language"] - if "code" in chunk: - if active_block_type != "code": - active_block_type = "code" - history[-1][1] += f"\n```{language}\n" - history[-1][1] += chunk["code"] - yield history - - # Output - if "executing" in chunk: - history[-1][1] += "\n```\n\n```text\n" - yield history - if "output" in chunk: - if chunk["output"] != "KeyboardInterrupt": - history[-1][1] += chunk["output"] + "\n" - yield history - if "end_of_execution" in chunk: - history[-1][1] = history[-1][1].strip() - history[-1][1] += "\n```\n" - yield history - - msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then( - bot, chatbot, chatbot - ) - -if __name__ == "__main__": - demo.queue() - demo.launch(debug=True) diff --git a/apps/orchistrator/.dockerignore b/apps/orchistrator/.dockerignore deleted file mode 100644 index 37d7e734..00000000 --- a/apps/orchistrator/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -node_modules -.env diff --git a/apps/orchistrator/.env.example b/apps/orchistrator/.env.example deleted file mode 100644 index 1c5de46c..00000000 --- a/apps/orchistrator/.env.example +++ /dev/null @@ -1,3 +0,0 @@ -DISCORD_TOKEN= -DISCORD_CLIENT_ID= -DISCORD_GUILD_ID= \ No newline at end of file diff --git a/apps/orchistrator/Dockerfile b/apps/orchistrator/Dockerfile deleted file mode 100644 index 6fe95fab..00000000 --- a/apps/orchistrator/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM node:19-slim - -WORKDIR /app - -COPY package.json /app -RUN npm install - -COPY . . - -CMD [ "node", "index.js" ] diff --git a/apps/orchistrator/README.md b/apps/orchistrator/README.md deleted file mode 100644 index b1e3b419..00000000 --- a/apps/orchistrator/README.md +++ /dev/null @@ -1,42 +0,0 @@ - -# Server-Bot - -[View on Docker Hub](https://hub.docker.com/r/allenrkeen/server-bot) -### Discord bot to remotely monitor and control a docker based server. Using the docker socket. - -Setup is pretty straightforward. -1. Create a new application in the *[discord developer portal](https://discord.com/developers/applications)* -2. Go to the bot section and click *Add Bot* -3. Reset Token and keep the token somewhere secure (This will be referred to as "DISCORD_TOKEN" in .env and docker environment variables) -4. Get the "Application ID" from the General Information tab of your application (This will be referred to as "DISCORD_CLIENT_ID" in .env and docker environment variables) -5. *Optional:* If you have developer mode enabled in Discord, get your server's ID by right-clicking on the server name and clicking "Copy ID" (This will be referred to as "DISCORD_GUILD_ID" in .env and docker environment variables) - - If you skip this, it will still work, but commands will be published globally instead of to your server and can take up to an hour to be available in your server. - - Using the Server ID will be more secure, making the commands available only in the specified server. -6. Run the application in your preffered method. 
- - Run the docker container with the provided [docker-compose.yml](docker-compose.yml) or the docker run command below. - - ```bash - docker run -v /var/run/docker.sock:/var/run/docker.sock --name server-bot \ - -e DISCORD_TOKEN=your_token_here \ - -e DISCORD_CLIENT_ID=your_client_id_here \ - -e DISCORD_GUILD_ID=your_guild_id_here \ - allenrkeen/server-bot:latest - ``` - - - Clone the repo, cd into the server-bot directory and run "npm install" to install dependencies, then "npm run start" to start the server -7. The program will build an invite link with the correct permissions and put it in the logs. Click the link and confirm the server to add the bot to. - - -Current commands: - - /allcontainers - - provides container name and status for all containers - - /restartcontainer - - provides an autocomplete list of running containers to select from, or just type in container name then restarts the container - - /stopcontainer - - provides an autocomplete list of running containers to select from, or just type in container name then stops the container - - /startcontainer - - provides an autocomplete list of stopped containers to select from, or just type in container name then starts the container - - /ping - - Replies with "Pong!" when the bot is listening - - /server - - Replies with Server Name and member count, good for testing. diff --git a/apps/orchistrator/backend/deleteCommands.js b/apps/orchistrator/backend/deleteCommands.js deleted file mode 100644 index 79a77b4e..00000000 --- a/apps/orchistrator/backend/deleteCommands.js +++ /dev/null @@ -1,22 +0,0 @@ -/* - * This file is used to delete all commands from the Discord API. - * Only use this if you want to delete all commands and understand the consequences. -*/ - -require('dotenv').config(); -const token = process.env.DISCORD_TOKEN; -const clientID = process.env.DISCORD_CLIENT_ID; -const guildID = process.env.DISCORD_GUILD_ID; -const { REST, Routes } = require('discord.js'); -const fs = require('node:fs'); - -const rest = new REST({ version: '10' }).setToken(token); - -rest.put(Routes.applicationCommands(clientID), { body: [] }) - .then(() => console.log('Successfully deleted application (/) commands.')) - .catch(console.error); - -rest.put(Routes.applicationGuildCommands(clientID, guildID), { body: [] }) - .then(() => console.log('Successfully deleted guild (/) commands.')) - .catch(console.error); - diff --git a/apps/orchistrator/backend/deployCommands.js b/apps/orchistrator/backend/deployCommands.js deleted file mode 100644 index 4034b624..00000000 --- a/apps/orchistrator/backend/deployCommands.js +++ /dev/null @@ -1,53 +0,0 @@ -/* -This script pushes all commands in the commands folder to be usable in discord. 
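In outline: it reads every *.js file in ./commands, collects each module's
command.data.toJSON() payload, and PUTs the batch to the Discord REST API,
targeting Routes.applicationGuildCommands(clientID, guildID) when DISCORD_GUILD_ID
is set (guild-scoped) and Routes.applicationCommands(clientID) otherwise
(global, which can take up to an hour to appear in a server).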
-*/ - -require('dotenv').config(); -const token = process.env.DISCORD_TOKEN; -const clientID = process.env.DISCORD_CLIENT_ID; -const guildID = process.env.DISCORD_GUILD_ID; -const { REST, Routes } = require('discord.js'); -const fs = require('node:fs'); - -const commands = []; - -// Get all commands from the commands folder - -const commandFiles = fs.readdirSync('./commands').filter(file => file.endsWith('.js')); -console.log(commandFiles); - -for (const file of commandFiles) { - const command = require(`../commands/${file}`); - commands.push(command.data.toJSON()); -} - -const rest = new REST({ version: '10' }).setToken(token); - -// console.log(commands); - -(async () => { - try { - const rest = new REST({ version: '10' }).setToken(token); - - console.log('Started refreshing application (/) commands.'); - - //publish to guild if guildID is set, otherwise publish to global - if (guildID) { - const data = await rest.put( - Routes.applicationGuildCommands(clientID, guildID), - { body: commands }, - ); - console.log('Successfully reloaded '+ data.length +' commands.'); - } else { - const data = await rest.put( - Routes.applicationCommands(clientID), - { body: commands }, - ); - console.log('Successfully reloaded '+ data.length +' commands.'); - } - - } catch (error) { - console.error(error); - } -})(); - diff --git a/apps/orchistrator/commands/allContainers.js b/apps/orchistrator/commands/allContainers.js deleted file mode 100644 index 452d2468..00000000 --- a/apps/orchistrator/commands/allContainers.js +++ /dev/null @@ -1,39 +0,0 @@ -/* A command that lists all containers with their status */ - -const { SlashCommandBuilder, EmbedBuilder } = require("discord.js"); -const Docker = require('node-docker-api').Docker; - -module.exports = { - data: new SlashCommandBuilder() - .setName("allcontainers") - .setDescription("Lists all containers"), - async execute(interaction) { - outArray = []; - interaction.reply('Listing all containers...'); - - //create docker client - const docker = new Docker({ socketPath: '/var/run/docker.sock' }); - - // get all containers - const containers = await docker.container.list({ all: true}); - - // create array of containers with name and status - outArray = containers.map(c => { - return { - name: c.data.Names[0].slice(1), - status: c.data.State - }; - }); - - embedCount = Math.ceil(outArray.length / 25); - for (let i = 0; i < embedCount; i++) { - const embed = new EmbedBuilder() - .setTitle('Containers') - .addFields(outArray.slice(i * 25, (i + 1) * 25).map(e => { - return { name: e.name, value: e.status }; - })) - .setColor(0x00AE86); - interaction.channel.send({ embeds: [embed] }); - } - }, -}; \ No newline at end of file diff --git a/apps/orchistrator/commands/restart.js b/apps/orchistrator/commands/restart.js deleted file mode 100644 index 38a7b485..00000000 --- a/apps/orchistrator/commands/restart.js +++ /dev/null @@ -1,69 +0,0 @@ -const { SlashCommandBuilder, EmbedBuilder } = require("discord.js"); -const Docker = require('node-docker-api').Docker; - -module.exports = { - data: new SlashCommandBuilder() - .setName("restartcontainer") - .setDescription("Restarts a Docker container") - .addStringOption(option => - option.setName('container') - .setDescription('The container to restart') - .setRequired(true) - .setAutocomplete(true)), - async autocomplete(interaction) { - try { - // Create docker client - const docker = new Docker({ socketPath: '/var/run/docker.sock' }); - - // Get list of running containers - const containers = await docker.container.list({ 
all: true, filters: { status: ['running'] } }); - const runningContainers = containers.map(c => c.data.Names[0].slice(1)); - - // Filter list of containers by focused value - const focusedValue = interaction.options.getFocused(true); - const filteredContainers = runningContainers.filter(container => container.startsWith(focusedValue.value)); - - //slice if more than 25 - let sliced; - if (filteredContainers.length > 25) { - sliced = filteredContainers.slice(0, 25); - } else { - sliced = filteredContainers; - } - - // Respond with filtered list of containers - await interaction.respond(sliced.map(container => ({ name: container, value: container }))); - - } catch (error) { - // Handle error - console.error(error); - await interaction.reply('An error occurred while getting the list of running containers.'); - } - }, - async execute(interaction) { - try { - // create docker client - const docker = new Docker({ socketPath: '/var/run/docker.sock' }); - - // Get container name from options - const container = interaction.options.getString('container'); - - // Restart container - await interaction.reply(`Restarting container "${container}"...`); - const containers = await docker.container.list({ all: true, filters: { name: [container] } }); - if (containers.length === 0) { - await interaction.followUp(`Container "${container}" does not exist.`); - throw new Error(`Container "${container}" does not exist.`); - } - await containers[0].restart(); - - - // Confirm that container was restarted - await interaction.followUp(`Container "${container}" was successfully restarted.`); - } catch (error) { - // Handle error - console.error(error); - await interaction.followUp(`An error occurred while trying to restart the container "${container}".`); - } - } -}; diff --git a/apps/orchistrator/commands/startContainer.js b/apps/orchistrator/commands/startContainer.js deleted file mode 100644 index 43dc4e24..00000000 --- a/apps/orchistrator/commands/startContainer.js +++ /dev/null @@ -1,92 +0,0 @@ -const { SlashCommandBuilder, EmbedBuilder } = require("discord.js"); -const Docker = require('node-docker-api').Docker; - -module.exports = { - data: new SlashCommandBuilder() - .setName("startcontainer") - .setDescription("Starts a Docker container") - .addStringOption(option => - option.setName('container') - .setDescription('The container to start') - .setRequired(true) - .setAutocomplete(true)), - async autocomplete(interaction) { - try { - // Create docker client - const docker = new Docker({ socketPath: '/var/run/docker.sock' }); - - // Get list of running containers - const containers = await docker.container.list({ all: true, filters: { status: ['exited'] } }); - const runningContainers = containers.map(c => c.data.Names[0].slice(1)); - - // Filter list of containers by focused value - const focusedValue = interaction.options.getFocused(true); - const filteredContainers = runningContainers.filter(container => container.startsWith(focusedValue.value)); - - //slice if more than 25 - let sliced; - if (filteredContainers.length > 25) { - sliced = filteredContainers.slice(0, 25); - } else { - sliced = filteredContainers; - } - - // Respond with filtered list of containers - await interaction.respond(sliced.map(container => ({ name: container, value: container }))); - - } catch (error) { - // Handle error - console.error(error); - await interaction.reply('An error occurred while getting the list of running containers.'); - } - }, - async execute(interaction) { - try { - // Get container name from options - const 
containerName = interaction.options.getString('container'); - - // Start container in interactive mode - await interaction.reply(`Starting container "${containerName}" in interactive mode...`); - const container = docker.getContainer(containerName); - const info = await container.inspect(); - if (!info) { - await interaction.followUp(`Container "${containerName}" does not exist.`); - throw new Error(`Container "${containerName}" does not exist.`); - } - await container.start({ - AttachStdin: true, - AttachStdout: true, - AttachStderr: true, - Tty: true, - OpenStdin: true, - StdinOnce: false - }); - - // Attach to container's streams - const stream = await container.attach({ - stream: true, - stdin: true, - stdout: true, - stderr: true - }); - - // Use socket.io for real-time communication with the container - io.on('connection', (socket) => { - socket.on('containerInput', (data) => { - stream.write(data + '\n'); // Send input to the container - }); - - stream.on('data', (data) => { - socket.emit('containerOutput', data.toString()); // Send container's output to the client - }); - }); - - // Confirm that container was started - await interaction.followUp(`Container "${containerName}" was successfully started in interactive mode.`); - } catch (error) { - // Handle error - console.error(error); - await interaction.followUp(`An error occurred while trying to start the container "${containerName}" in interactive mode.`); - } - }, -}; diff --git a/apps/orchistrator/commands/stopContainer.js b/apps/orchistrator/commands/stopContainer.js deleted file mode 100644 index d424c73d..00000000 --- a/apps/orchistrator/commands/stopContainer.js +++ /dev/null @@ -1,68 +0,0 @@ -const { SlashCommandBuilder, EmbedBuilder } = require("discord.js"); -const Docker = require('node-docker-api').Docker; - -module.exports = { - data: new SlashCommandBuilder() - .setName("stopcontainer") - .setDescription("Stops a Docker container") - .addStringOption(option => - option.setName('container') - .setDescription('The container to stop') - .setRequired(true) - .setAutocomplete(true)), - async autocomplete(interaction) { - try { - // Create docker client - const docker = new Docker({ socketPath: '/var/run/docker.sock' }); - - // Get list of running containers - const containers = await docker.container.list({ all: true, filters: { status: ['running'] } }); - const runningContainers = containers.map(c => c.data.Names[0].slice(1)); - - // Filter list of containers by focused value - const focusedValue = interaction.options.getFocused(true); - const filteredContainers = runningContainers.filter(container => container.startsWith(focusedValue.value)); - - //slice if more than 25 - let sliced; - if (filteredContainers.length > 25) { - sliced = filteredContainers.slice(0, 25); - } else { - sliced = filteredContainers; - } - - // Respond with filtered list of containers - await interaction.respond(sliced.map(container => ({ name: container, value: container }))); - - } catch (error) { - // Handle error - console.error(error); - await interaction.reply('An error occurred while getting the list of running containers.'); - } - }, - async execute(interaction) { - try { - // create docker client - const docker = new Docker({ socketPath: '/var/run/docker.sock' }); - - // Get container name from options - const container = interaction.options.getString('container'); - - // Restart container - await interaction.reply(`Stopping container "${container}"...`); - const containers = await docker.container.list({ all: true, filters: { name: 
[container] } }); - if (containers.length === 0) { - await interaction.followUp(`Container "${container}" does not exist.`); - throw new Error(`Container "${container}" does not exist.`); - } - await containers[0].stop(); - - // Confirm that container was restarted - await interaction.followUp(`Container "${container}" was successfully stopped.`); - } catch (error) { - // Handle error - console.error(error); - await interaction.followUp(`An error occurred while trying to stop the container "${container}".`); - } - } -}; diff --git a/apps/orchistrator/docker-compose.yml b/apps/orchistrator/docker-compose.yml deleted file mode 100644 index af6c313d..00000000 --- a/apps/orchistrator/docker-compose.yml +++ /dev/null @@ -1,12 +0,0 @@ -version: '3' - -services: - server-bot: - container_name: Leonidas - build: - context: . - dockerfile: Dockerfile - volumes: - - /var/run/docker.sock:/var/run/docker.sock #required - env_file: - - ./.env # environment: diff --git a/apps/orchistrator/index.js b/apps/orchistrator/index.js deleted file mode 100644 index 9b197932..00000000 --- a/apps/orchistrator/index.js +++ /dev/null @@ -1,89 +0,0 @@ -require('dotenv').config(); -const fs = require('node:fs'); -const path = require('node:path'); -const token = process.env.DISCORD_TOKEN; -const clientID = process.env.DISCORD_CLIENT_ID; - -// Require the necessary discord.js classes -const { Client, Collection, Events, GatewayIntentBits } = require('discord.js'); - -// Create a new client instance -const client = new Client({ intents: [GatewayIntentBits.Guilds] }); - -//run backend/deployCommands.js -const { exec } = require('child_process'); -exec('node backend/deployCommands.js', (err, stdout, stderr) => { - if (err) { - //some err occurred - console.error(err); - } else { - // print complete output - console.log(stdout); - } -}); - - - -// When the client is ready, run this code -client.once(Events.ClientReady, c => { - console.log(`Ready! 
Logged in as ${c.user.tag}`); -}); - -// Log in to Discord with your client's token -client.login(token); - -// Create a new collection for commands -client.commands = new Collection(); - -const commandsPath = path.join(__dirname, 'commands'); -const commandFiles = fs.readdirSync(commandsPath).filter(file => file.endsWith('.js')); - -for (const file of commandFiles) { - const filePath = path.join(commandsPath, file); - const command = require(filePath); - // Set a new item in the Collection with the key as the name of the command and the value as the exported module - if ('data' in command && 'execute' in command) { - client.commands.set(command.data.name, command); - } else { - console.log(`Command ${file} is missing 'data' or 'execute'`); - } -} -//build and display invite link -const inviteLink = 'https://discord.com/oauth2/authorize?client_id='+clientID+'&permissions=2147534912&scope=bot%20applications.commands'; - -console.log(`Invite link: ${inviteLink}`); - -// execute on slash command -client.on(Events.InteractionCreate, async interaction => { - if (interaction.isChatInputCommand()) { - const command = client.commands.get(interaction.commandName); - - if (!command) { - console.error('No command matching ${interaction.commandName} was found.'); - return; - } - - try { - await command.execute(interaction); - } catch (error) { - console.error(error); - // await interaction.reply({ content: 'There was an error while executing this command!', ephemeral: true }); - } - } else if (interaction.isAutocomplete()) { - - const command = client.commands.get(interaction.commandName); - - if (!command) { - console.error('No command matching ${interaction.commandName} was found.'); - return; - } - - try { - await command.autocomplete(interaction); - } catch (error) { - console.error(error); - // await interaction.({ content: 'There was an error while executing this command!', ephemeral: true }); - } - } -}); - diff --git a/apps/orchistrator/package-lock.json b/apps/orchistrator/package-lock.json deleted file mode 100644 index f351e471..00000000 --- a/apps/orchistrator/package-lock.json +++ /dev/null @@ -1,723 +0,0 @@ -{ - "name": "server-bot", - "version": "1.0.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "server-bot", - "version": "1.0.0", - "license": "MIT", - "dependencies": { - "discord.js": "^14.7.1", - "dockerode": "^3.3.4", - "dotenv": "^16.0.3", - "node-docker-api": "^1.1.22" - } - }, - "node_modules/@balena/dockerignore": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@balena/dockerignore/-/dockerignore-1.0.2.tgz", - "integrity": "sha512-wMue2Sy4GAVTk6Ic4tJVcnfdau+gx2EnG7S+uAEe+TWJFqE4YoWN4/H8MSLj4eYJKxGg26lZwboEniNiNwZQ6Q==" - }, - "node_modules/@discordjs/builders": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@discordjs/builders/-/builders-1.4.0.tgz", - "integrity": "sha512-nEeTCheTTDw5kO93faM1j8ZJPonAX86qpq/QVoznnSa8WWcCgJpjlu6GylfINTDW6o7zZY0my2SYdxx2mfNwGA==", - "dependencies": { - "@discordjs/util": "^0.1.0", - "@sapphire/shapeshift": "^3.7.1", - "discord-api-types": "^0.37.20", - "fast-deep-equal": "^3.1.3", - "ts-mixer": "^6.0.2", - "tslib": "^2.4.1" - }, - "engines": { - "node": ">=16.9.0" - } - }, - "node_modules/@discordjs/collection": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@discordjs/collection/-/collection-1.3.0.tgz", - "integrity": "sha512-ylt2NyZ77bJbRij4h9u/wVy7qYw/aDqQLWnadjvDqW/WoWCxrsX6M3CIw9GVP5xcGCDxsrKj5e0r5evuFYwrKg==", - "engines": { - "node": ">=16.9.0" - } - 
}, - "node_modules/@discordjs/rest": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@discordjs/rest/-/rest-1.5.0.tgz", - "integrity": "sha512-lXgNFqHnbmzp5u81W0+frdXN6Etf4EUi8FAPcWpSykKd8hmlWh1xy6BmE0bsJypU1pxohaA8lQCgp70NUI3uzA==", - "dependencies": { - "@discordjs/collection": "^1.3.0", - "@discordjs/util": "^0.1.0", - "@sapphire/async-queue": "^1.5.0", - "@sapphire/snowflake": "^3.2.2", - "discord-api-types": "^0.37.23", - "file-type": "^18.0.0", - "tslib": "^2.4.1", - "undici": "^5.13.0" - }, - "engines": { - "node": ">=16.9.0" - } - }, - "node_modules/@discordjs/util": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/@discordjs/util/-/util-0.1.0.tgz", - "integrity": "sha512-e7d+PaTLVQav6rOc2tojh2y6FE8S7REkqLldq1XF4soCx74XB/DIjbVbVLtBemf0nLW77ntz0v+o5DytKwFNLQ==", - "engines": { - "node": ">=16.9.0" - } - }, - "node_modules/@sapphire/async-queue": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@sapphire/async-queue/-/async-queue-1.5.0.tgz", - "integrity": "sha512-JkLdIsP8fPAdh9ZZjrbHWR/+mZj0wvKS5ICibcLrRI1j84UmLMshx5n9QmL8b95d4onJ2xxiyugTgSAX7AalmA==", - "engines": { - "node": ">=v14.0.0", - "npm": ">=7.0.0" - } - }, - "node_modules/@sapphire/shapeshift": { - "version": "3.8.1", - "resolved": "https://registry.npmjs.org/@sapphire/shapeshift/-/shapeshift-3.8.1.tgz", - "integrity": "sha512-xG1oXXBhCjPKbxrRTlox9ddaZTvVpOhYLmKmApD/vIWOV1xEYXnpoFs68zHIZBGbqztq6FrUPNPerIrO1Hqeaw==", - "dependencies": { - "fast-deep-equal": "^3.1.3", - "lodash": "^4.17.21" - }, - "engines": { - "node": ">=v14.0.0", - "npm": ">=7.0.0" - } - }, - "node_modules/@sapphire/snowflake": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@sapphire/snowflake/-/snowflake-3.4.0.tgz", - "integrity": "sha512-zZxymtVO6zeXVMPds+6d7gv/OfnCc25M1Z+7ZLB0oPmeMTPeRWVPQSS16oDJy5ZsyCOLj7M6mbZml5gWXcVRNw==", - "engines": { - "node": ">=v14.0.0", - "npm": ">=7.0.0" - } - }, - "node_modules/@tokenizer/token": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/@tokenizer/token/-/token-0.3.0.tgz", - "integrity": "sha512-OvjF+z51L3ov0OyAU0duzsYuvO01PH7x4t6DJx+guahgTnBHkhJdG7soQeTSFLWN3efnHyibZ4Z8l2EuWwJN3A==" - }, - "node_modules/@types/node": { - "version": "18.11.18", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.11.18.tgz", - "integrity": "sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA==" - }, - "node_modules/@types/ws": { - "version": "8.5.3", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.3.tgz", - "integrity": "sha512-6YOoWjruKj1uLf3INHH7D3qTXwFfEsg1kf3c0uDdSBJwfa/llkwIjrAGV7j7mVgGNbzTQ3HiHKKDXl6bJPD97w==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/asn1": { - "version": "0.2.6", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", - "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", - "dependencies": { - "safer-buffer": "~2.1.0" - } - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/bcrypt-pbkdf": { - "version": "1.0.2", - 
"resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", - "dependencies": { - "tweetnacl": "^0.14.3" - } - }, - "node_modules/bl": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", - "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", - "dependencies": { - "buffer": "^5.5.0", - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - } - }, - "node_modules/buffer": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", - "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.1.13" - } - }, - "node_modules/buildcheck": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/buildcheck/-/buildcheck-0.0.3.tgz", - "integrity": "sha512-pziaA+p/wdVImfcbsZLNF32EiWyujlQLwolMqUQE8xpKNOH7KmZQaY8sXN7DGOEzPAElo9QTaeNRfGnf3iOJbA==", - "optional": true, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/busboy": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", - "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==", - "dependencies": { - "streamsearch": "^1.1.0" - }, - "engines": { - "node": ">=10.16.0" - } - }, - "node_modules/chownr": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", - "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==" - }, - "node_modules/core-util-is": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", - "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" - }, - "node_modules/cpu-features": { - "version": "0.0.4", - "resolved": "https://registry.npmjs.org/cpu-features/-/cpu-features-0.0.4.tgz", - "integrity": "sha512-fKiZ/zp1mUwQbnzb9IghXtHtDoTMtNeb8oYGx6kX2SYfhnG0HNdBEBIzB9b5KlXu5DQPhfy3mInbBxFcgwAr3A==", - "hasInstallScript": true, - "optional": true, - "dependencies": { - "buildcheck": "0.0.3", - "nan": "^2.15.0" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/discord-api-types": { - "version": "0.37.24", - "resolved": "https://registry.npmjs.org/discord-api-types/-/discord-api-types-0.37.24.tgz", - "integrity": "sha512-1+Fb4huJCihdbkJLcq2p7nBmtlmAryNwjefT8wwJnL8c7bc7WA87Oaa5mbLe96QvZyfwnwRCDX40H0HhcVV50g==" - }, - "node_modules/discord.js": { - "version": "14.7.1", - "resolved": "https://registry.npmjs.org/discord.js/-/discord.js-14.7.1.tgz", - "integrity": 
"sha512-1FECvqJJjjeYcjSm0IGMnPxLqja/pmG1B0W2l3lUY2Gi4KXiyTeQmU1IxWcbXHn2k+ytP587mMWqva2IA87EbA==", - "dependencies": { - "@discordjs/builders": "^1.4.0", - "@discordjs/collection": "^1.3.0", - "@discordjs/rest": "^1.4.0", - "@discordjs/util": "^0.1.0", - "@sapphire/snowflake": "^3.2.2", - "@types/ws": "^8.5.3", - "discord-api-types": "^0.37.20", - "fast-deep-equal": "^3.1.3", - "lodash.snakecase": "^4.1.1", - "tslib": "^2.4.1", - "undici": "^5.13.0", - "ws": "^8.11.0" - }, - "engines": { - "node": ">=16.9.0" - } - }, - "node_modules/docker-modem": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/docker-modem/-/docker-modem-3.0.6.tgz", - "integrity": "sha512-h0Ow21gclbYsZ3mkHDfsYNDqtRhXS8fXr51bU0qr1dxgTMJj0XufbzX+jhNOvA8KuEEzn6JbvLVhXyv+fny9Uw==", - "dependencies": { - "debug": "^4.1.1", - "readable-stream": "^3.5.0", - "split-ca": "^1.0.1", - "ssh2": "^1.11.0" - }, - "engines": { - "node": ">= 8.0" - } - }, - "node_modules/dockerode": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/dockerode/-/dockerode-3.3.4.tgz", - "integrity": "sha512-3EUwuXnCU+RUlQEheDjmBE0B7q66PV9Rw5NiH1sXwINq0M9c5ERP9fxgkw36ZHOtzf4AGEEYySnkx/sACC9EgQ==", - "dependencies": { - "@balena/dockerignore": "^1.0.2", - "docker-modem": "^3.0.0", - "tar-fs": "~2.0.1" - }, - "engines": { - "node": ">= 8.0" - } - }, - "node_modules/dotenv": { - "version": "16.0.3", - "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.0.3.tgz", - "integrity": "sha512-7GO6HghkA5fYG9TYnNxi14/7K9f5occMlp3zXAuSxn7CKCxt9xbNWG7yF8hTCSUchlfWSe3uLmlPfigevRItzQ==", - "engines": { - "node": ">=12" - } - }, - "node_modules/end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "dependencies": { - "once": "^1.4.0" - } - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" - }, - "node_modules/file-type": { - "version": "18.0.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-18.0.0.tgz", - "integrity": "sha512-jjMwFpnW8PKofLE/4ohlhqwDk5k0NC6iy0UHAJFKoY1fQeGMN0GDdLgHQrvCbSpMwbqzoCZhRI5dETCZna5qVA==", - "dependencies": { - "readable-web-to-node-stream": "^3.0.2", - "strtok3": "^7.0.0", - "token-types": "^5.0.1" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sindresorhus/file-type?sponsor=1" - } - }, - "node_modules/fs-constants": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", - "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" - }, - "node_modules/ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": 
"sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "node_modules/isarray": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", - "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" - }, - "node_modules/jsonparse": { - "version": "0.0.5", - "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-0.0.5.tgz", - "integrity": "sha512-fw7Q/8gFR8iSekUi9I+HqWIap6mywuoe7hQIg3buTVjuZgALKj4HAmm0X6f+TaL4c9NJbvyFQdaI2ppr5p6dnQ==", - "engines": [ - "node >= 0.2.0" - ] - }, - "node_modules/JSONStream": { - "version": "0.10.0", - "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-0.10.0.tgz", - "integrity": "sha512-8XbSFFd43EG+1thjLNFIzCBlwXti0yKa7L+ak/f0T/pkC+31b7G41DXL/JzYpAoYWZ2eCPiu4IIqzijM8N0a/w==", - "dependencies": { - "jsonparse": "0.0.5", - "through": ">=2.2.7 <3" - }, - "bin": { - "JSONStream": "index.js" - }, - "engines": { - "node": "*" - } - }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" - }, - "node_modules/lodash.snakecase": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.snakecase/-/lodash.snakecase-4.1.1.tgz", - "integrity": "sha512-QZ1d4xoBHYUeuouhEq3lk3Uq7ldgyFXGBhg04+oRLnIz8o9T65Eh+8YdroUwn846zchkA9yDsDl5CVVaV2nqYw==" - }, - "node_modules/memorystream": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/memorystream/-/memorystream-0.3.1.tgz", - "integrity": "sha512-S3UwM3yj5mtUSEfP41UZmt/0SCoVYUcU1rkXv+BQ5Ig8ndL4sPoJNBUJERafdPb5jjHJGuMgytgKvKIf58XNBw==", - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/mkdirp-classic": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", - "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==" - }, - "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/nan": { - "version": "2.17.0", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.17.0.tgz", - "integrity": "sha512-2ZTgtl0nJsO0KQCjEpxcIr5D+Yv90plTitZt9JBfQvVJDS5seMl3FOvsh3+9CoYWXf/1l5OaZzzF6nDm4cagaQ==", - "optional": true - }, - "node_modules/node-docker-api": { - "version": "1.1.22", - "resolved": "https://registry.npmjs.org/node-docker-api/-/node-docker-api-1.1.22.tgz", - "integrity": "sha512-8xfOiuLDJQw+l58i66lUNQhRhS5fAExqQbLolmyqMucrsDON7k7eLMIHphcBwwB7utwCHCQkcp73gSAmzSiAiw==", - "dependencies": { - "docker-modem": "^0.3.1", - "memorystream": "^0.3.1" - } - }, - "node_modules/node-docker-api/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/node-docker-api/node_modules/docker-modem": { - "version": "0.3.7", - "resolved": "https://registry.npmjs.org/docker-modem/-/docker-modem-0.3.7.tgz", - "integrity": "sha512-4Xn4ZVtc/2DEFtxY04lOVeF7yvxwXGVo0sN8FKRBnLhBcwQ78Hb56j+Z5yAXXUhoweVhzGeBeGWahS+af0/mcg==", - "dependencies": { - "debug": "^2.6.0", - 
"JSONStream": "0.10.0", - "readable-stream": "~1.0.26-4", - "split-ca": "^1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/node-docker-api/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - }, - "node_modules/node-docker-api/node_modules/readable-stream": { - "version": "1.0.34", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.34.tgz", - "integrity": "sha512-ok1qVCJuRkNmvebYikljxJA/UEsKwLl2nI1OmaqAu4/UE+h0wKCHok4XkL/gvi39OacXvw59RJUOFUkDib2rHg==", - "dependencies": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.1", - "isarray": "0.0.1", - "string_decoder": "~0.10.x" - } - }, - "node_modules/node-docker-api/node_modules/string_decoder": { - "version": "0.10.31", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", - "integrity": "sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ==" - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/peek-readable": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/peek-readable/-/peek-readable-5.0.0.tgz", - "integrity": "sha512-YtCKvLUOvwtMGmrniQPdO7MwPjgkFBtFIrmfSbYmYuq3tKDV/mcfAhBth1+C3ru7uXIZasc/pHnb+YDYNkkj4A==", - "engines": { - "node": ">=14.16" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/Borewit" - } - }, - "node_modules/pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "node_modules/readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/readable-web-to-node-stream": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/readable-web-to-node-stream/-/readable-web-to-node-stream-3.0.2.tgz", - "integrity": "sha512-ePeK6cc1EcKLEhJFt/AebMCLL+GgSKhuygrZ/GLaKZYEecIgIECf4UaUuaByiGtzckwR4ain9VzUh95T1exYGw==", - "dependencies": { - "readable-stream": "^3.6.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/Borewit" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": 
"https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" - }, - "node_modules/split-ca": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/split-ca/-/split-ca-1.0.1.tgz", - "integrity": "sha512-Q5thBSxp5t8WPTTJQS59LrGqOZqOsrhDGDVm8azCqIBjSBd7nd9o2PM+mDulQQkh8h//4U6hFZnc/mul8t5pWQ==" - }, - "node_modules/ssh2": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.11.0.tgz", - "integrity": "sha512-nfg0wZWGSsfUe/IBJkXVll3PEZ//YH2guww+mP88gTpuSU4FtZN7zu9JoeTGOyCNx2dTDtT9fOpWwlzyj4uOOw==", - "hasInstallScript": true, - "dependencies": { - "asn1": "^0.2.4", - "bcrypt-pbkdf": "^1.0.2" - }, - "engines": { - "node": ">=10.16.0" - }, - "optionalDependencies": { - "cpu-features": "~0.0.4", - "nan": "^2.16.0" - } - }, - "node_modules/streamsearch": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", - "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==", - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "dependencies": { - "safe-buffer": "~5.2.0" - } - }, - "node_modules/strtok3": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/strtok3/-/strtok3-7.0.0.tgz", - "integrity": "sha512-pQ+V+nYQdC5H3Q7qBZAz/MO6lwGhoC2gOAjuouGf/VO0m7vQRh8QNMl2Uf6SwAtzZ9bOw3UIeBukEGNJl5dtXQ==", - "dependencies": { - "@tokenizer/token": "^0.3.0", - "peek-readable": "^5.0.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/Borewit" - } - }, - "node_modules/tar-fs": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.0.1.tgz", - "integrity": "sha512-6tzWDMeroL87uF/+lin46k+Q+46rAJ0SyPGz7OW7wTgblI273hsBqk2C1j0/xNadNLKDTUL9BukSjB7cwgmlPA==", - "dependencies": { - "chownr": "^1.1.1", - "mkdirp-classic": "^0.5.2", - "pump": "^3.0.0", - "tar-stream": "^2.0.0" - } - }, - "node_modules/tar-stream": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", - "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", - "dependencies": { - "bl": "^4.0.3", - "end-of-stream": "^1.4.1", - "fs-constants": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^3.1.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==" - }, - "node_modules/token-types": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/token-types/-/token-types-5.0.1.tgz", - "integrity": "sha512-Y2fmSnZjQdDb9W4w4r1tswlMHylzWIeOKpx0aZH9BgGtACHhrk3OkT52AzwcuqTRBZtvvnTjDBh8eynMulu8Vg==", - "dependencies": { - "@tokenizer/token": "^0.3.0", - "ieee754": "^1.2.1" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/Borewit" - } - }, - "node_modules/ts-mixer": { - "version": "6.0.2", - "resolved": 
"https://registry.npmjs.org/ts-mixer/-/ts-mixer-6.0.2.tgz", - "integrity": "sha512-zvHx3VM83m2WYCE8XL99uaM7mFwYSkjR2OZti98fabHrwkjsCvgwChda5xctein3xGOyaQhtTeDq/1H/GNvF3A==" - }, - "node_modules/tslib": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.1.tgz", - "integrity": "sha512-tGyy4dAjRIEwI7BzsB0lynWgOpfqjUdq91XXAlIWD2OwKBH7oCl/GZG/HT4BOHrTlPMOASlMQ7veyTqpmRcrNA==" - }, - "node_modules/tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==" - }, - "node_modules/undici": { - "version": "5.14.0", - "resolved": "https://registry.npmjs.org/undici/-/undici-5.14.0.tgz", - "integrity": "sha512-yJlHYw6yXPPsuOH0x2Ib1Km61vu4hLiRRQoafs+WUgX1vO64vgnxiCEN9dpIrhZyHFsai3F0AEj4P9zy19enEQ==", - "dependencies": { - "busboy": "^1.6.0" - }, - "engines": { - "node": ">=12.18" - } - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" - }, - "node_modules/ws": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.11.0.tgz", - "integrity": "sha512-HPG3wQd9sNQoT9xHyNCXoDUa+Xw/VevmY9FoHyQ+g+rrMn4j6FB4np7Z0OhdTgjx6MgQLK7jwSy1YecU1+4Asg==", - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": "^5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - } - } -} diff --git a/apps/orchistrator/package.json b/apps/orchistrator/package.json deleted file mode 100644 index 97dfb055..00000000 --- a/apps/orchistrator/package.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "name": "server-bot", - "version": "1.0.0", - "description": "Discord bot to remotely monitor and control a docker based server", - "main": "index.js", - "scripts": { - "start": "nodemon index.js" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/allenrkeen/server-bot.git" - }, - "keywords": [ - "discord", - "docker", - "linux", - "selfhost" - ], - "author": "allenrkeen", - "license": "MIT", - "bugs": { - "url": "https://github.com/allenrkeen/server-bot/issues" - }, - "homepage": "https://github.com/allenrkeen/server-bot#readme", - "dependencies": { - "discord.js": "^14.7.1", - "dotenv": "^16.0.3", - "node-docker-api": "^1.1.22" - } -}