diff --git a/.env.example b/.env.example
index c0023751..345b10a1 100644
--- a/.env.example
+++ b/.env.example
@@ -35,6 +35,7 @@ REDIS_PORT=
#dbs
PINECONE_API_KEY=""
BING_COOKIE=""
+BING_AUTH=""
# RevGpt Configuration
ACCESS_TOKEN="your_access_token_here"
@@ -46,7 +47,10 @@ REVGPT_UNVERIFIED_PLUGIN_DOMAINS="showme.redstarplugin.com"
CHATGPT_BASE_URL=""
#Discord Bot
-################################
SAVE_DIRECTORY=""
STORAGE_SERVICE=""
DISCORD_TOKEN=""
+
+#Bing
+AUTH_COOKIE="_U value at bing.com"
+AUTH_COOKIE_SRCHHPGUSR="_SRCHHPGUSR value at bing.com"
diff --git a/.gitignore b/.gitignore
index 34009c13..b393a706 100644
--- a/.gitignore
+++ b/.gitignore
@@ -47,6 +47,9 @@ share/python-wheels/
.installed.cfg
*.egg
MANIFEST
+output/*
+cookies.json
+flagged/*
# PyInstaller
# Usually these files are written by a python script from a template
@@ -72,6 +75,7 @@ coverage.xml
.hypothesis/
.pytest_cache/
cover/
+cookies.json
# Translations
*.mo
@@ -179,4 +183,4 @@ cython_debug/
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
\ No newline at end of file
+#.idea/
diff --git a/apps/BingBot/.env.example b/apps/BingBot/.env.example
new file mode 100644
index 00000000..341406f7
--- /dev/null
+++ b/apps/BingBot/.env.example
@@ -0,0 +1,4 @@
+DISCORD_BOT_TOKEN=
+MENTION_CHANNEL_ID=
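+# AUTH_COOKIE is the "_U" cookie from bing.com and AUTH_COOKIE_SRCHHPGUSR is the SRCHHPGUSR cookie (see the root .env.example)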
+AUTH_COOKIE=
+AUTH_COOKIE_SRCHHPGUSR=
diff --git a/apps/BingBot/Dockerfile b/apps/BingBot/Dockerfile
new file mode 100644
index 00000000..e276b5c8
--- /dev/null
+++ b/apps/BingBot/Dockerfile
@@ -0,0 +1,6 @@
+FROM python:3.9.16
+WORKDIR /bot
+COPY requirements.txt /bot/
+RUN pip install -r requirements.txt
+COPY . /bot
+CMD python bot.py
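+# Assumed local usage: docker build -t bingbot . && docker run --env-file .env -v "$(pwd)/cookies.json:/bot/cookies.json" bingbot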
diff --git a/apps/BingBot/bot.py b/apps/BingBot/bot.py
new file mode 100644
index 00000000..233ca9a7
--- /dev/null
+++ b/apps/BingBot/bot.py
@@ -0,0 +1,120 @@
+import discord
+import os
+import src.log
+import sys
+import pkg_resources
+import json
+from discord.ext import commands
+from dotenv import load_dotenv
+
+load_dotenv()
+
+bot = commands.Bot(command_prefix="!", intents=discord.Intents.all())
+
+# init logger
+logger = src.log.setup_logger(__name__)
+
+
+def restart_bot():
+ # Replace current process with new instance of bot.py
+ os.execl(sys.executable, sys.executable, "bot.py")
+
+
+def check_version() -> None:
+ # Read the requirements.txt file and add each line to a list
+ with open("requirements.txt") as f:
+ required = f.read().splitlines()
+
+ # For each library listed in requirements.txt, check if the corresponding version is installed
+ for package in required:
+ # Use the pkg_resources library to get information about the installed version of the library
+        package_name, package_version = package.split("==")
+ installed = pkg_resources.get_distribution(package_name)
+ # Extract the library name and version number
+ name, version = installed.project_name, installed.version
+ # Compare the version number to see if it matches the one in requirements.txt
+ if package != f"{name}=={version}":
+ logger.error(
+ f"{name} version {version} is installed but does not match the requirements"
+ )
+ sys.exit()
+
+
+@bot.event
+async def on_ready():
+ bot_status = discord.Status.online
+ bot_activity = discord.Activity(type=discord.ActivityType.playing, name="bing.com")
+ await bot.change_presence(status=bot_status, activity=bot_activity)
+    for filename in os.listdir("./cogs"):
+        if filename.endswith(".py"):
+            await bot.load_extension(f"cogs.{filename[:-3]}")
+ logger.info(f"{bot.user} is now running!")
+ print("Bot is Up and Ready!")
+ try:
+ synced = await bot.tree.sync()
+ print(f"Synced {len(synced)} commands")
+ except Exception as e:
+ print(e)
+
+
+# Load command
+@commands.is_owner()
+@bot.command()
+async def load(ctx, extension):
+ await bot.load_extension(f"cogs.{extension}")
+ await ctx.author.send(f"> **Loaded {extension} done.**")
+
+
+# Unload command
+@commands.is_owner()
+@bot.command()
+async def unload(ctx, extension):
+ await bot.unload_extension(f"cogs.{extension}")
+ await ctx.author.send(f"> **Un-Loaded {extension} done.**")
+
+
+# Empty discord_bot.log file
+@commands.is_owner()
+@bot.command()
+async def clean(ctx):
+ open("discord_bot.log", "w").close()
+ await ctx.author.send(f"> **Successfully emptied the file!**")
+
+
+# Get discord_bot.log file
+@commands.is_owner()
+@bot.command()
+async def getLog(ctx):
+ try:
+ with open("discord_bot.log", "rb") as f:
+ file = discord.File(f)
+ await ctx.author.send(file=file)
+ await ctx.author.send("> **Send successfully!**")
+ except:
+ await ctx.author.send("> **Send failed!**")
+
+
+# Upload new Bing cookies and restart the bot
+@commands.is_owner()
+@bot.command()
+async def upload(ctx):
+ if ctx.message.attachments:
+ for attachment in ctx.message.attachments:
+ if str(attachment)[-4:] == ".txt":
+ content = await attachment.read()
+ with open("cookies.json", "w", encoding="utf-8") as f:
+ json.dump(json.loads(content), f, indent=2)
+ if not isinstance(ctx.channel, discord.abc.PrivateChannel):
+ await ctx.message.delete()
+ await ctx.author.send(f"> **Upload new cookies successfully!**")
+ logger.warning("\x1b[31mCookies has been setup successfully\x1b[0m")
+ restart_bot()
+ else:
+ await ctx.author.send("> **Didn't get any txt file.**")
+ else:
+ await ctx.author.send("> **Didn't get any file.**")
+
+
+if __name__ == "__main__":
+    check_version()
+ bot.run(os.getenv("DISCORD_BOT_TOKEN"))
diff --git a/apps/BingBot/cogs/edgegpt.py b/apps/BingBot/cogs/edgegpt.py
new file mode 100644
index 00000000..c95662b0
--- /dev/null
+++ b/apps/BingBot/cogs/edgegpt.py
@@ -0,0 +1,148 @@
+import os
+import discord
+import json
+from typing import Optional
+from EdgeGPT.ImageGen import ImageGenAsync, ImageGen
+from EdgeGPT.EdgeGPT import Chatbot
+from discord import app_commands
+from core.classes import Cog_Extension
+from src import log
+from src.imageCreate import create_image, get_using_create, set_using_create
+from src.response import send_message, get_using_send, set_using_send
+from dotenv import load_dotenv
+
+load_dotenv()
+
+logger = log.setup_logger(__name__)
+
+users_chatbot = {}
+users_image_generator = {}
+user_conversation_style = {}
+
+async def init_chatbot(user_id):
+    with open("./cookies.json", encoding="utf-8") as file:
+        cookie_json = json.load(file)
+
+    # Prefer the "_U" value from cookies.json; fall back to the AUTH_COOKIE environment variable
+    auth_cookie = os.environ.get("AUTH_COOKIE")
+    for cookie in cookie_json:
+        if cookie.get("name") == "_U":
+            auth_cookie = cookie.get("value")
+            break
+
+    users_chatbot[user_id] = UserChatbot(cookies=cookie_json)
+    users_image_generator[user_id] = ImageGenAsync(auth_cookie, quiet=True)
+    user_conversation_style[user_id] = "balanced"
+
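+# Wraps one EdgeGPT Chatbot per Discord user so each user keeps an independent conversation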
+class UserChatbot:
+ def __init__(self, cookies):
+ self.chatbot = Chatbot(cookies=cookies)
+
+ async def send_message(self, interaction, message, conversation_style):
+ await send_message(self.chatbot, interaction, message, conversation_style)
+
+ async def create_image(self, interaction, prompt: str, image_generator):
+ await create_image(interaction, prompt, image_generator)
+
+ async def reset(self):
+ await self.chatbot.reset()
+
+class EdgeGPT(Cog_Extension):
+ # Chat with Bing
+ @app_commands.command(name="bing", description="Have a chat with Bing")
+ async def bing(self, interaction: discord.Interaction, *, message: str):
+ try:
+ using = await get_using_send(interaction.user.id)
+ except:
+ await set_using_send(interaction.user.id, False)
+ using = await get_using_send(interaction.user.id)
+ if not using:
+ await interaction.response.defer(ephemeral=False, thinking=True)
+ username = str(interaction.user)
+ usermessage = message
+ channel = str(interaction.channel)
+ user_id = interaction.user.id
+ if user_id not in users_chatbot:
+ await init_chatbot(interaction.user.id)
+ conversation_style = user_conversation_style[user_id]
+ logger.info(f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}]")
+ await users_chatbot[user_id].send_message(interaction, usermessage, conversation_style)
+ else:
+ await interaction.response.defer(ephemeral=True, thinking=True)
+ await interaction.followup.send("> **Please wait for your last conversation to finish.**")
+
+ # Reset Bing conversation
+ @app_commands.command(name="reset", description="Reset Bing conversation")
+ async def reset(self, interaction: discord.Interaction):
+ await interaction.response.defer(ephemeral=True, thinking=True)
+ user_id = interaction.user.id
+ try:
+ await users_chatbot[user_id].reset()
+ await interaction.followup.send("> **Info: Reset finish.**")
+ logger.warning("\x1b[31mBing has been successfully reset\x1b[0m")
+ except:
+ await interaction.followup.send(f"> **You don't have any conversation yet.**")
+ logger.exception("Bing reset failed.")
+
+ # Switch conversation style
+ @app_commands.command(name="switch_style", description="Switch conversation style")
+ @app_commands.choices(style=[app_commands.Choice(name="Creative", value="creative"), app_commands.Choice(name="Balanced", value="balanced"), app_commands.Choice(name="Precise", value="precise")])
+ async def switch_style(self, interaction: discord.Interaction, style: app_commands.Choice[str]):
+ await interaction.response.defer(ephemeral=True, thinking=True)
+ user_id = interaction.user.id
+ if user_id not in users_chatbot:
+ await init_chatbot(user_id)
+ user_conversation_style[user_id] = style.value
+ await interaction.followup.send(f"> **Info: successfull switch conversation style to {style.value}.**")
+ logger.warning(f"\x1b[31mConversation style has been successfully switch to {style.value}\x1b[0m")
+
+ # Set and delete personal Bing Cookies
+ @app_commands.command(name="bing_cookies", description="Set or delete Bing Cookies")
+ @app_commands.choices(choice=[app_commands.Choice(name="set", value="set"), app_commands.Choice(name="delete", value="delete")])
+ async def cookies_setting(self, interaction: discord.Interaction, choice: app_commands.Choice[str], cookies_file: Optional[discord.Attachment]=None):
+ await interaction.response.defer(ephemeral=True, thinking=True)
+ user_id = interaction.user.id
+ if choice.value == "set":
+ try:
+ content = json.loads(await cookies_file.read())
+ for cookie in content:
+ if cookie.get("name") == "_U":
+ auth_cookie = cookie.get("value")
+ break
+ users_image_generator[user_id] = ImageGenAsync(auth_cookie, quiet=True)
+ users_chatbot[user_id] = UserChatbot(cookies=content)
+ user_conversation_style[user_id] = "balanced"
+ await interaction.followup.send("> **Upload successful!**")
+ logger.warning(f"\x1b[31m{interaction.user} set Bing Cookies successful\x1b[0m")
+ except:
+ await interaction.followup.send("> **Please upload your Bing Cookies.**")
+ else:
+ try:
+ del users_chatbot[user_id]
+ del users_image_generator[user_id]
+ del user_conversation_style[user_id]
+ await interaction.followup.send("> **Delete finish.**")
+ logger.warning(f"\x1b[31m{interaction.user} delete Cookies\x1b[0m")
+ except:
+ await interaction.followup.send("> **You don't have any Bing Cookies.**")
+
+ # Create images
+ @app_commands.command(name="create_image", description="generate image by Bing image creator")
+ async def create_image(self, interaction: discord.Interaction, *, prompt: str):
+ user_id = interaction.user.id
+ if interaction.user.id not in users_chatbot:
+ await init_chatbot(user_id)
+ try:
+ using = await get_using_create(user_id)
+ except:
+ await set_using_create(user_id, False)
+ using = await get_using_create(user_id)
+ if not using:
+ logger.info(f"\x1b[31m{interaction.user}\x1b[0m : '{prompt}' ({interaction.channel}) [BingImageCreator]")
+            await users_chatbot[user_id].create_image(interaction, prompt, users_image_generator[user_id])
+ else:
+ await interaction.response.defer(ephemeral=True, thinking=True)
+ await interaction.followup.send("> **Please wait for your last image to create finish.**")
+
+async def setup(bot):
+ await bot.add_cog(EdgeGPT(bot))
diff --git a/apps/BingBot/cogs/event.py b/apps/BingBot/cogs/event.py
new file mode 100644
index 00000000..9b81cf30
--- /dev/null
+++ b/apps/BingBot/cogs/event.py
@@ -0,0 +1,321 @@
+import discord
+import re
+import os
+import json
+import asyncio
+from EdgeGPT.EdgeGPT import Chatbot, ConversationStyle
+from dotenv import load_dotenv
+from discord.ext import commands
+from core.classes import Cog_Extension
+from functools import partial
+from src import log
+
+load_dotenv()
+
+USE_SUGGEST_RESPONSES = True
+try:
+ MENTION_CHANNEL_ID = int(os.getenv("MENTION_CHANNEL_ID"))
+except:
+ MENTION_CHANNEL_ID = None
+logger = log.setup_logger(__name__)
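+# Process only one Bing request at a time, since mentions share a single Chatbot session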
+sem = asyncio.Semaphore(1)
+conversation_style = "balanced"
+
+with open("./cookies.json", encoding="utf-8") as file:
+ cookies = json.load(file)
+chatbot = Chatbot(cookies=cookies)
+
+
+# To add suggest responses
+class MyView(discord.ui.View):
+ def __init__(self, chatbot: Chatbot, suggest_responses: list):
+ super().__init__(timeout=120)
+ # Add buttons
+ for label in suggest_responses:
+ button = discord.ui.Button(label=label)
+
+ # Button event
+ async def callback(
+ interaction: discord.Interaction, button: discord.ui.Button
+ ):
+ await interaction.response.defer(ephemeral=False, thinking=True)
+ # When click the button, all buttons will disable.
+ for child in self.children:
+ child.disabled = True
+ await interaction.followup.edit_message(
+ message_id=interaction.message.id, view=self
+ )
+ username = str(interaction.user)
+ usermessage = button.label
+ channel = str(interaction.channel)
+ logger.info(
+ f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}] [button]"
+ )
+ task = asyncio.create_task(
+ send_message(chatbot, interaction, usermessage)
+ )
+ await asyncio.gather(task)
+
+ self.add_item(button)
+ self.children[-1].callback = partial(callback, button=button)
+
+
+# Show Dropdown
+class DropdownView(discord.ui.View):
+ def __init__(self):
+ super().__init__(timeout=180)
+
+ options = [
+ discord.SelectOption(
+ label="Creative",
+ description="Switch conversation style to Creative",
+ emoji="🎨",
+ ),
+ discord.SelectOption(
+ label="Balanced",
+ description="Switch conversation style to Balanced",
+ emoji="⚖️",
+ ),
+ discord.SelectOption(
+ label="Precise",
+ description="Switch conversation style to Precise",
+ emoji="🔎",
+ ),
+ discord.SelectOption(
+ label="Reset", description="Reset conversation", emoji="🔄"
+ ),
+ ]
+
+ dropdown = discord.ui.Select(
+ placeholder="Choose setting", min_values=1, max_values=1, options=options
+ )
+
+ dropdown.callback = self.dropdown_callback
+ self.add_item(dropdown)
+
+ # Dropdown event
+ async def dropdown_callback(self, interaction: discord.Interaction):
+ await interaction.response.defer(ephemeral=False, thinking=True)
+ if interaction.data["values"][0] == "Creative":
+ await set_conversation_style("creative")
+ await interaction.followup.send(
+ f"> **Info: successfull switch conversation style to *{interaction.data['values'][0]}*.**"
+ )
+ logger.warning(
+ f"\x1b[31mConversation style has been successfully switch to {interaction.data['values'][0]}\x1b[0m"
+ )
+ elif interaction.data["values"][0] == "Balanced":
+ await set_conversation_style("balanced")
+ await interaction.followup.send(
+ f"> **Info: successfull switch conversation style to *{interaction.data['values'][0]}*.**"
+ )
+ logger.warning(
+ f"\x1b[31mConversation style has been successfully switch to {interaction.data['values'][0]}\x1b[0m"
+ )
+ elif interaction.data["values"][0] == "Precise":
+ await set_conversation_style("precise")
+ await interaction.followup.send(
+ f"> **Info: successfull switch conversation style to *{interaction.data['values'][0]}*.**"
+ )
+ logger.warning(
+ f"\x1b[31mConversation style has been successfully switch to {interaction.data['values'][0]}\x1b[0m"
+ )
+ else:
+ await chatbot.reset()
+ await interaction.followup.send(f"> **Info: Reset finish.**")
+ logger.warning("\x1b[31mBing has been successfully reset\x1b[0m")
+ # disable dropdown after select
+ for dropdown in self.children:
+ dropdown.disabled = True
+ await interaction.followup.edit_message(
+ message_id=interaction.message.id, view=self
+ )
+
+
+# Set conversation style
+async def set_conversation_style(style: str):
+ global conversation_style
+ conversation_style = style
+
+
+async def set_chatbot(cookies):
+ global chatbot
+ chatbot = Chatbot(cookies=cookies)
+
+
+async def send_message(chatbot: Chatbot, message, user_message: str):
+ async with sem:
+ if isinstance(message, discord.message.Message):
+ await message.channel.typing()
+ reply = ""
+ text = ""
+ link_embed = ""
+ images_embed = []
+ all_url = []
+ try:
+ # Change conversation style
+ if conversation_style == "creative":
+ reply = await chatbot.ask(
+ prompt=user_message,
+ conversation_style=ConversationStyle.creative,
+ simplify_response=True,
+ )
+ elif conversation_style == "precise":
+ reply = await chatbot.ask(
+ prompt=user_message,
+ conversation_style=ConversationStyle.precise,
+ simplify_response=True,
+ )
+ else:
+ reply = await chatbot.ask(
+ prompt=user_message,
+ conversation_style=ConversationStyle.balanced,
+ simplify_response=True,
+ )
+
+ # Get reply text
+ text = f"{reply['text']}"
+ text = re.sub(r"\[\^(\d+)\^\]", lambda match: "", text)
+
+ # Get the URL, if available
+ try:
+ if len(reply["sources"]) != 0:
+ for i, url in enumerate(reply["sources"], start=1):
+ if len(url["providerDisplayName"]) == 0:
+ all_url.append(f"{i}. {url['seeMoreUrl']}")
+ else:
+ all_url.append(
+ f"{i}. [{url['providerDisplayName']}]({url['seeMoreUrl']})"
+ )
+ link_text = "\n".join(all_url)
+ link_embed = discord.Embed(description=link_text)
+ except:
+ pass
+
+ # Set the final message
+ if isinstance(message, discord.interactions.Interaction):
+ user_message = user_message.replace("\n", "")
+ ask = f"> **{user_message}**\t(***style: {conversation_style}***)\n\n"
+ response = f"{ask}{text}"
+ else:
+ response = f"{text}\t(***style: {conversation_style}***)"
+
+ # Discord limit about 2000 characters for a message
+ while len(response) > 2000:
+ temp = response[:2000]
+ response = response[2000:]
+ if isinstance(message, discord.interactions.Interaction):
+ await message.followup.send(temp)
+ else:
+ await message.channel.send(temp)
+
+ # Get the image, if available
+ try:
+ if len(link_embed) == 0:
+ all_image = re.findall(
+ "https?://[\w\./]+", str(reply["sources_text"])
+ )
+ [
+ images_embed.append(
+ discord.Embed(url="https://www.bing.com/").set_image(
+ url=image_link
+ )
+ )
+ for image_link in all_image
+ ]
+ except:
+ pass
+
+ if USE_SUGGEST_RESPONSES:
+ suggest_responses = reply["suggestions"]
+ if images_embed:
+ if isinstance(message, discord.interactions.Interaction):
+ await message.followup.send(
+ response,
+ view=MyView(chatbot, suggest_responses),
+ embeds=images_embed,
+ wait=True,
+ )
+ else:
+ await message.channel.send(
+ response,
+ view=MyView(chatbot, suggest_responses),
+ embeds=images_embed,
+ )
+ elif link_embed:
+ if isinstance(message, discord.interactions.Interaction):
+ await message.followup.send(
+ response,
+ view=MyView(chatbot, suggest_responses),
+ embed=link_embed,
+ wait=True,
+ )
+ else:
+ await message.channel.send(
+ response,
+ view=MyView(chatbot, suggest_responses),
+ embed=link_embed,
+ )
+ else:
+ if isinstance(message, discord.interactions.Interaction):
+ await message.followup.send(
+ response, view=MyView(chatbot, suggest_responses), wait=True
+ )
+ else:
+ await message.channel.send(
+ response, view=MyView(chatbot, suggest_responses)
+ )
+ else:
+ if images_embed:
+ if isinstance(message, discord.interactions.Interaction):
+ await message.followup.send(
+ response, embeds=images_embed, wait=True
+ )
+ else:
+ await message.channel.send(response, embeds=images_embed)
+ elif link_embed:
+ if isinstance(message, discord.interactions.Interaction):
+ await message.followup.send(
+ response, embed=link_embed, wait=True
+ )
+ else:
+ await message.channel.send(response, embed=link_embed)
+ else:
+ if isinstance(message, discord.interactions.Interaction):
+ await message.followup.send(response, wait=True)
+ else:
+ await message.channel.send(response)
+ except Exception as e:
+ if isinstance(message, discord.interactions.Interaction):
+ await message.followup.send(f">>> **Error: {e}**")
+ else:
+ await message.channel.send(f">>> **Error: {e}**")
+ logger.exception(f"Error while sending message: {e}")
+
+
+class Event(Cog_Extension):
+ @commands.Cog.listener()
+ async def on_message(self, message: discord.Message):
+ if message.author == self.bot.user:
+ return
+ if self.bot.user in message.mentions:
+ if not MENTION_CHANNEL_ID or message.channel.id == MENTION_CHANNEL_ID:
+ content = re.sub(r"<@.*?>", "", message.content).strip()
+ if len(content) > 0:
+ username = str(message.author)
+ channel = str(message.channel)
+ logger.info(
+ f"\x1b[31m{username}\x1b[0m : '{content}' ({channel}) [Style: {conversation_style}]"
+ )
+ task = asyncio.create_task(send_message(chatbot, message, content))
+ await asyncio.gather(task)
+ else:
+ await message.channel.send(view=DropdownView())
+ elif MENTION_CHANNEL_ID is not None:
+ await message.channel.send(
+ f"> **Can only be mentioned at <#{self.bot.get_channel(MENTION_CHANNEL_ID).id}>**"
+ )
+
+
+async def setup(bot):
+ await bot.add_cog(Event(bot))
diff --git a/apps/BingBot/cogs/help.py b/apps/BingBot/cogs/help.py
new file mode 100644
index 00000000..ae82e1d7
--- /dev/null
+++ b/apps/BingBot/cogs/help.py
@@ -0,0 +1,35 @@
+import discord
+from core.classes import Cog_Extension
+from discord import app_commands
+
+
+class Help(Cog_Extension):
+ @app_commands.command(name="help", description="Show how to use")
+ async def help(self, interaction: discord.Interaction):
+ embed = discord.Embed(
+ title="Help",
+ )
+ embed.add_field(
+ name="/bing_cookies",
+ value="Set and delete your Bing Cookies.",
+ inline=False,
+ )
+ embed.add_field(name="/bing", value="Chat with Bing.", inline=False)
+ embed.add_field(
+ name="/reset", value="Reset your Bing conversation.", inline=False
+ )
+ embed.add_field(
+ name="/switch_style",
+ value="Switch your Bing conversation style.",
+ inline=False,
+ )
+ embed.add_field(
+ name="/create_image",
+ value="Generate image by Bing Image Creator.",
+ inline=False,
+ )
+ await interaction.response.send_message(embed=embed)
+
+
+async def setup(bot):
+ await bot.add_cog(Help(bot))
diff --git a/apps/BingBot/compose.yaml b/apps/BingBot/compose.yaml
new file mode 100644
index 00000000..b6442b19
--- /dev/null
+++ b/apps/BingBot/compose.yaml
@@ -0,0 +1,11 @@
+version: '3'
+
+services:
+ spartan:
+ container_name: Spartan
+ build: .
+ environment:
+ - DISCORD_BOT_TOKEN=${DISCORD_BOT_TOKEN}
+ volumes:
+ - ./cookies.json:/bot/cookies.json
+ - ./config.yml:/bot/config.yml
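+# Assumed usage: docker compose up -d --build (DISCORD_BOT_TOKEN must be set in the shell or in a .env file next to this compose file)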
diff --git a/apps/BingBot/cookies.json b/apps/BingBot/cookies.json
new file mode 100644
index 00000000..4d0748fc
--- /dev/null
+++ b/apps/BingBot/cookies.json
@@ -0,0 +1,6 @@
+[
+ {
+ "name": "cookie1",
+ "value": "1lEXeWRSIPUsQ0S3tdAc3v7BexGK2qBlzsXz8j52w_HNBoOsegjiwRySQHmfoWduHVUxSXo6cETPP2qNrYWAz6k7wn43WGO9i7ll9_Wl7M6HA2c9twbKByfAtAB5fr26wPawQ6y1GCdakD_Kr4xdD20fvkytnmOmZu7Ktnb9mUVE605AAbJcIA9SOlRN5410ZPOnZA1cIzr4WtAFWNfQKPG6Sxk_zO5zvXQfYTyMNmOI"
+ }
+]
diff --git a/apps/BingBot/core/classes.py b/apps/BingBot/core/classes.py
new file mode 100644
index 00000000..23c4cbb1
--- /dev/null
+++ b/apps/BingBot/core/classes.py
@@ -0,0 +1,6 @@
+from discord.ext import commands
+
+
+class Cog_Extension(commands.Cog):
+ def __init__(self, bot):
+ self.bot = bot
diff --git a/apps/BingBot/requirements.txt b/apps/BingBot/requirements.txt
new file mode 100644
index 00000000..31ae25b7
--- /dev/null
+++ b/apps/BingBot/requirements.txt
@@ -0,0 +1,4 @@
+discord.py==2.3.2
+python-dotenv==1.0.0
+PyYAML==6.0.1
+bing-chat==1.9.3
diff --git a/apps/BingBot/src/imageCreate.py b/apps/BingBot/src/imageCreate.py
new file mode 100644
index 00000000..0b68b44d
--- /dev/null
+++ b/apps/BingBot/src/imageCreate.py
@@ -0,0 +1,40 @@
+import discord
+import asyncio
+from src import log
+
+logger = log.setup_logger(__name__)
+using_func = {}
+
+
+async def get_using_create(user_id):
+ return using_func[user_id]
+
+
+async def set_using_create(user_id, status: bool):
+ using_func[user_id] = status
+
+
+async def create_image(interaction: discord.Interaction, prompt: str, image_generator):
+ await interaction.response.defer(ephemeral=False, thinking=True)
+ using_func[interaction.user.id] = True
+ try:
+ embeds = []
+ prompts = f"> **{prompt}** - <@{str(interaction.user.id)}> (***BingImageCreator***)\n\n"
+ # Fetches image links
+ images = await image_generator.get_images(prompt)
+ # Add embed to list of embeds
+ [
+ embeds.append(
+ discord.Embed(url="https://www.bing.com/").set_image(url=image_link)
+ )
+ for image_link in images
+ ]
+ await interaction.followup.send(prompts, embeds=embeds, wait=True)
+ except asyncio.TimeoutError:
+ await interaction.followup.send("> **Error: Request timed out.**")
+ logger.exception("Error while create image: Request timed out.")
+ except Exception as e:
+ await interaction.followup.send(f"> **Error: {e}**")
+ logger.exception(f"Error while create image: {e}")
+ finally:
+ using_func[interaction.user.id] = False
diff --git a/apps/BingBot/src/log.py b/apps/BingBot/src/log.py
new file mode 100644
index 00000000..ed04a4a3
--- /dev/null
+++ b/apps/BingBot/src/log.py
@@ -0,0 +1,66 @@
+import os
+import logging
+import logging.handlers
+
+
+class CustomFormatter(logging.Formatter):
+ LEVEL_COLORS = [
+ (logging.DEBUG, "\x1b[40;1m"),
+ (logging.INFO, "\x1b[34;1m"),
+ (logging.WARNING, "\x1b[33;1m"),
+ (logging.ERROR, "\x1b[31m"),
+ (logging.CRITICAL, "\x1b[41m"),
+ ]
+ FORMATS = {
+ level: logging.Formatter(
+ f"\x1b[30;1m%(asctime)s\x1b[0m {color}%(levelname)-8s\x1b[0m \x1b[35m%(name)s\x1b[0m -> %(message)s",
+ "%Y-%m-%d %H:%M:%S",
+ )
+ for level, color in LEVEL_COLORS
+ }
+
+ def format(self, record):
+ formatter = self.FORMATS.get(record.levelno)
+ if formatter is None:
+ formatter = self.FORMATS[logging.DEBUG]
+
+ # Override the traceback to always print in red
+ if record.exc_info:
+ text = formatter.formatException(record.exc_info)
+ record.exc_text = f"\x1b[31m{text}\x1b[0m"
+
+ output = formatter.format(record)
+
+ # Remove the cache layer
+ record.exc_text = None
+ return output
+
+
+def setup_logger(module_name: str) -> logging.Logger:
+ # create logger
+ library, _, _ = module_name.partition(".py")
+ logger = logging.getLogger(library)
+ logger.setLevel(logging.INFO)
+
+ if not logger.handlers:
+ # create console handler
+ console_handler = logging.StreamHandler()
+ console_handler.setLevel(logging.INFO)
+ console_handler.setFormatter(CustomFormatter())
+ # specify that the log file path is the same as `main.py` file path
+ grandparent_dir = os.path.abspath(__file__ + "/../../")
+ log_name = "discord_bot.log"
+ log_path = os.path.join(grandparent_dir, log_name)
+ # create local log handler
+ log_handler = logging.handlers.RotatingFileHandler(
+ filename=log_path,
+ encoding="utf-8",
+ maxBytes=32 * 1024 * 1024, # 32 MiB
+            backupCount=2,  # Keep up to 2 rotated files
+ )
+ log_handler.setFormatter(CustomFormatter())
+ # Add handlers to logger
+ logger.addHandler(log_handler)
+ logger.addHandler(console_handler)
+
+ return logger
diff --git a/apps/BingBot/src/response.py b/apps/BingBot/src/response.py
new file mode 100644
index 00000000..47960a73
--- /dev/null
+++ b/apps/BingBot/src/response.py
@@ -0,0 +1,194 @@
+import discord
+import re
+from EdgeGPT.EdgeGPT import Chatbot, ConversationStyle
+from src import log
+from functools import partial
+
+USE_SUGGEST_RESPONSES = True
+logger = log.setup_logger(__name__)
+using_func = {}
+
+
+# To add suggest responses
+class MyView(discord.ui.View):
+ def __init__(
+ self,
+ interaction: discord.Interaction,
+ chatbot: Chatbot,
+ conversation_style: str,
+ suggest_responses: list,
+ ):
+ super().__init__(timeout=120)
+ self.button_author = interaction.user.id
+ # Add buttons
+ for label in suggest_responses:
+ button = discord.ui.Button(label=label)
+
+ # Button event
+ async def callback(
+ interaction: discord.Interaction,
+ button_author: int,
+ button: discord.ui.Button,
+ ):
+ if interaction.user.id != button_author:
+ await interaction.response.defer(ephemeral=True, thinking=True)
+ await interaction.followup.send(
+ "You don't have permission to press this button."
+ )
+ elif not using_func[interaction.user.id]:
+ await interaction.response.defer(ephemeral=False, thinking=True)
+ # When click the button, all buttons will disable.
+ for child in self.children:
+ child.disabled = True
+ await interaction.followup.edit_message(
+ message_id=interaction.message.id, view=self
+ )
+ username = str(interaction.user)
+ usermessage = button.label
+ channel = str(interaction.channel)
+ logger.info(
+ f"\x1b[31m{username}\x1b[0m : '{usermessage}' ({channel}) [Style: {conversation_style}] [button]"
+ )
+ await send_message(
+ chatbot, interaction, usermessage, conversation_style
+ )
+ else:
+ await interaction.response.defer(ephemeral=True, thinking=True)
+ await interaction.followup.send(
+ "Please wait for your last conversation to finish."
+ )
+
+ self.add_item(button)
+ self.children[-1].callback = partial(
+ callback, button_author=self.button_author, button=button
+ )
+
+
+async def get_using_send(user_id):
+ return using_func[user_id]
+
+
+async def set_using_send(user_id, status: bool):
+ using_func[user_id] = status
+
+
+async def send_message(
+ chatbot: Chatbot,
+ interaction: discord.Interaction,
+ user_message: str,
+ conversation_style: str,
+):
+ using_func[interaction.user.id] = True
+ reply = ""
+ text = ""
+ link_embed = ""
+ images_embed = []
+ all_url = []
+ try:
+ # Change conversation style
+ if conversation_style == "creative":
+ reply = await chatbot.ask(
+ prompt=user_message,
+ conversation_style=ConversationStyle.creative,
+ simplify_response=True,
+ )
+ elif conversation_style == "precise":
+ reply = await chatbot.ask(
+ prompt=user_message,
+ conversation_style=ConversationStyle.precise,
+ simplify_response=True,
+ )
+ else:
+ reply = await chatbot.ask(
+ prompt=user_message,
+ conversation_style=ConversationStyle.balanced,
+ simplify_response=True,
+ )
+
+ # Get reply text
+ text = f"{reply['text']}"
+ text = re.sub(r"\[\^(\d+)\^\]", lambda match: "", text)
+
+ # Get the URL, if available
+ try:
+ if len(reply["sources"]) != 0:
+ for i, url in enumerate(reply["sources"], start=1):
+ if len(url["providerDisplayName"]) == 0:
+ all_url.append(f"{i}. {url['seeMoreUrl']}")
+ else:
+ all_url.append(
+ f"{i}. [{url['providerDisplayName']}]({url['seeMoreUrl']})"
+ )
+ link_text = "\n".join(all_url)
+ link_embed = discord.Embed(description=link_text)
+ except:
+ pass
+
+ # Set the final message
+ user_message = user_message.replace("\n", "")
+ ask = f"> **{user_message}** - <@{str(interaction.user.id)}> (***style: {conversation_style}***)\n\n"
+ response = f"{ask}{text}"
+
+ # Discord limit about 2000 characters for a message
+ while len(response) > 2000:
+ temp = response[:2000]
+ response = response[2000:]
+ await interaction.followup.send(temp)
+
+ # Get the image, if available
+ try:
+ if len(link_embed) == 0:
+ all_image = re.findall("https?://[\w\./]+", str(reply["sources_text"]))
+ [
+ images_embed.append(
+ discord.Embed(url="https://www.bing.com/").set_image(
+ url=image_link
+ )
+ )
+ for image_link in all_image
+ ]
+ except:
+ pass
+ # Add all suggest responses in list
+ if USE_SUGGEST_RESPONSES:
+ suggest_responses = reply["suggestions"]
+ if images_embed:
+ await interaction.followup.send(
+ response,
+ view=MyView(
+ interaction, chatbot, conversation_style, suggest_responses
+ ),
+ embeds=images_embed,
+ wait=True,
+ )
+ elif link_embed:
+ await interaction.followup.send(
+ response,
+ view=MyView(
+ interaction, chatbot, conversation_style, suggest_responses
+ ),
+ embed=link_embed,
+ wait=True,
+ )
+ else:
+ await interaction.followup.send(
+ response,
+ view=MyView(
+ interaction, chatbot, conversation_style, suggest_responses
+ ),
+ wait=True,
+ )
+ else:
+ if images_embed:
+ await interaction.followup.send(
+ response, embeds=images_embed, wait=True
+ )
+ elif link_embed:
+ await interaction.followup.send(response, embed=link_embed, wait=True)
+ else:
+ await interaction.followup.send(response, wait=True)
+ except Exception as e:
+ await interaction.followup.send(f">>> **Error: {e}**")
+ logger.exception(f"Error while sending message: {e}")
+ finally:
+ using_func[interaction.user.id] = False
diff --git a/apps/GradioBot/main.py b/apps/GradioBot/main.py
new file mode 100644
index 00000000..fb985531
--- /dev/null
+++ b/apps/GradioBot/main.py
@@ -0,0 +1,197 @@
+import asyncio
+import argparse
+from collections import Counter
+import json
+import pathlib
+import re
+
+
+import discord
+from discord.ext import commands
+import gradio as gr
+from gradio import utils
+import requests
+
+from typing import Dict, List
+
+from utils import *
+
+
+lock = asyncio.Lock()
+
+bot = commands.Bot("", intents=discord.Intents(messages=True, guilds=True))
+
+
+GUILD_SPACES_FILE = "guild_spaces.pkl"
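+# guild_spaces maps guild id -> Space name; guild_blocks maps guild id -> the loaded gr.Interface for that Space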
+
+
+if pathlib.Path(GUILD_SPACES_FILE).exists():
+ guild_spaces = read_pickle_file(GUILD_SPACES_FILE)
+    assert isinstance(guild_spaces, dict), f"{GUILD_SPACES_FILE} is in an invalid format."
+ guild_blocks = {}
+ delete_keys = []
+ for k, v in guild_spaces.items():
+ try:
+ guild_blocks[k] = gr.Interface.load(v, src="spaces")
+ except ValueError:
+ delete_keys.append(k)
+ for k in delete_keys:
+ del guild_spaces[k]
+else:
+ guild_spaces: Dict[int, str] = {}
+ guild_blocks: Dict[int, gr.Blocks] = {}
+
+
+HASHED_USERS_FILE = "users.pkl"
+
+if pathlib.Path(HASHED_USERS_FILE).exists():
+ hashed_users = read_pickle_file(HASHED_USERS_FILE)
+    assert isinstance(hashed_users, list), f"{HASHED_USERS_FILE} is in an invalid format."
+else:
+ hashed_users: List[str] = []
+
+
+@bot.event
+async def on_ready():
+ print(f"Logged in as {bot.user}")
+ print(f"Running in {len(bot.guilds)} servers...")
+
+
+async def run_prediction(space: gr.Blocks, *inputs):
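+    # Run the Space's first prediction function (fn_index 0): serialize the inputs, call process_api, then deserialize the outputs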
+ inputs = list(inputs)
+ fn_index = 0
+ processed_inputs = space.serialize_data(fn_index=fn_index, inputs=inputs)
+ batch = space.dependencies[fn_index]["batch"]
+
+ if batch:
+ processed_inputs = [[inp] for inp in processed_inputs]
+
+ outputs = await space.process_api(
+ fn_index=fn_index, inputs=processed_inputs, request=None, state={}
+ )
+ outputs = outputs["data"]
+
+ if batch:
+ outputs = [out[0] for out in outputs]
+
+ processed_outputs = space.deserialize_data(fn_index, outputs)
+ processed_outputs = utils.resolve_singleton(processed_outputs)
+
+ return processed_outputs
+
+
+async def display_stats(message: discord.Message):
+ await message.channel.send(
+ f"Running in {len(bot.guilds)} servers\n"
+ f"Total # of users: {len(hashed_users)}\n"
+ f"------------------"
+ )
+ await message.channel.send(f"Most popular spaces:")
+ # display the top 10 most frequently occurring strings and their counts
+ spaces = guild_spaces.values()
+ counts = Counter(spaces)
+ for space, count in counts.most_common(10):
+ await message.channel.send(f"- {space}: {count}")
+
+
+async def load_space(guild: discord.Guild, message: discord.Message, content: str):
+ iframe_url = (
+ requests.get(f"https://huggingface.co/api/spaces/{content}/host")
+ .json()
+ .get("host")
+ )
+ if iframe_url is None:
+ return await message.channel.send(
+ f"Space: {content} not found. If you'd like to make a prediction, enclose the inputs in quotation marks."
+ )
+ else:
+ await message.channel.send(
+ f"Loading Space: https://huggingface.co/spaces/{content}..."
+ )
+ interface = gr.Interface.load(content, src="spaces")
+ guild_spaces[guild.id] = content
+ guild_blocks[guild.id] = interface
+ asyncio.create_task(update_pickle_file(guild_spaces, GUILD_SPACES_FILE))
+ if len(content) > 32 - len(f"{bot.name} []"): # type: ignore
+ nickname = content[: 32 - len(f"{bot.name} []") - 3] + "..." # type: ignore
+ else:
+ nickname = content
+ nickname = f"{bot.name} [{nickname}]" # type: ignore
+ await guild.me.edit(nick=nickname)
+ await message.channel.send(
+ "Ready to make predictions! Type in your inputs and enclose them in quotation marks."
+ )
+
+
+async def disconnect_space(bot: commands.Bot, guild: discord.Guild):
+ guild_spaces.pop(guild.id, None)
+ guild_blocks.pop(guild.id, None)
+ asyncio.create_task(update_pickle_file(guild_spaces, GUILD_SPACES_FILE))
+ await guild.me.edit(nick=bot.name) # type: ignore
+
+
+async def make_prediction(guild: discord.Guild, message: discord.Message, content: str):
+ if guild.id in guild_spaces:
+ params = re.split(r' (?=")', content)
+ params = [p.strip("'\"") for p in params]
+ space = guild_blocks[guild.id]
+ predictions = await run_prediction(space, *params)
+ if isinstance(predictions, (tuple, list)):
+ for p in predictions:
+ await send_file_or_text(message.channel, p)
+ else:
+ await send_file_or_text(message.channel, predictions)
+ return
+ else:
+ await message.channel.send(
+ "No Space is currently running. Please type in the name of a Hugging Face Space name first, e.g. abidlabs/en2fr"
+ )
+ await guild.me.edit(nick=bot.name) # type: ignore
+
+
+@bot.event
+async def on_message(message: discord.Message):
+ if message.author == bot.user:
+ return
+ h = hash_user_id(message.author.id)
+ if h not in hashed_users:
+ hashed_users.append(h)
+ asyncio.create_task(update_pickle_file(hashed_users, HASHED_USERS_FILE))
+ else:
+ if message.content:
+ content = remove_tags(message.content)
+ guild = message.channel.guild
+ assert guild, "Message not sent in a guild."
+
+ if content.strip() == "exit":
+ await disconnect_space(bot, guild)
+ elif content.strip() == "stats":
+ await display_stats(message)
+ elif content.startswith('"') or content.startswith("'"):
+ await make_prediction(guild, message, content)
+ else:
+ await load_space(guild, message, content)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--token",
+ type=str,
+ help="API key for the Discord bot. You can set this to your Discord token if you'd like to make your own clone of the Gradio Bot.",
+ required=False,
+ default="",
+ )
+ args = parser.parse_args()
+
+ if args.token.strip():
+ discord_token = args.token
+ bot.env = "staging" # type: ignore
+ bot.name = "StagingBot" # type: ignore
+ else:
+ with open("secrets.json") as fp:
+ discord_token = json.load(fp)["discord_token"]
+ bot.env = "prod" # type: ignore
+ bot.name = "GradioBot" # type: ignore
+
+ bot.run(discord_token)
diff --git a/apps/GradioBot/utils.py b/apps/GradioBot/utils.py
new file mode 100644
index 00000000..5657b36f
--- /dev/null
+++ b/apps/GradioBot/utils.py
@@ -0,0 +1,41 @@
+from __future__ import annotations
+
+import asyncio
+import pickle
+import hashlib
+import pathlib
+from typing import Dict, List
+
+import discord
+
+lock = asyncio.Lock()
+
+
+async def update_pickle_file(data: Dict | List, file_path: str):
+ async with lock:
+ with open(file_path, "wb") as fp:
+ pickle.dump(data, fp)
+
+
+def read_pickle_file(file_path: str):
+ with open(file_path, "rb") as fp:
+ return pickle.load(fp)
+
+
+async def send_file_or_text(channel, file_or_text: str):
+ # if the file exists, send as a file
+ if pathlib.Path(str(file_or_text)).exists():
+ with open(file_or_text, "rb") as f:
+ return await channel.send(file=discord.File(f))
+ else:
+ return await channel.send(file_or_text)
+
+
+def remove_tags(content: str) -> str:
+ content = content.replace("<@1040198143695933501>", "")
+ content = content.replace("<@1057338428938788884>", "")
+ return content.strip()
+
+
+def hash_user_id(user_id: int) -> str:
+ return hashlib.sha256(str(user_id).encode("utf-8")).hexdigest()
diff --git a/apps/MythGen/.env.example b/apps/MythGen/.env.example
new file mode 100644
index 00000000..95580b46
--- /dev/null
+++ b/apps/MythGen/.env.example
@@ -0,0 +1,2 @@
+OPENAI_API_KEY="YOUR_API_KEY"
+DALLE_COOKIE="YOUR_COOKIE"
diff --git a/apps/MythGen/README.md b/apps/MythGen/README.md
new file mode 100644
index 00000000..ef684287
--- /dev/null
+++ b/apps/MythGen/README.md
@@ -0,0 +1,71 @@
+# MythGen: A Dynamic New Art Form
+
+## Overview
+
+MythGen is an Iterative Multimedia Generator that allows users to create their own comic stories based on textual prompts. The system integrates state-of-the-art language and image models to provide a seamless and creative experience.
+
+## Features
+
+- **Initial Prompting**: Kick-start your story with an initial text prompt.
+- **Artistic Style Suffix**: Maintain a consistent artistic style throughout your comic.
+- **Image Generation**: Generate captivating comic panels based on textual captions.
+- **Caption Generation**: Produce engaging captions for each comic panel.
+- **Interactive Story Building**: Select your favorite panels and captions to build your story iteratively.
+- **Storyboard**: View the sequence of your selected panels and their associated captions.
+- **State Management**: Keep track of the current state of your comic generation process.
+- **User-Friendly Interface**: Easy-to-use interface built on Gradio.
+
+## Prerequisites
+
+### OpenAI API Key
+
+You will need an OpenAI API key to access GPT-3 for generating captions. Follow these steps to obtain one:
+
+1. Visit OpenAI's Developer Dashboard.
+2. Sign up for an API key and follow the verification process.
+3. Once verified, you will be provided with an API key.
+
+### Bing Image Creator Cookie
+
+You need your Bing cookie to run this program. Follow these steps to obtain it:
+
+1. Go to Bing Image Creator in your browser and log in to your account.
+2. Press Ctrl+Shift+J to open the developer tools.
+3. Navigate to the Application section.
+4. Click on the Cookies section.
+5. Find the variable `_U` and copy its value.
+
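+Put the copied value into a `.env` file next to `main.py` (see `.env.example`), for example:
+
+```
+OPENAI_API_KEY="YOUR_API_KEY"
+DALLE_COOKIE="YOUR_COOKIE"
+```
+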
+## How to Use
+
+1. **Initial Prompt**: Start by inputting your initial comic concept.
+2. **Select a Panel**: Choose your favorite panel and caption from the generated options.
+3. **Iterate**: Use the "Next Part" button to generate the next part of your comic based on your latest selection.
+4. **View Storyboard**: See your selected comic panels and captions in a storyboard for a comprehensive view of your comic.
+5. **Finalize**: Continue this process until you've created your full comic story.
+
+## Installation
+
+```bash
+pip install -r requirements.txt
+```
+
+## Running MythGen
+
+```bash
+python main.py
+```
+
+This will launch the Gradio interface where you can interact with MythGen.
+
+## Dependencies
+
+- Python 3.x
+- Gradio
+- OpenAI's GPT-3
+- DALL-E
+
+## Contributing
+
+We welcome contributions! Please read the CONTRIBUTING.md for guidelines on how to contribute to this project.
+
+## License
+
+This project is licensed under the MIT License. See LICENSE.md for details.
diff --git a/apps/MythGen/cookies.json b/apps/MythGen/cookies.json
new file mode 100644
index 00000000..4d0748fc
--- /dev/null
+++ b/apps/MythGen/cookies.json
@@ -0,0 +1,6 @@
+[
+ {
+ "name": "cookie1",
+ "value": "1lEXeWRSIPUsQ0S3tdAc3v7BexGK2qBlzsXz8j52w_HNBoOsegjiwRySQHmfoWduHVUxSXo6cETPP2qNrYWAz6k7wn43WGO9i7ll9_Wl7M6HA2c9twbKByfAtAB5fr26wPawQ6y1GCdakD_Kr4xdD20fvkytnmOmZu7Ktnb9mUVE605AAbJcIA9SOlRN5410ZPOnZA1cIzr4WtAFWNfQKPG6Sxk_zO5zvXQfYTyMNmOI"
+ }
+]
diff --git a/apps/MythGen/requirements.txt b/apps/MythGen/requirements.txt
new file mode 100644
index 00000000..e1fda005
--- /dev/null
+++ b/apps/MythGen/requirements.txt
@@ -0,0 +1,8 @@
+dalle3==0.0.7
+Flask==2.3.2
+gradio==3.48.0
+openai==0.28.1
+Pillow==10.1.0
+python-dotenv==1.0.0
+Requests==2.31.0
+swarms==1.8.2
diff --git a/apps/discord.py b/apps/discord.py
index f605a108..cc19ea38 100644
--- a/apps/discord.py
+++ b/apps/discord.py
@@ -1,18 +1,126 @@
-import os
-import asyncio
-import dalle3
import discord
-import responses
-from invoke import Executor
-from dotenv import load_dotenv
from discord.ext import commands
+import asyncio
+import os
+from dotenv import load_dotenv
+from invoke import Executor
+
+
+class BotCommands(commands.Cog):
+ def __init__(self, bot):
+ self.bot = bot
+
+ @commands.command()
+ async def greet(self, ctx):
+ """greets the user."""
+ await ctx.send(f"hello, {ctx.author.name}!")
+
+ @commands.command()
+ async def help_me(self, ctx):
+ """provides a list of commands and their descriptions."""
+ help_text = """
+ - `!greet`: greets you.
+ - `!run [description]`: generates a video based on the given description.
+ - `!help_me`: provides this list of commands and their descriptions.
+ """
+ await ctx.send(help_text)
+
+ @commands.command()
+ async def join(self, ctx):
+ """joins the voice channel that the user is in."""
+ if ctx.author.voice:
+ channel = ctx.author.voice.channel
+ await channel.connect()
+ else:
+ await ctx.send("you are not in a voice channel!")
+
+ @commands.command()
+ async def leave(self, ctx):
+ """leaves the voice channel that the self.bot is in."""
+ if ctx.voice_client:
+ await ctx.voice_client.disconnect()
+ else:
+ await ctx.send("i am not in a voice channel!")
+
+    @commands.command()
+    async def listen(self, ctx):
+        """starts listening to voice in the voice channel that the bot is in."""
+        if ctx.voice_client:
+            # create a WaveSink to record the audio
+            sink = discord.sinks.WaveSink()
+
+            # finished-callback required by py-cord's voice receiving; saves each user's audio to disk
+            async def on_done(sink: discord.sinks.WaveSink, *args):
+                for user_id, audio in sink.audio_data.items():
+                    with open(f"audio_{user_id}.wav", "wb") as f:
+                        f.write(audio.file.getvalue())
+
+            # start recording
+            ctx.voice_client.start_recording(sink, on_done)
+            await ctx.send("started listening and recording.")
+        else:
+            await ctx.send("i am not in a voice channel!")
+
+ @commands.command()
+    async def generate_image(self, ctx, *, prompt: str = None, imggen=None):
+ """generates images based on the provided prompt"""
+ await ctx.send(f"generating images for prompt: `{prompt}`...")
+ loop = asyncio.get_event_loop()
+
+        # run the image generator in the default thread pool executor
+        future = loop.run_in_executor(None, imggen, prompt)
+
+ try:
+ # wait for the dalle request to complete, with a timeout of 60 seconds
+ await asyncio.wait_for(future, timeout=300)
+ print("done generating images!")
+
+ # list all files in the save_directory
+ all_files = [
+ os.path.join(root, file)
+                for root, _, files in os.walk(os.environ["SAVE_DIRECTORY"])
+ for file in files
+ ]
+
+ # sort files by their creation time (latest first)
+ sorted_files = sorted(all_files, key=os.path.getctime, reverse=True)
+
+ # get the 4 most recent files
+ latest_files = sorted_files[:4]
+ print(f"sending {len(latest_files)} images to discord...")
+
+ # send all the latest images in a single message
+ # storage_service = os.environ("STORAGE_SERVICE") # "https://storage.googleapis.com/your-bucket-name/
+ # await ctx.send(files=[storage_service.upload(filepath) for filepath in latest_files])
+
+        except asyncio.TimeoutError:
+ await ctx.send(
+ "the request took too long! it might have been censored or you're out of boosts. please try entering the prompt again."
+ )
+ except Exception as e:
+ await ctx.send(f"an error occurred: {e}")
+
+ @commands.command()
+ async def send_text(self, ctx, *, text: str, use_agent: bool = True):
+ """sends the provided text to the worker and returns the response"""
+ if use_agent:
+ response = self.bot.agent.run(text)
+ else:
+ response = self.bot.llm(text)
+ await ctx.send(response)
+
+ @commands.Cog.listener()
+ async def on_ready(self):
+ print(f"we have logged in as {self.bot.user}")
+
+ @commands.Cog.listener()
+ async def on_command_error(self, ctx, error):
+ """handles errors that occur while executing commands."""
+ if isinstance(error, commands.CommandNotFound):
+ await ctx.send("that command does not exist!")
+ else:
+ await ctx.send(f"an error occurred: {error}")
+
class Bot:
- def __init__(self, agent, llm, command_prefix="!"):
+ def __init__(self, llm, command_prefix="!"):
load_dotenv()
-
- intents = discord.intents.default()
+
+ intents = discord.Intents.default()
intents.messages = True
intents.guilds = True
intents.voice_states = True
@@ -20,6 +128,15 @@ class Bot:
# setup
self.llm = llm
+        self.bot = commands.Bot(command_prefix=command_prefix, intents=intents)
+        self.bot.llm = llm  # expose the llm so cogs (e.g. BotCommands.send_text) can reach it via self.bot.llm
+        self.discord_token = os.getenv("DISCORD_TOKEN")
+        self.storage_service = os.getenv("STORAGE_SERVICE")
+
+ # Load the BotCommands cog
+ self.bot.add_cog(BotCommands(self.bot))
+
+ def run(self):
+ self.bot.run(self.discord_token)
-        self.agent = agent
-        self.bot = commands.bot(command_prefix="!", intents=intents)
-        self.discord_token = os.getenv("DISCORD_TOKEN")
diff --git a/apps/open-sourcerer/docker-compose.yaml b/apps/open-sourcerer/docker-compose.yaml
index ebd08f37..7168363b 100644
--- a/apps/open-sourcerer/docker-compose.yaml
+++ b/apps/open-sourcerer/docker-compose.yaml
@@ -1,6 +1,7 @@
version: '3'
services:
my-python-app:
+ container_name: Open-Soucerer
build: .
ports:
- "80:80"
diff --git a/apps/open-sourcerer/main.py b/apps/open-sourcerer/main.py
index 48f83581..db209cd9 100644
--- a/apps/open-sourcerer/main.py
+++ b/apps/open-sourcerer/main.py
@@ -1,135 +1,77 @@
+import openai
import os
-import discord
-from discord.ext import commands
-import interpreter
import dotenv
-import whisper
+import logging
+import gradio as gr
+from BingImageCreator import ImageGen
+from swarms.models.bing_chat import BingChat
+# from swarms.models.bingchat import BingChat
dotenv.load_dotenv(".env")
-bot_id = os.getenv("BOT_ID")
-bot_token = os.getenv("DISCORD_TOKEN")
-
-interpreter.api_key = os.getenv("OPENAI_API_KEY")
-# interpreter.api_base = os.getenv("API_BASE")
-# interpreter.auto_run = True
-
-
-def split_text(text, chunk_size=1500):
- #########################################################################
- return [text[i : i + chunk_size] for i in range(0, len(text), chunk_size)]
-
-
-# discord initial
-intents = discord.Intents.all()
-intents.message_content = True
-client = commands.Bot(command_prefix="$", intents=intents)
-
-message_chunks = []
-send_image = False
-
-model = whisper.load_model("base")
-
-
-def transcribe(audio):
- # load audio and pad/trim it to fit 30 seconds
- audio = whisper.load_audio(audio)
- audio = whisper.pad_or_trim(audio)
-
- # make log-Mel spectrogram and move to the same device as the model
- mel = whisper.log_mel_spectrogram(audio).to(model.device)
-
- # detect the spoken language
- _, probs = model.detect_language(mel)
-
- # decode the audio
- options = whisper.DecodingOptions()
- result = whisper.decode(model, mel, options)
- return result.text
-
-
-@client.event
-async def on_message(message):
- await client.process_commands(message)
- bot_mention = f"<@{bot_id}>"
- # if ("<@1158923910855798804>" in message.content) or (message.author == client.user or message.content[0] == '$'):
- # return
- response = []
- for chunk in interpreter.chat(message.content, display=False, stream=False):
- # await message.channel.send(chunk)
- if "message" in chunk:
- response.append(chunk["message"])
- last_response = response[-1]
-
- max_message_length = 2000 # Discord's max message length is 2000 characters
- # Splitting the message into chunks of 2000 characters
- response_chunks = [
- last_response[i : i + max_message_length]
- for i in range(0, len(last_response), max_message_length)
- ]
- # Sending each chunk as a separate message
- for chunk in response_chunks:
- await message.channel.send(chunk)
-
-
-@client.command()
-async def join(ctx):
- if ctx.author.voice:
- channel = ctx.message.author.voice.channel
- print("joining..")
- await channel.connect()
- print("joined.")
- else:
- print("not in a voice channel!")
-
-
-@client.command()
-async def leave(ctx):
- if ctx.voice_client:
- await ctx.voice_client.disconnect()
- else:
- print("not in a voice channel!")
-
-
-@client.command()
-async def listen(ctx):
- if ctx.voice_client:
- print("trying to listen..")
- ctx.voice_client.start_recording(discord.sinks.WaveSink(), callback, ctx)
- print("listening..")
- else:
- print("not in a voice channel!")
-
-
-async def callback(sink: discord.sinks, ctx):
- print("in callback..")
- for user_id, audio in sink.audio_data.items():
- if user_id == ctx.author.id:
- print("saving audio..")
- audio: discord.sinks.core.AudioData = audio
- print(user_id)
- filename = "audio.wav"
- with open(filename, "wb") as f:
- f.write(audio.file.getvalue())
- print("audio saved.")
- transcription = transcribe(filename)
- print(transcription)
- response = []
- for chunk in interpreter.chat(transcription, display=False, stream=True):
- # await message.channel.send(chunk)
- if "message" in chunk:
- response.append(chunk["message"])
- await ctx.message.channel.send(" ".join(response))
-
-
-@client.command()
-async def stop(ctx):
- ctx.voice_client.stop_recording()
-
-
-@client.event
-async def on_ready():
- print(f"We have logged in as {client.user}")
-
-
-client.run(bot_token)
+# Initialize the EdgeGPTModel
+model = BingChat()
+
+response = model("Generate")
+
+logging.basicConfig(level=logging.INFO)
+
+accumulated_story = ""
+latest_caption = ""
+standard_suffix = ""
+storyboard = []
+
+caption = "Create comic about opensourcerer a robot wizard"
+
+def generate_images_with_bingchat(caption):
+ img_path = model.create_img(caption)
+ img_urls = model.images(caption)
+ return img_urls
+
+def generate_single_caption(text):
+ prompt = f"A comic about {text}."
+ response = model(text)
+ return response
+
+def interpret_text_with_gpt(text, suffix):
+ return generate_single_caption(f"{text} {suffix}")
+
+def create_standard_suffix(original_prompt):
+ return f"In the style of {original_prompt}"
+
+def gradio_interface(text=None, next_button_clicked=False):
+ global accumulated_story, latest_caption, standard_suffix, storyboard
+
+ if not standard_suffix:
+ standard_suffix = create_standard_suffix(text)
+
+ if next_button_clicked:
+ new_caption = generate_single_caption(latest_caption + " " + standard_suffix)
+ new_urls = generate_images_with_bingchat(new_caption)
+ latest_caption = new_caption
+ storyboard.append((new_urls, new_caption))
+
+ elif text:
+ caption = generate_single_caption(text + " " + standard_suffix)
+ comic_panel_urls = generate_images_with_bingchat(caption)
+ latest_caption = caption
+ storyboard.append((comic_panel_urls, caption))
+
+ storyboard_html = ""
+ for urls, cap in storyboard:
+ for url in urls:
+            # assumed markup: embed each panel image followed by its caption
+            storyboard_html += f'<img src="{url}" alt="{cap}"/><p>{cap}</p>'
+
+ return storyboard_html
+
+if __name__ == "__main__":
+ iface = gr.Interface(
+ fn=gradio_interface,
+ inputs=[
+ gr.inputs.Textbox(default="Type your story concept here", optional=True, label="Story Concept"),
+ gr.inputs.Checkbox(label="Generate Next Part")
+ ],
+ outputs=[gr.outputs.HTML()],
+ live=False # Submit button will appear
+ )
+ iface.launch()
diff --git a/apps/orchistrator/commands/ping.js b/apps/orchistrator/commands/ping.js
deleted file mode 100644
index 2fee3436..00000000
--- a/apps/orchistrator/commands/ping.js
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
-A ping command that replies with "Pong!" when bot is running.
-*/
-
-const { SlashCommandBuilder } = require("discord.js");
-
-module.exports = {
- data: new SlashCommandBuilder()
- .setName("ping")
- .setDescription("Replies with Pong!"),
- async execute(interaction) {
- await interaction.reply("Pong!");
- },
-};
\ No newline at end of file
diff --git a/apps/orchistrator/commands/server.js b/apps/orchistrator/commands/server.js
deleted file mode 100644
index ae2b6627..00000000
--- a/apps/orchistrator/commands/server.js
+++ /dev/null
@@ -1,10 +0,0 @@
-const { SlashCommandBuilder } = require('discord.js');
-
-module.exports = {
- data: new SlashCommandBuilder()
- .setName("server")
- .setDescription("Replies with server name and member count."),
- async execute(interaction) {
- await interaction.reply(`Server name: ${interaction.guild.name}\nTotal members: ${interaction.guild.memberCount}`);
- },
-};
diff --git a/apps/orchistrator/docker-compose.yml b/apps/orchistrator/docker-compose.yml
index d648751f..af6c313d 100644
--- a/apps/orchistrator/docker-compose.yml
+++ b/apps/orchistrator/docker-compose.yml
@@ -2,8 +2,10 @@ version: '3'
services:
server-bot:
- container_name: server-bot
- image: allenrkeen/server-bot:latest
+ container_name: Leonidas
+ build:
+ context: .
+ dockerfile: Dockerfile
volumes:
- /var/run/docker.sock:/var/run/docker.sock #required
env_file:
diff --git a/bingchat.py b/bingchat.py
new file mode 100644
index 00000000..d857e9e5
--- /dev/null
+++ b/bingchat.py
@@ -0,0 +1,6 @@
+from swarms.models.bing_chat import BingChat
+# Initialize the EdgeGPTModel
+bing = BingChat(cookies_path="./cookies.json")
+task = "generate topics for PositiveMed.com,: 1. Monitor Health Trends: Scan Google Alerts, authoritative health websites, and social media for emerging health, wellness, and medical discussions. 2. Keyword Research: Utilize tools like SEMrush to identify keywords with moderate to high search volume and low competition. Focus on long-tail, conversational keywords. 3. Analyze Site Data: Review PositiveMed's analytics to pinpoint popular articles and areas lacking recent content. 4. Crowdsourcing: Gather topic suggestions from the brand's audience and internal team, ensuring alignment with PositiveMed's mission. 5. Topic Evaluation: Assess topics for audience relevance, uniqueness, brand fit, current relevance, and SEO potential. 6. Tone and Style: Ensure topics can be approached with an educational, empowering, and ethical tone, in line with the brand's voice. Use this framework to generate a list of potential topics that cater to PositiveMed's audience while staying true to its brand ethos. Find trending topics for slowing and reversing aging think step by step and o into as much detail as possible"
+response = bing(task)
+print(response)
diff --git a/example.py b/example.py
index d9d4e125..fe68fef4 100644
--- a/example.py
+++ b/example.py
@@ -1,22 +1,29 @@
+from tabnanny import verbose
+from click import prompt
+from langchain import LLMChain
from swarms.models import OpenAIChat
from swarms import Worker
from swarms.prompts import PRODUCT_AGENT_PROMPT
+from swarms.models.bing_chat import BingChat
-api_key = ""
+# api_key = ""
-llm = OpenAIChat(
- openai_api_key=api_key,
- temperature=0.5,
-)
+# llm = OpenAIChat(
+# openai_api_key=api_key,
+# temperature=0.5,
+# )
+
+llm = BingChat(cookies_path="./cookies.json")
+# llm = LLMChain(llm=bing.to_dict(), prompt=prompt, verbose=verbose)
node = Worker(
llm=llm,
ai_name="Optimus Prime",
- openai_api_key=api_key,
ai_role=PRODUCT_AGENT_PROMPT,
external_tools=None,
human_in_the_loop=False,
temperature=0.5,
+    use_openai=False,  # BingChat does not need an OpenAI key or OpenAI-based memory
)
task = "Create an entirely new board game around riddles for physics"
diff --git a/playground/agents/bingchat.py b/playground/agents/bingchat.py
new file mode 100644
index 00000000..5964ede8
--- /dev/null
+++ b/playground/agents/bingchat.py
@@ -0,0 +1,15 @@
+from swarms.models.bing_chat import BingChat
+from swarms.workers.worker import Worker
+from swarms.tools.autogpt import EdgeGPTTool, tool
+from swarms.models import OpenAIChat
+import os
+from dotenv import load_dotenv
+
+load_dotenv("../.env")
+auth_cookie = os.environ.get("AUTH_COOKIE")
+auth_cookie_SRCHHPGUSR = os.environ.get("AUTH_COOKIE_SRCHHPGUSR")
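+# These cookie values are only needed for image generation via create_img; plain chat just uses cookies.json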
+
+# Initialize the BingChat model
+bing = BingChat(cookies_path="./cookies.json")
+task = "generate topics for PositiveMed.com,: 1. Monitor Health Trends: Scan Google Alerts, authoritative health websites, and social media for emerging health, wellness, and medical discussions. 2. Keyword Research: Utilize tools like SEMrush to identify keywords with moderate to high search volume and low competition. Focus on long-tail, conversational keywords. 3. Analyze Site Data: Review PositiveMed's analytics to pinpoint popular articles and areas lacking recent content. 4. Crowdsourcing: Gather topic suggestions from the brand's audience and internal team, ensuring alignment with PositiveMed's mission. 5. Topic Evaluation: Assess topics for audience relevance, uniqueness, brand fit, current relevance, and SEO potential. 6. Tone and Style: Ensure topics can be approached with an educational, empowering, and ethical tone, in line with the brand's voice. Use this framework to generate a list of potential topics that cater to PositiveMed's audience while staying true to its brand ethos. Find trending topics for slowing and reversing aging think step by step and o into as much detail as possible"
+
+bing(task)
diff --git a/playground/apps/bing_discord.py b/playground/apps/bing_discord.py
new file mode 100644
index 00000000..d35253ff
--- /dev/null
+++ b/playground/apps/bing_discord.py
@@ -0,0 +1,15 @@
+import os
+from swarms.models.bing_chat import BingChat
+from apps.discord import Bot
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Initialize the BingChat model
+auth_cookie = os.environ.get("AUTH_COOKIE")
+auth_cookie_srchhpgusr = os.environ.get("AUTH_COOKIE_SRCHHPGUSR")
+bing = BingChat(cookies_path="./cookies.json")
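+# Text chat only needs cookies.json; create_img additionally needs the _U and SRCHHPGUSR cookie values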
+
+bot = Bot(llm=bing)
+bot.generate_image(imggen=bing.create_img(auth_cookie=auth_cookie, auth_cookie_SRCHHPGUSR=auth_cookie_srchhpgusr))
+bot.send_text(use_agent=False)
diff --git a/playground/models/bingchat.py b/playground/models/bingchat.py
deleted file mode 100644
index bf06ecc6..00000000
--- a/playground/models/bingchat.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from swarms.models.bing_chat import BingChat
-from swarms.workers.worker import Worker
-from swarms.tools.autogpt import EdgeGPTTool, tool
-from swarms.models import OpenAIChat
-import os
-
-api_key = os.getenv("OPENAI_API_KEY")
-
-# Initialize the EdgeGPTModel
-edgegpt = BingChat(cookies_path="./cookies.txt")
-
-
-@tool
-def edgegpt(task: str = None):
- """A tool to run infrence on the EdgeGPT Model"""
- return EdgeGPTTool.run(task)
-
-
-# Initialize the language model,
-# This model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC
-llm = OpenAIChat(
- openai_api_key=api_key,
- temperature=0.5,
-)
-
-# Initialize the Worker with the custom tool
-worker = Worker(llm=llm, ai_name="EdgeGPT Worker", external_tools=[edgegpt])
-
-# Use the worker to process a task
-task = "Hello, my name is ChatGPT"
-response = worker.run(task)
-print(response)
diff --git a/revgpt.py b/revgpt.py
new file mode 100644
index 00000000..cd5bd2d6
--- /dev/null
+++ b/revgpt.py
@@ -0,0 +1,29 @@
+import os
+import sys
+from dotenv import load_dotenv
+from swarms.models.revgptV4 import RevChatGPTModelv4
+from swarms.models.revgptV1 import RevChatGPTModelv1
+
+root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+sys.path.append(root_dir)
+
+load_dotenv()
+
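+# RevGPT settings come from the environment; plugin ids and unverified domains are wrapped in single-element lists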
+config = {
+ "model": os.getenv("REVGPT_MODEL"),
+ "plugin_ids": [os.getenv("REVGPT_PLUGIN_IDS")],
+ "disable_history": os.getenv("REVGPT_DISABLE_HISTORY") == "True",
+ "PUID": os.getenv("REVGPT_PUID"),
+ "unverified_plugin_domains": [os.getenv("REVGPT_UNVERIFIED_PLUGIN_DOMAINS")],
+}
+
+# For v1 model
+model = RevChatGPTModelv1(access_token=os.getenv("ACCESS_TOKEN"), **config)
+# model = RevChatGPTModelv4(access_token=os.getenv("ACCESS_TOKEN"), **config)
+
+# For v3 model
+# model = RevChatGPTModel(access_token=os.getenv("OPENAI_API_KEY"), **config)
+
+task = "Write a cli snake game"
+response = model.run(task)
+print(response)
diff --git a/swarms/models/__init__.py b/swarms/models/__init__.py
index d79f29b8..5b8e6313 100644
--- a/swarms/models/__init__.py
+++ b/swarms/models/__init__.py
@@ -18,8 +18,8 @@ from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA
# from swarms.models.fuyu import Fuyu # Not working, wait until they update
import sys
-log_file = open("stderr_log.txt", "w")
-sys.stderr = log_file
+# log_file = open("stderr_log.txt", "w")
+# sys.stderr = log_file
__all__ = [
diff --git a/swarms/models/bing_chat.py b/swarms/models/bing_chat.py
index 1d2eb503..30263c61 100644
--- a/swarms/models/bing_chat.py
+++ b/swarms/models/bing_chat.py
@@ -1,4 +1,5 @@
-"""EdgeGPT model by OpenAI"""
+"""Bing-Chat model by Micorsoft"""
+import os
import asyncio
import json
from pathlib import Path
@@ -25,7 +26,7 @@ class BingChat:
"""
- def __init__(self, cookies_path: str):
+ def __init__(self, cookies_path: str = None):
self.cookies = json.loads(open(cookies_path, encoding="utf-8").read())
self.bot = asyncio.run(Chatbot.create(cookies=self.cookies))
@@ -43,7 +44,7 @@ class BingChat:
return response["text"]
def create_img(
- self, prompt: str, output_dir: str = "./output", auth_cookie: str = None
+ self, prompt: str, output_dir: str = "./output", auth_cookie: str = None, auth_cookie_SRCHHPGUSR: str = None
) -> str:
"""
Generate an image based on the provided prompt and save it in the given output directory.
@@ -52,11 +53,11 @@ class BingChat:
if not auth_cookie:
raise ValueError("Auth cookie is required for image generation.")
- image_generator = ImageGen(auth_cookie, quiet=True)
+        image_generator = ImageGen(auth_cookie, auth_cookie_SRCHHPGUSR, quiet=True)
images = image_generator.get_images(prompt)
image_generator.save_images(images, output_dir=output_dir)
- return Path(output_dir) / images[0]["path"]
+ return Path(output_dir) / images[0]
@staticmethod
def set_cookie_dir_path(path: str):
diff --git a/swarms/models/revgptV1.py b/swarms/models/revgptV1.py
index a7327d23..400c9b25 100644
--- a/swarms/models/revgptV1.py
+++ b/swarms/models/revgptV1.py
@@ -35,13 +35,13 @@ from httpx import AsyncClient
from OpenAIAuth import Auth0 as Authenticator
from rich.live import Live
from rich.markdown import Markdown
-import schemas.typings as t
+import swarms.schemas.typings as t
from swarms.utils.revutils import create_completer
from swarms.utils.revutils import create_session
from swarms.utils.revutils import get_input
# BASE_URL = environ.get("CHATGPT_BASE_URL", "http://192.168.250.249:9898/api/")
-# BASE_URL = os.environ.get("CHATGPT_BASE_URL", "https://ai.fakeopen.com/api/")
+BASE_URL = os.environ.get("CHATGPT_BASE_URL", "https://ai.fakeopen.com/api/")
# BASE_URL = environ.get("CHATGPT_BASE_URL", "https://bypass.churchless.tech/")
bcolors = t.Colors()
diff --git a/swarms/models/revgptV4.py b/swarms/models/revgptV4.py
index c57182f1..fc989445 100644
--- a/swarms/models/revgptV4.py
+++ b/swarms/models/revgptV4.py
@@ -40,14 +40,14 @@ from rich.markdown import Markdown
import argparse
import re
-import schemas.typings as t
+import swarms.schemas.typings as t
from prompt_toolkit import prompt
from prompt_toolkit import PromptSession
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit.key_binding import KeyBindings
-from schemas.typings import Colors
+from swarms.schemas.typings import Colors
bindings = KeyBindings()
diff --git a/swarms/models/vllm.py b/swarms/models/vllm.py
new file mode 100644
index 00000000..9234c284
--- /dev/null
+++ b/swarms/models/vllm.py
@@ -0,0 +1,55 @@
+from vllm import LLM, SamplingParams
+import openai
+import ray
+import uvicorn
+from vllm.entrypoints import api_server as vllm_api_server
+from vllm.entrypoints.openai import api_server as openai_api_server
+from skypilot import SkyPilot
+
+class VLLMModel:
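+    # Thin wrapper around vLLM's offline LLM engine; tensor_parallel_size shards the model across that many GPUs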
+ def __init__(self, model_name="facebook/opt-125m", tensor_parallel_size=1):
+ self.model_name = model_name
+ self.tensor_parallel_size = tensor_parallel_size
+ self.model = LLM(model_name, tensor_parallel_size=tensor_parallel_size)
+ self.temperature = 1.0
+ self.max_tokens = None
+ self.sampling_params = SamplingParams(temperature=self.temperature)
+
+ def generate_text(self, prompt: str) -> str:
+ output = self.model.generate([prompt], self.sampling_params)
+ return output[0].outputs[0].text
+
+ def set_temperature(self, value: float):
+ self.temperature = value
+ self.sampling_params = SamplingParams(temperature=self.temperature)
+
+ def set_max_tokens(self, value: int):
+ self.max_tokens = value
+ self.sampling_params = SamplingParams(temperature=self.temperature, max_tokens=self.max_tokens)
+
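+    # Offline batched inference: passing all prompts at once lets vLLM schedule them together for higher throughput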
+ def offline_batched_inference(self, prompts: list) -> list:
+ outputs = self.model.generate(prompts, self.sampling_params)
+ return [output.outputs[0].text for output in outputs]
+
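+    # Start vLLM's native HTTP API server on port 8000 (uvicorn.run blocks until the server stops)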
+ def start_api_server(self):
+ uvicorn.run(vllm_api_server.app, host="0.0.0.0", port=8000)
+
+ def start_openai_compatible_server(self):
+ uvicorn.run(openai_api_server.app, host="0.0.0.0", port=8000)
+
+ def query_openai_compatible_server(self, prompt: str):
+ openai.api_key = "EMPTY"
+ openai.api_base = "http://localhost:8000/v1"
+ completion = openai.Completion.create(model=self.model_name, prompt=prompt)
+ return completion
+
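+    # Re-create the engine under Ray and answer a single prompt with tensor parallelism across the configured GPUs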
+ def distributed_inference(self, prompt: str):
+ ray.init()
+ self.model = LLM(self.model_name, tensor_parallel_size=self.tensor_parallel_size)
+ output = self.model.generate(prompt, self.sampling_params)
+ ray.shutdown()
+ return output[0].outputs[0].text
+
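+    # Assumes a SkyPilot-style helper exposing SkyPilot().launch(yaml_file); adapt to the real skypilot ("sky") Python API if needed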
+ def run_on_cloud_with_skypilot(self, yaml_file):
+ sky = SkyPilot()
+ sky.launch(yaml_file)
diff --git a/swarms/workers/worker.py b/swarms/workers/worker.py
index be422ff2..5839b8f7 100644
--- a/swarms/workers/worker.py
+++ b/swarms/workers/worker.py
@@ -67,11 +67,13 @@ class Worker:
temperature: float = 0.5,
llm=None,
openai_api_key: str = None,
+ use_openai: bool = True,
):
self.temperature = temperature
self.human_in_the_loop = human_in_the_loop
self.llm = llm
self.openai_api_key = openai_api_key
+ self.use_openai = use_openai
self.ai_name = ai_name
self.ai_role = ai_role
self.coordinates = (
@@ -148,24 +150,25 @@ class Worker:
self.tools.extend(external_tools)
def setup_memory(self):
- """
- Set up memory for the worker.
- """
- openai_api_key = os.getenv("OPENAI_API_KEY") or self.openai_api_key
- try:
- embeddings_model = OpenAIEmbeddings(openai_api_key=openai_api_key)
- embedding_size = 1536
- index = faiss.IndexFlatL2(embedding_size)
-
- self.vectorstore = FAISS(
- embeddings_model.embed_query, index, InMemoryDocstore({}), {}
- )
-
- except Exception as error:
- raise RuntimeError(
- f"Error setting up memory perhaps try try tuning the embedding size: {error}"
- )
-
+ """
+ Set up memory for the worker.
+ """
+ if self.use_openai: # Only use OpenAI if use_openai is True
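+            # FAISS-backed memory uses OpenAI embeddings, so it is skipped entirely for non-OpenAI LLMs such as BingChat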
+ openai_api_key = os.getenv("OPENAI_API_KEY") or self.openai_api_key
+ try:
+ embeddings_model = OpenAIEmbeddings(openai_api_key=openai_api_key)
+ embedding_size = 1536
+ index = faiss.IndexFlatL2(embedding_size)
+
+ self.vectorstore = FAISS(
+ embeddings_model.embed_query, index, InMemoryDocstore({}), {}
+ )
+
+ except Exception as error:
+ raise RuntimeError(
+                    f"Error setting up memory; perhaps try tuning the embedding size: {error}"
+ )
+
def setup_agent(self):
"""
Set up the autonomous agent.