feat: Integrate edge gpt

pull/64/head
Zack 1 year ago
parent 923adaa89d
commit 5830cb59d6

@ -10,20 +10,8 @@ ADD . /app
# Install any needed packages specified in requirements.txt # Install any needed packages specified in requirements.txt
RUN pip install --no-cache-dir -r requirements.txt RUN pip install --no-cache-dir -r requirements.txt
# Clone the Pycord-Development repository and install it
RUN git clone https://github.com/Pycord-Development/pycord && \
cd pycord && \
pip install -U .
# Make port 80 available to the world outside this container # Make port 80 available to the world outside this container
EXPOSE 80 EXPOSE 80
ENV ELEVEN_LABS_API_KEY="" \
OPENAI_API_KEY="" \
DISCORD_TOKEN="" \
API_KEY="" \
API_BASE="" \
SYSTEM_MESSAGE=""
# Run DiscordInterpreter.py when the container launches # Run DiscordInterpreter.py when the container launches
CMD ["python", "main.py"] CMD ["python", "main.py"]

@ -4,3 +4,5 @@ services:
build: . build: .
ports: ports:
- "80:80" - "80:80"
env_file:
- ./.env

@ -3,15 +3,15 @@ import discord
from discord.ext import commands from discord.ext import commands
import interpreter import interpreter
import dotenv import dotenv
from voice import transcribe import whisper
dotenv.load_dotenv(".env") dotenv.load_dotenv(".env")
bot_id = os.getenv("BOT_ID") bot_id = os.getenv("BOT_ID")
bot_token = os.getenv("DISCORD_TOKEN") bot_token = os.getenv("DISCORD_TOKEN")
interpreter.api_key = os.getenv("API_KEY") interpreter.api_key = os.getenv("OPENAI_API_KEY")
interpreter.api_base = os.getenv("API_BASE") # interpreter.api_base = os.getenv("API_BASE")
# interpreter.auto_run = True # interpreter.auto_run = True
def split_text(text, chunk_size=1500): def split_text(text, chunk_size=1500):
@ -26,12 +26,31 @@ client = commands.Bot(command_prefix="$", intents=intents)
message_chunks = [] message_chunks = []
send_image = False send_image = False
# Shared Whisper ASR model, loaded once at module import ("base" checkpoint).
model = whisper.load_model("base")


def transcribe(audio):
    """Transcribe an audio file to text with the module-level Whisper model.

    Parameters:
        audio: path to an audio file readable by whisper.load_audio.

    Returns:
        The decoded transcript as a string.
    """
    # Load audio and pad/trim it to the 30-second window Whisper expects.
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    # Make a log-Mel spectrogram and move it to the model's device.
    mel = whisper.log_mel_spectrogram(audio).to(model.device)

    # NOTE: the original also ran model.detect_language(mel) here, but the
    # result was never used.  whisper.decode auto-detects the language when
    # DecodingOptions has no explicit language, so the redundant detection
    # pass was dropped.
    options = whisper.DecodingOptions()
    result = whisper.decode(model, mel, options)
    return result.text
@client.event @client.event
async def on_message(message): async def on_message(message):
await client.process_commands(message) await client.process_commands(message)
bot_mention = f"<@{bot_id}>" bot_mention = f"<@{bot_id}>"
if (bot_mention in message.content) or (message.author == client.user or message.content[0] == '$'): # if ("<@1158923910855798804>" in message.content) or (message.author == client.user or message.content[0] == '$'):
return # return
response = [] response = []
for chunk in interpreter.chat(message.content, display=False, stream=False): for chunk in interpreter.chat(message.content, display=False, stream=False):
# await message.channel.send(chunk) # await message.channel.send(chunk)

@ -1,4 +1,5 @@
openai-whisper openai-whisper
py-cord
discord discord
open-interpreter open-interpreter
elevenlabs elevenlabs

@ -5,33 +5,14 @@ import gradio as gr
from pydub import AudioSegment from pydub import AudioSegment
import io import io
from elevenlabs import generate, play, set_api_key from elevenlabs import generate, play, set_api_key
import whisper
import dotenv import dotenv
dotenv.load_dotenv(".env") dotenv.load_dotenv(".env")
# interpreter.model = "TheBloke/Mistral-7B-OpenOrca-GGUF" # interpreter.model = "TheBloke/Mistral-7B-OpenOrca-GGUF"
interpreter.auto_run = True interpreter.auto_run = True
# Shared Whisper ASR model, loaded once at module import ("base" checkpoint).
model = whisper.load_model("base")


def transcribe(audio):
    """Transcribe an audio file to text with the module-level Whisper model.

    Parameters:
        audio: path to an audio file readable by whisper.load_audio.

    Returns:
        The decoded transcript as a string.
    """
    # load audio and pad/trim it to fit 30 seconds
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)
    # make log-Mel spectrogram and move to the same device as the model
    mel = whisper.log_mel_spectrogram(audio).to(model.device)
    # detect the spoken language
    # NOTE(review): probs is never used, so this detection pass is redundant
    # work — whisper.decode auto-detects when DecodingOptions sets no language.
    _, probs = model.detect_language(mel)
    # decode the audio
    options = whisper.DecodingOptions()
    result = whisper.decode(model, mel, options)
    return result.text
set_api_key("ELEVEN_LABS_API_KEY") set_api_key("ELEVEN_LABS_API_KEY")

@ -7,8 +7,4 @@ RUN npm install
COPY . . COPY . .
ENV DISCORD_TOKEN "" \
DISCORD_CLIENT_ID "" \
DISCORD_GUILD_ID ""
CMD [ "node", "index.js" ] CMD [ "node", "index.js" ]

@ -6,7 +6,5 @@ services:
image: allenrkeen/server-bot:latest image: allenrkeen/server-bot:latest
volumes: volumes:
- /var/run/docker.sock:/var/run/docker.sock #required - /var/run/docker.sock:/var/run/docker.sock #required
environment: env_file:
- DISCORD_TOKEN=your_token_here #required - ./.env # environment:
- DISCORD_CLIENT_ID=your_client_id_here #required
- DISCORD_GUILD_ID=your_guild_id_here #optional

@ -0,0 +1,8 @@
attrs==21.2.0
griptape==0.18.2
oceandb==0.1.0
pgvector==0.2.3
pydantic==1.10.8
SQLAlchemy==2.0.20
swarms==1.8.2

@ -0,0 +1,62 @@
"""EdgeGPT model by OpenAI"""
import asyncio, json
from EdgeGPT.EdgeGPT import Chatbot, ConversationStyle
from EdgeGPT.EdgeUtils import ImageQuery, Query, Cookie
from EdgeGPT.ImageGen import ImageGen
from pathlib import Path
class EdgeGPTModel:
    """
    Wrapper around the reverse-engineered EdgeGPT (Microsoft Bing Chat) client.

    Parameters
    ----------
    cookies_path : str
        Path to the cookies.json necessary for authenticating with EdgeGPT.

    Examples
    --------
    >>> edgegpt = EdgeGPTModel(cookies_path="./path/to/cookies.json")
    >>> response = edgegpt.ask("Hello, my name is ChatGPT")
    >>> image_path = edgegpt.generate_image("Sunset over mountains", auth_cookie="...")
    """

    def __init__(self, cookies_path: str):
        # Context manager so the cookie file is closed even if parsing fails
        # (the original opened the file and never closed it).
        with open(cookies_path, encoding="utf-8") as cookie_file:
            self.cookies = json.load(cookie_file)
        # Chatbot.create is a coroutine; run it to completion synchronously.
        self.bot = asyncio.run(Chatbot.create(cookies=self.cookies))

    def ask(self, prompt: str, style: ConversationStyle = ConversationStyle.creative) -> str:
        """
        Get a text response from EdgeGPT for the provided prompt.

        Parameters
        ----------
        prompt : str
            The user message to send.
        style : ConversationStyle
            Bing conversation style (creative by default).

        Returns
        -------
        str
            The 'text' field of the simplified response.
        """
        response = asyncio.run(
            self.bot.ask(prompt=prompt, conversation_style=style, simplify_response=True)
        )
        return response['text']

    def generate_image(self, prompt: str, output_dir: str = "./output", auth_cookie: str = None) -> str:
        """
        Generate an image for the prompt and save it under output_dir.

        Returns the path of the first generated image.

        Raises
        ------
        ValueError
            If no auth_cookie is supplied (required by the image endpoint).
        """
        if not auth_cookie:
            raise ValueError("Auth cookie is required for image generation.")
        image_generator = ImageGen(auth_cookie, quiet=True)
        images = image_generator.get_images(prompt)
        image_generator.save_images(images, output_dir=output_dir)
        # NOTE(review): assumes get_images returns dicts with a 'path' key —
        # confirm against the installed EdgeGPT release (some versions return
        # plain URL strings, which would make this indexing raise TypeError).
        return Path(output_dir) / images[0]['path']

    @staticmethod
    def set_cookie_dir_path(path: str):
        """
        Set the directory path EdgeGPT uses for managing cookies.
        """
        Cookie.dir_path = Path(path)
# Example Usage:
# edgegpt = EdgeGPTModel(cookies_path="./path/to/cookies.json")
# text_response = edgegpt.ask("Hello, my name is ChatGPT")
# image_path = edgegpt.generate_image("Sunset over mountains", auth_cookie="YOUR_AUTH_COOKIE")
Loading…
Cancel
Save