diff --git a/apps/open-sourcerer/Dockerfile b/apps/open-sourcerer/Dockerfile
index 6f52006b..8033ee16 100644
--- a/apps/open-sourcerer/Dockerfile
+++ b/apps/open-sourcerer/Dockerfile
@@ -10,20 +10,8 @@ ADD . /app
 # Install any needed packages specified in requirements.txt
 RUN pip install --no-cache-dir -r requirements.txt
 
-# Clone the Pycord-Development repository and install it
-RUN git clone https://github.com/Pycord-Development/pycord && \
-    cd pycord && \
-    pip install -U .
-
 # Make port 80 available to the world outside this container
 EXPOSE 80
 
-ENV ELEVEN_LABS_API_KEY="" \
-    OPENAI_API_KEY="" \
-    DISCORD_TOKEN="" \
-    API_KEY="" \
-    API_BASE="" \
-    SYSTEM_MESSAGE=""
-
 # Run DiscordInterpreter.py when the container launches
 CMD ["python", "main.py"]
diff --git a/apps/open-sourcerer/docker-compose.yaml b/apps/open-sourcerer/docker-compose.yaml
index b1552617..ebd08f37 100644
--- a/apps/open-sourcerer/docker-compose.yaml
+++ b/apps/open-sourcerer/docker-compose.yaml
@@ -4,3 +4,5 @@ services:
     build: .
     ports:
       - "80:80"
+    env_file:
+      - ./.env
diff --git a/apps/open-sourcerer/main.py b/apps/open-sourcerer/main.py
index 6ff4ac02..4f66d9eb 100644
--- a/apps/open-sourcerer/main.py
+++ b/apps/open-sourcerer/main.py
@@ -3,15 +3,15 @@ import discord
 from discord.ext import commands
 import interpreter
 import dotenv
-from voice import transcribe
+import whisper
 
 dotenv.load_dotenv(".env")
 
 bot_id = os.getenv("BOT_ID")
 bot_token = os.getenv("DISCORD_TOKEN")
 
-interpreter.api_key = os.getenv("API_KEY")
-interpreter.api_base = os.getenv("API_BASE")
+interpreter.api_key = os.getenv("OPENAI_API_KEY")
+# interpreter.api_base = os.getenv("API_BASE")
 # interpreter.auto_run = True
 
 def split_text(text, chunk_size=1500):
@@ -26,12 +26,31 @@ client = commands.Bot(command_prefix="$", intents=intents)
 message_chunks = []
 send_image = False
 
+model = whisper.load_model("base")
+
+def transcribe(audio):
+
+    # load audio and pad/trim it to fit 30 seconds
+    audio = whisper.load_audio(audio)
+    audio = whisper.pad_or_trim(audio)
+
+    # make log-Mel spectrogram and move to the same device as the model
+    mel = whisper.log_mel_spectrogram(audio).to(model.device)
+
+    # detect the spoken language
+    _, probs = model.detect_language(mel)
+
+    # decode the audio
+    options = whisper.DecodingOptions()
+    result = whisper.decode(model, mel, options)
+    return result.text
+
 @client.event
 async def on_message(message):
     await client.process_commands(message)
     bot_mention = f"<@{bot_id}>"
-    if (bot_mention in message.content) or (message.author == client.user or message.content[0] == '$'):
-        return
+    # if ("<@1158923910855798804>" in message.content) or (message.author == client.user or message.content[0] == '$'):
+    #     return
     response = []
     for chunk in interpreter.chat(message.content, display=False, stream=False):
         # await message.channel.send(chunk)
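
Note on the transcribe() helper inlined above: it mirrors the low-level example from the openai-whisper README, so pad_or_trim restricts decoding to the first 30 seconds of audio and the detected-language probabilities are computed but unused. If full-length transcription is ever needed, whisper's higher-level API handles the 30-second windowing internally — a minimal sketch, with "voice-message.wav" as a placeholder path:

    import whisper

    model = whisper.load_model("base")
    # transcribe() splits long audio into 30-second windows and stitches the text back together
    result = model.transcribe("voice-message.wav")
    print(result["text"])
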
diff --git a/apps/open-sourcerer/requirements.txt b/apps/open-sourcerer/requirements.txt
index fe6eb21f..1f9ab0f1 100644
--- a/apps/open-sourcerer/requirements.txt
+++ b/apps/open-sourcerer/requirements.txt
@@ -1,4 +1,5 @@
 openai-whisper
+py-cord
 discord
 open-interpreter
 elevenlabs
diff --git a/apps/open-sourcerer/voice.py b/apps/open-sourcerer/voice.py
index 3b42aa40..b26fa299 100644
--- a/apps/open-sourcerer/voice.py
+++ b/apps/open-sourcerer/voice.py
@@ -5,33 +5,14 @@ import gradio as gr
 from pydub import AudioSegment
 import io
 from elevenlabs import generate, play, set_api_key
-import whisper
 import dotenv
 
 dotenv.load_dotenv(".env")
 
 # interpreter.model = "TheBloke/Mistral-7B-OpenOrca-GGUF"
 interpreter.auto_run = True
 
-model = whisper.load_model("base")
-
-def transcribe(audio):
-
-    # load audio and pad/trim it to fit 30 seconds
-    audio = whisper.load_audio(audio)
-    audio = whisper.pad_or_trim(audio)
-
-    # make log-Mel spectrogram and move to the same device as the model
-    mel = whisper.log_mel_spectrogram(audio).to(model.device)
-
-    # detect the spoken language
-    _, probs = model.detect_language(mel)
-
-    # decode the audio
-    options = whisper.DecodingOptions()
-    result = whisper.decode(model, mel, options)
-    return result.text
-
 set_api_key("ELEVEN_LABS_API_KEY")
diff --git a/apps/orchistrator/Dockerfile b/apps/orchistrator/Dockerfile
index 3a112e19..6fe95fab 100644
--- a/apps/orchistrator/Dockerfile
+++ b/apps/orchistrator/Dockerfile
@@ -7,8 +7,4 @@ RUN npm install
 
 COPY . .
 
-ENV DISCORD_TOKEN "" \
-    DISCORD_CLIENT_ID "" \
-    DISCORD_GUILD_ID ""
-
 CMD [ "node", "index.js" ]
diff --git a/apps/orchistrator/docker-compose.yml b/apps/orchistrator/docker-compose.yml
index 044875a5..d648751f 100644
--- a/apps/orchistrator/docker-compose.yml
+++ b/apps/orchistrator/docker-compose.yml
@@ -6,7 +6,5 @@ services:
     image: allenrkeen/server-bot:latest
     volumes:
       - /var/run/docker.sock:/var/run/docker.sock #required
-    environment:
-      - DISCORD_TOKEN=your_token_here #required
-      - DISCORD_CLIENT_ID=your_client_id_here #required
-      - DISCORD_GUILD_ID=your_guild_id_here #optional
+    env_file:
+      - ./.env # environment:
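
Both services now load their secrets from a local .env file instead of baked-in ENV defaults. A sketch of the expected files — the variable names come from the code and the removed ENV blocks, the values are placeholders:

    # apps/open-sourcerer/.env
    OPENAI_API_KEY=sk-...
    DISCORD_TOKEN=...
    BOT_ID=...
    ELEVEN_LABS_API_KEY=...

    # apps/orchistrator/.env
    DISCORD_TOKEN=your_token_here          # required
    DISCORD_CLIENT_ID=your_client_id_here  # required
    DISCORD_GUILD_ID=your_guild_id_here    # optional

One observation while touching voice.py: set_api_key("ELEVEN_LABS_API_KEY") passes the literal variable name rather than os.getenv("ELEVEN_LABS_API_KEY"), which looks unintended.
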
diff --git a/swarms/memory/requirements.txt b/swarms/memory/requirements.txt
new file mode 100644
index 00000000..00934f5c
--- /dev/null
+++ b/swarms/memory/requirements.txt
@@ -0,0 +1,7 @@
+attrs==21.2.0
+griptape==0.18.2
+oceandb==0.1.0
+pgvector==0.2.3
+pydantic==1.10.8
+SQLAlchemy==2.0.20
+swarms==1.8.2
diff --git a/swarms/models/bing_chat.py b/swarms/models/bing_chat.py
new file mode 100644
index 00000000..7074ed62
--- /dev/null
+++ b/swarms/models/bing_chat.py
@@ -0,0 +1,62 @@
+"""Bing Chat model wrapper built on the EdgeGPT library."""
+import asyncio, json
+from EdgeGPT.EdgeGPT import Chatbot, ConversationStyle
+from EdgeGPT.EdgeUtils import ImageQuery, Query, Cookie
+from EdgeGPT.ImageGen import ImageGen
+from pathlib import Path
+
+
+class EdgeGPTModel:
+    """
+    Bing Chat model wrapper built on the EdgeGPT library.
+
+    Parameters
+    ----------
+    cookies_path : str
+        Path to the cookies.json necessary for authenticating with EdgeGPT
+
+    Examples
+    --------
+    >>> edgegpt = EdgeGPTModel(cookies_path="./path/to/cookies.json")
+    >>> response = edgegpt.ask("Hello, my name is ChatGPT")
+    >>> image_path = edgegpt.generate_image("Sunset over mountains", auth_cookie="YOUR_AUTH_COOKIE")
+
+    """
+
+    def __init__(self, cookies_path: str):
+        self.cookies = json.loads(open(cookies_path, encoding="utf-8").read())
+        self.bot = asyncio.run(Chatbot.create(cookies=self.cookies))
+
+    def ask(self, prompt: str, style: ConversationStyle = ConversationStyle.creative) -> str:
+        """
+        Get a text response using the EdgeGPT model based on the provided prompt.
+        """
+        response = asyncio.run(self.bot.ask(prompt=prompt, conversation_style=style, simplify_response=True))
+        return response['text']
+
+    def generate_image(self, prompt: str, output_dir: str = "./output", auth_cookie: str = None) -> Path:
+        """
+        Generate images for the provided prompt and save them in the given output directory.
+        Returns the output directory containing the saved images.
+        """
+        if not auth_cookie:
+            raise ValueError("Auth cookie is required for image generation.")
+
+        image_generator = ImageGen(auth_cookie, quiet=True)
+        images = image_generator.get_images(prompt)
+        image_generator.save_images(images, output_dir=output_dir)
+
+        return Path(output_dir)
+
+    @staticmethod
+    def set_cookie_dir_path(path: str):
+        """
+        Set the directory path for managing cookies.
+        """
+        Cookie.dir_path = Path(path)
+
+
+# Example Usage:
+# edgegpt = EdgeGPTModel(cookies_path="./path/to/cookies.json")
+# text_response = edgegpt.ask("Hello, my name is ChatGPT")
+# output_dir = edgegpt.generate_image("Sunset over mountains", auth_cookie="YOUR_AUTH_COOKIE")
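
A quick smoke test of the new wrapper, assuming the repo root is importable; the cookies path is a placeholder, and ImageGen authenticates with the value of Bing's "_U" cookie rather than the cookies.json file:

    from swarms.models.bing_chat import EdgeGPTModel

    edgegpt = EdgeGPTModel(cookies_path="./cookies.json")
    print(edgegpt.ask("Say hello in one sentence"))

    # image generation requires the "_U" auth cookie value
    out_dir = edgegpt.generate_image("Sunset over mountains", auth_cookie="<_U cookie value>")
    print(out_dir)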