flow and clean up

Former-commit-id: a0e6d44d1e
pull/88/head
Kye 1 year ago
parent d035b8aa1d
commit 64253ec9c2

@ -1,48 +0,0 @@
# This is a Dockerfile for running unit tests
ARG POETRY_HOME=/opt/poetry
# Use the Python base image
FROM python:3.11.2-bullseye AS builder
# Define the version of Poetry to install (default is 1.4.2)
ARG POETRY_VERSION=1.4.2
# Define the directory to install Poetry to (default is /opt/poetry)
ARG POETRY_HOME
# Create a Python virtual environment for Poetry and install it
RUN python3 -m venv ${POETRY_HOME} && \
$POETRY_HOME/bin/pip install --upgrade pip && \
$POETRY_HOME/bin/pip install poetry==${POETRY_VERSION}
# Test if Poetry is installed in the expected path
RUN echo "Poetry version:" && $POETRY_HOME/bin/poetry --version
# Set the working directory for the app
WORKDIR /app
# Use a multi-stage build to install dependencies
FROM builder AS dependencies
ARG POETRY_HOME
# Copy only the dependency files for installation
COPY pyproject.toml poetry.lock poetry.toml ./
# Install the Poetry dependencies (this layer will be cached as long as the dependencies don't change)
RUN $POETRY_HOME/bin/poetry install --no-interaction --no-ansi --with test
# Use a multi-stage build to run tests
FROM dependencies AS tests
# Copy the rest of the app source code (this layer will be invalidated and rebuilt whenever the source code changes)
COPY . .
RUN /opt/poetry/bin/poetry install --no-interaction --no-ansi --with test
# Set the entrypoint to run tests using Poetry
ENTRYPOINT ["/opt/poetry/bin/poetry", "run", "pytest"]
# Set the default command to run all unit tests
CMD ["tests/"]

@ -17,7 +17,7 @@ Swarms is a modular framework that enables reliable and useful multi-agent colla
</div>
-## Purpose
+## Vision
At Swarms, we're transforming the landscape of AI from siloed AI agents to a unified 'swarm' of intelligence. Through relentless iteration and the power of collective insight from our 1500+ Agora researchers, we're developing a groundbreaking framework for AI collaboration. Our mission is to catalyze a paradigm shift, advancing Humanity with the power of unified autonomous AI agent swarms.
-----

@ -1,149 +0,0 @@
import os
import asyncio
import dalle3
import discord
from dotenv import load_dotenv
from discord.ext import commands
class Bot:
def __init__(self, agent, llm, command_prefix="!"):
load_dotenv()
intents = discord.Intents.default()
intents.messages = True
intents.guilds = True
intents.voice_states = True
intents.message_content = True
# setup
self.llm = llm
self.agent = agent
self.bot = commands.Bot(command_prefix=command_prefix, intents=intents)
self.discord_token = os.getenv("DISCORD_TOKEN")
self.storage_service = os.getenv("STORAGE_SERVICE")
@self.bot.event
async def on_ready():
print(f"we have logged in as {self.bot.user}")
@self.bot.command()
async def greet(ctx):
"""greets the user."""
await ctx.send(f"hello, {ctx.author.name}!")
@self.bot.command()
async def help_me(ctx):
"""provides a list of commands and their descriptions."""
help_text = """
- `!greet`: greets you.
- `!run [description]`: generates a video based on the given description.
- `!help_me`: provides this list of commands and their descriptions.
"""
await ctx.send(help_text)
@self.bot.event
async def on_command_error(ctx, error):
"""handles errors that occur while executing commands."""
if isinstance(error, commands.CommandNotFound):
await ctx.send("that command does not exist!")
else:
await ctx.send(f"an error occurred: {error}")
@self.bot.command()
async def join(ctx):
"""joins the voice channel that the user is in."""
if ctx.author.voice:
channel = ctx.author.voice.channel
await channel.connect()
else:
await ctx.send("you are not in a voice channel!")
@self.bot.command()
async def leave(ctx):
"""leaves the voice channel that the self.bot is in."""
if ctx.voice_client:
await ctx.voice_client.disconnect()
else:
await ctx.send("i am not in a voice channel!")
# voice_transcription.py
@self.bot.command()
async def listen(ctx):
"""starts listening to voice in the voice channel that the bot is in."""
if ctx.voice_client:
# create a wave sink to record the audio
sink = discord.sinks.WaveSink()
# start recording
ctx.voice_client.start_recording(sink)
await ctx.send("started listening and recording.")
else:
await ctx.send("i am not in a voice channel!")
# image_generator.py
@self.bot.command()
async def generate_image(ctx, *, prompt: str):
"""generates images based on the provided prompt"""
await ctx.send(f"generating images for prompt: `{prompt}`...")
loop = asyncio.get_event_loop()
# initialize a future object for the dalle instance
model_instance = dalle3()
future = loop.run_in_executor(None, model_instance.run, prompt)  # None -> default thread pool
try:
# wait for the dalle request to complete, with a timeout of 300 seconds
await asyncio.wait_for(future, timeout=300)
print("done generating images!")
# list all files in the save_directory
all_files = [
os.path.join(root, file)
for root, _, files in os.walk(os.environ["SAVE_DIRECTORY"])
for file in files
]
# sort files by their creation time (latest first)
sorted_files = sorted(all_files, key=os.path.getctime, reverse=True)
# get the 4 most recent files
latest_files = sorted_files[:4]
print(f"sending {len(latest_files)} images to discord...")
# send all the latest images in a single message
# note: whatever is stored here must expose an upload(filepath) method;
# the raw bucket URL string from the environment will not work by itself
storage_service = os.environ[
"STORAGE_SERVICE"
]  # e.g. "https://storage.googleapis.com/your-bucket-name/"
await ctx.send(
files=[
storage_service.upload(filepath) for filepath in latest_files
]
)
except asyncio.TimeoutError:
await ctx.send(
"the request took too long! it might have been censored or you're out of boosts. please try entering the prompt again."
)
except Exception as e:
await ctx.send(f"an error occurred: {e}")
@self.bot.command()
async def send_text(ctx, *, text: str, use_agent: bool = True):
"""sends the provided text to the worker and returns the response"""
if use_agent:
response = self.agent.run(text)
else:
response = self.llm.run(text)
await ctx.send(response)
def add_command(self, name, func):
@self.bot.command(name=name)
async def command(ctx, *args):
response = func(*args)
await ctx.send(response)
def run(self):
self.bot.run(self.discord_token)

@ -1,94 +0,0 @@
# Import required libraries
from gradio import Interface, Textbox, HTML
import threading
import time
import os
import glob
import base64
from swarms.models import OpenAIChat
from swarms.agents import OmniModalAgent
# Function to convert image to base64
def image_to_base64(image_path):
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode()
# Function to get the most recently created image in the directory
def get_latest_image():
list_of_files = glob.glob("./*.png") # Replace with your image file type
if not list_of_files:
return None
latest_file = max(list_of_files, key=os.path.getctime)
return latest_file
# Initialize your OmniModalAgent
llm = OpenAIChat(model_name="gpt-4") # Replace with your actual initialization
agent = OmniModalAgent(llm) # Replace with your actual initialization
# Global variable to store chat history
chat_history = []
# Function to update chat
def update_chat(user_input):
global chat_history
chat_history.append({"type": "user", "content": user_input})
# Get agent response
agent_response = agent.run(user_input)
# Handle the case where agent_response is not in the expected dictionary format
if not isinstance(agent_response, dict):
agent_response = {"type": "text", "content": str(agent_response)}
chat_history.append(agent_response)
# Check for the most recently created image and add it to the chat history
latest_image = get_latest_image()
if latest_image:
chat_history.append({"type": "image", "content": latest_image})
return render_chat(chat_history)
# Function to render chat as HTML
def render_chat(chat_history):
chat_str = "<div style='max-height:400px;overflow-y:scroll;'>"
for message in chat_history:
if message["type"] == "user":
chat_str += f"<p><strong>User:</strong> {message['content']}</p>"
elif message["type"] == "text":
chat_str += f"<p><strong>Agent:</strong> {message['content']}</p>"
elif message["type"] == "image":
img_path = os.path.join(".", message["content"])
base64_img = image_to_base64(img_path)
chat_str += f"<p><strong>Agent:</strong> <img src='data:image/png;base64,{base64_img}' alt='image' width='200'/></p>"
chat_str += "</div>"
return chat_str
# Define Gradio interface
iface = Interface(
fn=update_chat,
inputs=Textbox(label="Your Message", type="text"),
outputs=HTML(label="Chat History"),
live=True,
)
# Function to update the chat display
def update_display():
global chat_history
while True:
iface.update(render_chat(chat_history))
time.sleep(1)  # avoid busy-spinning the polling loop
# Run the update_display function in a separate thread
threading.Thread(target=update_display, daemon=True).start()
# Run Gradio interface
iface.launch()

@ -1,6 +0,0 @@
ELEVEN_LABS_API_KEY = "<your_api_key>" # https://elevenlabs.io/speech-synthesis
OPENAI_API_KEY = "<your_api_key>" # https://platform.openai.com/account/api-keys
DISCORD_TOKEN="discord_token"
API_BASE="api_base"
SYSTEM_MESSAGE=""
BOT_ID="your_bot_id"

@ -1,17 +0,0 @@
# Use an official Python runtime as a parent image
FROM python:3.10
# Set the working directory in the container to /app
WORKDIR /app
# Add the current directory contents into the container at /app
ADD . /app
# Install any needed packages specified in requirements.txt
RUN pip install --no-cache-dir -r requirements.txt
# Make port 80 available to the world outside this container
EXPOSE 80
# Run main.py when the container launches
CMD ["python", "main.py"]

@ -1,124 +0,0 @@
---
title: open-interpreter
app_file: jarvis.py
sdk: gradio
sdk_version: 3.33.1
---
# Open-Sourcerer: The Code Sorcerer's Apprentice
![Sourcerer](Open-Sourcerer.jpg)
Greetings, fellow developer! Welcome to the realm of the Open-Sourcerer, your trusty assistant in the magical world of open source projects. Open-Sourcerer is here to assist you in finding, integrating, and mastering the arcane arts of open source code.
## Introduction
Open-Sourcerer is your magical companion, capable of traversing the vast landscapes of the internet, particularly GitHub, to discover open source projects that align with your desires. It can also lend you a hand in weaving these projects into your own creations.
### How Does Open-Sourcerer Work?
Open-Sourcerer operates in two phases:
1. **Discovery**: It explores the realms of GitHub to unearth repositories that resonate with your quest.
2. **Integration and Assistance**: Once you've chosen your allies (repositories), Open-Sourcerer helps you integrate them into your own codebase. It can even conjure code snippets to assist you.
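
To make the Discovery phase concrete, here is a minimal sketch of what a repository search could look like. This is not Open-Sourcerer's actual implementation; it simply queries GitHub's public search API with `requests` and prints the top hits for a query.

```python
# Illustrative only: query GitHub's public search API for candidate repositories.
import requests

def discover_repos(query: str, limit: int = 5):
    resp = requests.get(
        "https://api.github.com/search/repositories",
        params={"q": query, "sort": "stars", "order": "desc", "per_page": limit},
        headers={"Accept": "application/vnd.github+json"},
        timeout=30,
    )
    resp.raise_for_status()
    return [
        (item["full_name"], item["stargazers_count"], item["html_url"])
        for item in resp.json()["items"]
    ]

if __name__ == "__main__":
    for name, stars, url in discover_repos("multi-agent llm framework language:python"):
        print(f"{name} ({stars} stars) -> {url}")
```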
## Installation
Before embarking on this mystical journey, ensure you have the following:
- Python (version X.X.X)
- Git (version X.X.X)
- Your favorite code editor (e.g., Visual Studio Code)
Now, let's summon the Open-Sourcerer:
```shell
pip install open-sourcerer
```
## Configuration
Open-Sourcerer must be attuned to your intentions. Let's configure it:
```shell
open-sourcerer configure
```
Follow the instructions to set up your preferences, such as the programming languages and search keywords that align with your project.
## MVP (Minimum Viable Potion) Tasks
1. **Prepare the Cauldron**
- [ ] Create a dedicated workspace/repository for Open-Sourcerer.
2. **Web Scrying**
- [ ] Implement web scraping to search GitHub for relevant open source projects.
3. **Submodule Conjuring**
- [ ] Develop a submodule management system to add selected GitHub repositories as submodules to your workspace.
4. **Bloop Integration**
- [ ] Integrate Open-Sourcerer with the Bloop tool (https://github.com/BloopAI/bloop).
- [ ] Implement code generation and assistance features.
5. **Version Control & Long-Term Memory**
- [ ] Set up version control for the workspace and submodules.
- [ ] Create a vector database to store project information for long-term memory.
6. **Magical Interface (Optional)**
- [ ] Create a user-friendly interface for interacting with Open-Sourcerer.
7. **Testing & Documentation**
- [ ] Ensure the reliability of Open-Sourcerer through thorough testing.
- [ ] Document the magic spells for fellow developers.
## Stretch Goals (Beyond the Sorcerer's Hat)
1. **Advanced Recommendation Alchemy**
- [ ] Enhance the recommendation algorithm using machine learning or NLP.
2. **Explore Other Realms**
- [ ] Expand Open-Sourcerer's reach to platforms like GitLab, Bitbucket, and more.
3. **Code Quality Insights**
- [ ] Add code review and quality analysis features for recommended projects.
4. **Summon a Community**
- [ ] Create a community where developers can collaborate on recommended open source projects.
5. **Editor Enchantments**
- [ ] Develop plugins/extensions for popular code editors to provide real-time assistance.
6. **Language Understanding Scrolls**
- [ ] Improve Open-Sourcerer's natural language understanding capabilities.
7. **Continuous Learning**
- [ ] Implement a mechanism for Open-Sourcerer to learn and adapt from user interactions.
8. **Security Warding**
- [ ] Add security scanning to identify vulnerabilities in recommended projects.
9. **Mobile App (Optional)**
- [ ] Create a mobile app version of Open-Sourcerer for convenience on your travels.
10. **Licensing & Compliance**
- [ ] Ensure Open-Sourcerer checks the licensing of recommended projects for legal compliance.
11. **Performance Enhancements**
- [ ] Optimize Open-Sourcerer's performance for faster results.
## How to Contribute
As we embark on this magical quest, we invite other sorcerers to join us. Feel free to contribute to Open-Sourcerer's development and help us unlock even more mystical powers.
```shell
git clone https://github.com/your-fork/open-sourcerer.git
cd open-sourcerer
# Create a virtual environment and activate it
pip install -r requirements.txt
python setup.py install
```
May your code be bug-free and your projects prosperous! The Open-Sourcerer awaits your commands.

@ -1,8 +0,0 @@
version: '3'
services:
my-python-app:
build: .
ports:
- "80:80"
env_file:
- ./.env

@ -1,135 +0,0 @@
import os
import discord
from discord.ext import commands
import interpreter
import dotenv
import whisper
dotenv.load_dotenv(".env")
bot_id = os.getenv("BOT_ID")
bot_token = os.getenv("DISCORD_TOKEN")
interpreter.api_key = os.getenv("OPENAI_API_KEY")
# interpreter.api_base = os.getenv("API_BASE")
# interpreter.auto_run = True
def split_text(text, chunk_size=1500):
# split the text into chunks of at most chunk_size characters
return [text[i : i + chunk_size] for i in range(0, len(text), chunk_size)]
# discord initial
intents = discord.Intents.all()
intents.message_content = True
client = commands.Bot(command_prefix="$", intents=intents)
message_chunks = []
send_image = False
model = whisper.load_model("base")
def transcribe(audio):
# load audio and pad/trim it to fit 30 seconds
audio = whisper.load_audio(audio)
audio = whisper.pad_or_trim(audio)
# make log-Mel spectrogram and move to the same device as the model
mel = whisper.log_mel_spectrogram(audio).to(model.device)
# detect the spoken language
_, probs = model.detect_language(mel)
# decode the audio
options = whisper.DecodingOptions()
result = whisper.decode(model, mel, options)
return result.text
@client.event
async def on_message(message):
await client.process_commands(message)
bot_mention = f"<@{bot_id}>"
# if ("<@1158923910855798804>" in message.content) or (message.author == client.user or message.content[0] == '$'):
# return
response = []
for chunk in interpreter.chat(message.content, display=False, stream=False):
# await message.channel.send(chunk)
if "message" in chunk:
response.append(chunk["message"])
if not response:
return
last_response = response[-1]
max_message_length = 2000 # Discord's max message length is 2000 characters
# Splitting the message into chunks of 2000 characters
response_chunks = [
last_response[i : i + max_message_length]
for i in range(0, len(last_response), max_message_length)
]
# Sending each chunk as a separate message
for chunk in response_chunks:
await message.channel.send(chunk)
@client.command()
async def join(ctx):
if ctx.author.voice:
channel = ctx.message.author.voice.channel
print("joining..")
await channel.connect()
print("joined.")
else:
print("not in a voice channel!")
@client.command()
async def leave(ctx):
if ctx.voice_client:
await ctx.voice_client.disconnect()
else:
print("not in a voice channel!")
@client.command()
async def listen(ctx):
if ctx.voice_client:
print("trying to listen..")
ctx.voice_client.start_recording(discord.sinks.WaveSink(), callback, ctx)
print("listening..")
else:
print("not in a voice channel!")
async def callback(sink: discord.sinks.Sink, ctx):
print("in callback..")
for user_id, audio in sink.audio_data.items():
if user_id == ctx.author.id:
print("saving audio..")
audio: discord.sinks.core.AudioData = audio
print(user_id)
filename = "audio.wav"
with open(filename, "wb") as f:
f.write(audio.file.getvalue())
print("audio saved.")
transcription = transcribe(filename)
print(transcription)
response = []
for chunk in interpreter.chat(transcription, display=False, stream=True):
# await message.channel.send(chunk)
if "message" in chunk:
response.append(chunk["message"])
await ctx.message.channel.send(" ".join(response))
@client.command()
async def stop(ctx):
ctx.voice_client.stop_recording()
@client.event
async def on_ready():
print(f"We have logged in as {client.user}")
client.run(bot_token)

@ -1,6 +0,0 @@
openai-whisper
py-cord
discord
open-interpreter
elevenlabs
gradio

@ -1,97 +0,0 @@
import gradio_client as grc
import interpreter
import time
import gradio as gr
from pydub import AudioSegment
import io
from elevenlabs import generate, play, set_api_key
import os
import dotenv
dotenv.load_dotenv(".env")
# interpreter.model = "TheBloke/Mistral-7B-OpenOrca-GGUF"
interpreter.auto_run = True
set_api_key(os.getenv("ELEVEN_LABS_API_KEY"))
def get_audio_length(audio_bytes):
# Create a BytesIO object from the byte array
byte_io = io.BytesIO(audio_bytes)
# Load the audio data with PyDub
audio = AudioSegment.from_mp3(byte_io)
# Get the length of the audio in milliseconds
length_ms = len(audio)
# Optionally convert to seconds
length_s = length_ms / 1000.0
return length_s
def speak(text):
audio = generate(text=text, voice="Daniel")
play(audio, notebook=True)
audio_length = get_audio_length(audio)
time.sleep(audio_length)
# @title Text-only JARVIS
# @markdown Run this cell for a ChatGPT-like interface.
with gr.Blocks() as demo:
chatbot = gr.Chatbot()
msg = gr.Textbox()
def user(user_message, history):
return "", history + [[user_message, None]]
def bot(history):
user_message = history[-1][0]
history[-1][1] = ""
active_block_type = ""
for chunk in interpreter.chat(user_message, stream=True, display=False):
# Message
if "message" in chunk:
if active_block_type != "message":
active_block_type = "message"
history[-1][1] += chunk["message"]
yield history
# Code
if "language" in chunk:
language = chunk["language"]
if "code" in chunk:
if active_block_type != "code":
active_block_type = "code"
history[-1][1] += f"\n```{language}\n"
history[-1][1] += chunk["code"]
yield history
# Output
if "executing" in chunk:
history[-1][1] += "\n```\n\n```text\n"
yield history
if "output" in chunk:
if chunk["output"] != "KeyboardInterrupt":
history[-1][1] += chunk["output"] + "\n"
yield history
if "end_of_execution" in chunk:
history[-1][1] = history[-1][1].strip()
history[-1][1] += "\n```\n"
yield history
msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
bot, chatbot, chatbot
)
if __name__ == "__main__":
demo.queue()
demo.launch(debug=True)

@ -1,2 +0,0 @@
node_modules
.env

@ -1,3 +0,0 @@
DISCORD_TOKEN=
DISCORD_CLIENT_ID=
DISCORD_GUILD_ID=

@ -1,10 +0,0 @@
FROM node:19-slim
WORKDIR /app
COPY package.json /app
RUN npm install
COPY . .
CMD [ "node", "index.js" ]

@ -1,42 +0,0 @@
# Server-Bot
[View on Docker Hub](https://hub.docker.com/r/allenrkeen/server-bot)
### Discord bot to remotely monitor and control a Docker-based server, using the Docker socket.
Setup is pretty straightforward.
1. Create a new application in the *[discord developer portal](https://discord.com/developers/applications)*
2. Go to the bot section and click *Add Bot*
3. Reset Token and keep the token somewhere secure (This will be referred to as "DISCORD_TOKEN" in .env and docker environment variables)
4. Get the "Application ID" from the General Information tab of your application (This will be referred to as "DISCORD_CLIENT_ID" in .env and docker environment variables)
5. *Optional:* If you have developer mode enabled in Discord, get your server's ID by right-clicking on the server name and clicking "Copy ID" (This will be referred to as "DISCORD_GUILD_ID" in .env and docker environment variables)
- If you skip this, it will still work, but commands will be published globally instead of to your server and can take up to an hour to be available in your server.
- Using the Server ID will be more secure, making the commands available only in the specified server.
6. Run the application using your preferred method.
- Run the docker container with the provided [docker-compose.yml](docker-compose.yml) or the docker run command below.
```bash
docker run -v /var/run/docker.sock:/var/run/docker.sock --name server-bot \
-e DISCORD_TOKEN=your_token_here \
-e DISCORD_CLIENT_ID=your_client_id_here \
-e DISCORD_GUILD_ID=your_guild_id_here \
allenrkeen/server-bot:latest
```
- Or clone the repo, `cd` into the server-bot directory, run `npm install` to install dependencies, then `npm run start` to start the bot.
7. The program will build an invite link with the correct permissions and print it in the logs. Click the link and choose the server to add the bot to.
Current commands:
- /allcontainers
- provides container name and status for all containers
- /restartcontainer
- provides an autocomplete list of running containers to select from (or lets you type a container name), then restarts the selected container
- /stopcontainer
- provides an autocomplete list of running containers to select from (or lets you type a container name), then stops the selected container
- /startcontainer
- provides an autocomplete list of stopped containers to select from (or lets you type a container name), then starts the selected container
- /ping
- Replies with "Pong!" when the bot is listening
- /server
- Replies with the server name and member count; useful for testing

@ -1,22 +0,0 @@
/*
* This file is used to delete all commands from the Discord API.
* Only use this if you want to delete all commands and understand the consequences.
*/
require('dotenv').config();
const token = process.env.DISCORD_TOKEN;
const clientID = process.env.DISCORD_CLIENT_ID;
const guildID = process.env.DISCORD_GUILD_ID;
const { REST, Routes } = require('discord.js');
const fs = require('node:fs');
const rest = new REST({ version: '10' }).setToken(token);
rest.put(Routes.applicationCommands(clientID), { body: [] })
.then(() => console.log('Successfully deleted application (/) commands.'))
.catch(console.error);
rest.put(Routes.applicationGuildCommands(clientID, guildID), { body: [] })
.then(() => console.log('Successfully deleted guild (/) commands.'))
.catch(console.error);

@ -1,53 +0,0 @@
/*
This script pushes all commands in the commands folder to be usable in discord.
*/
require('dotenv').config();
const token = process.env.DISCORD_TOKEN;
const clientID = process.env.DISCORD_CLIENT_ID;
const guildID = process.env.DISCORD_GUILD_ID;
const { REST, Routes } = require('discord.js');
const fs = require('node:fs');
const commands = [];
// Get all commands from the commands folder
const commandFiles = fs.readdirSync('./commands').filter(file => file.endsWith('.js'));
console.log(commandFiles);
for (const file of commandFiles) {
const command = require(`../commands/${file}`);
commands.push(command.data.toJSON());
}
const rest = new REST({ version: '10' }).setToken(token);
// console.log(commands);
(async () => {
try {
console.log('Started refreshing application (/) commands.');
//publish to guild if guildID is set, otherwise publish to global
if (guildID) {
const data = await rest.put(
Routes.applicationGuildCommands(clientID, guildID),
{ body: commands },
);
console.log('Successfully reloaded '+ data.length +' commands.');
} else {
const data = await rest.put(
Routes.applicationCommands(clientID),
{ body: commands },
);
console.log('Successfully reloaded '+ data.length +' commands.');
}
} catch (error) {
console.error(error);
}
})();

@ -1,39 +0,0 @@
/* A command that lists all containers with their status */
const { SlashCommandBuilder, EmbedBuilder } = require("discord.js");
const Docker = require('node-docker-api').Docker;
module.exports = {
data: new SlashCommandBuilder()
.setName("allcontainers")
.setDescription("Lists all containers"),
async execute(interaction) {
let outArray = [];
await interaction.reply('Listing all containers...');
//create docker client
const docker = new Docker({ socketPath: '/var/run/docker.sock' });
// get all containers
const containers = await docker.container.list({ all: true});
// create array of containers with name and status
outArray = containers.map(c => {
return {
name: c.data.Names[0].slice(1),
status: c.data.State
};
});
const embedCount = Math.ceil(outArray.length / 25);
for (let i = 0; i < embedCount; i++) {
const embed = new EmbedBuilder()
.setTitle('Containers')
.addFields(outArray.slice(i * 25, (i + 1) * 25).map(e => {
return { name: e.name, value: e.status };
}))
.setColor(0x00AE86);
interaction.channel.send({ embeds: [embed] });
}
},
};

@ -1,14 +0,0 @@
/*
A ping command that replies with "Pong!" when bot is running.
*/
const { SlashCommandBuilder } = require("discord.js");
module.exports = {
data: new SlashCommandBuilder()
.setName("ping")
.setDescription("Replies with Pong!"),
async execute(interaction) {
await interaction.reply("Pong!");
},
};

@ -1,69 +0,0 @@
const { SlashCommandBuilder, EmbedBuilder } = require("discord.js");
const Docker = require('node-docker-api').Docker;
module.exports = {
data: new SlashCommandBuilder()
.setName("restartcontainer")
.setDescription("Restarts a Docker container")
.addStringOption(option =>
option.setName('container')
.setDescription('The container to restart')
.setRequired(true)
.setAutocomplete(true)),
async autocomplete(interaction) {
try {
// Create docker client
const docker = new Docker({ socketPath: '/var/run/docker.sock' });
// Get list of running containers
const containers = await docker.container.list({ all: true, filters: { status: ['running'] } });
const runningContainers = containers.map(c => c.data.Names[0].slice(1));
// Filter list of containers by focused value
const focusedValue = interaction.options.getFocused(true);
const filteredContainers = runningContainers.filter(container => container.startsWith(focusedValue.value));
//slice if more than 25
let sliced;
if (filteredContainers.length > 25) {
sliced = filteredContainers.slice(0, 25);
} else {
sliced = filteredContainers;
}
// Respond with filtered list of containers
await interaction.respond(sliced.map(container => ({ name: container, value: container })));
} catch (error) {
// Handle error
console.error(error);
await interaction.reply('An error occurred while getting the list of running containers.');
}
},
async execute(interaction) {
try {
// create docker client
const docker = new Docker({ socketPath: '/var/run/docker.sock' });
// Get container name from options
const container = interaction.options.getString('container');
// Restart container
await interaction.reply(`Restarting container "${container}"...`);
const containers = await docker.container.list({ all: true, filters: { name: [container] } });
if (containers.length === 0) {
await interaction.followUp(`Container "${container}" does not exist.`);
throw new Error(`Container "${container}" does not exist.`);
}
await containers[0].restart();
// Confirm that container was restarted
await interaction.followUp(`Container "${container}" was successfully restarted.`);
} catch (error) {
// Handle error
console.error(error);
await interaction.followUp(`An error occurred while trying to restart the container "${interaction.options.getString('container')}".`);
}
}
};

@ -1,10 +0,0 @@
const { SlashCommandBuilder } = require('discord.js');
module.exports = {
data: new SlashCommandBuilder()
.setName("server")
.setDescription("Replies with server name and member count."),
async execute(interaction) {
await interaction.reply(`Server name: ${interaction.guild.name}\nTotal members: ${interaction.guild.memberCount}`);
},
};

@ -1,92 +0,0 @@
const { SlashCommandBuilder, EmbedBuilder } = require("discord.js");
const Docker = require('node-docker-api').Docker;
module.exports = {
data: new SlashCommandBuilder()
.setName("startcontainer")
.setDescription("Starts a Docker container")
.addStringOption(option =>
option.setName('container')
.setDescription('The container to start')
.setRequired(true)
.setAutocomplete(true)),
async autocomplete(interaction) {
try {
// Create docker client
const docker = new Docker({ socketPath: '/var/run/docker.sock' });
// Get list of stopped (exited) containers
const containers = await docker.container.list({ all: true, filters: { status: ['exited'] } });
const stoppedContainers = containers.map(c => c.data.Names[0].slice(1));
// Filter list of containers by focused value
const focusedValue = interaction.options.getFocused(true);
const filteredContainers = stoppedContainers.filter(container => container.startsWith(focusedValue.value));
//slice if more than 25
let sliced;
if (filteredContainers.length > 25) {
sliced = filteredContainers.slice(0, 25);
} else {
sliced = filteredContainers;
}
// Respond with filtered list of containers
await interaction.respond(sliced.map(container => ({ name: container, value: container })));
} catch (error) {
// Handle error
console.error(error);
await interaction.reply('An error occurred while getting the list of stopped containers.');
}
},
async execute(interaction) {
try {
// Get container name from options
const containerName = interaction.options.getString('container');
// Start container in interactive mode
await interaction.reply(`Starting container "${containerName}" in interactive mode...`);
// This block follows the dockerode-style API (getContainer/inspect/attach);
// dockerode is listed in package.json alongside node-docker-api.
const Dockerode = require('dockerode');
const docker = new Dockerode({ socketPath: '/var/run/docker.sock' });
const container = docker.getContainer(containerName);
const info = await container.inspect();
if (!info) {
await interaction.followUp(`Container "${containerName}" does not exist.`);
throw new Error(`Container "${containerName}" does not exist.`);
}
await container.start({
AttachStdin: true,
AttachStdout: true,
AttachStderr: true,
Tty: true,
OpenStdin: true,
StdinOnce: false
});
// Attach to container's streams
const stream = await container.attach({
stream: true,
stdin: true,
stdout: true,
stderr: true
});
// Use socket.io for real-time communication with the container
// (assumes a socket.io server instance named `io` has been created elsewhere and is in scope)
io.on('connection', (socket) => {
socket.on('containerInput', (data) => {
stream.write(data + '\n'); // Send input to the container
});
stream.on('data', (data) => {
socket.emit('containerOutput', data.toString()); // Send container's output to the client
});
});
// Confirm that container was started
await interaction.followUp(`Container "${containerName}" was successfully started in interactive mode.`);
} catch (error) {
// Handle error
console.error(error);
await interaction.followUp(`An error occurred while trying to start the container "${interaction.options.getString('container')}" in interactive mode.`);
}
},
};

@ -1,68 +0,0 @@
const { SlashCommandBuilder, EmbedBuilder } = require("discord.js");
const Docker = require('node-docker-api').Docker;
module.exports = {
data: new SlashCommandBuilder()
.setName("stopcontainer")
.setDescription("Stops a Docker container")
.addStringOption(option =>
option.setName('container')
.setDescription('The container to stop')
.setRequired(true)
.setAutocomplete(true)),
async autocomplete(interaction) {
try {
// Create docker client
const docker = new Docker({ socketPath: '/var/run/docker.sock' });
// Get list of running containers
const containers = await docker.container.list({ all: true, filters: { status: ['running'] } });
const runningContainers = containers.map(c => c.data.Names[0].slice(1));
// Filter list of containers by focused value
const focusedValue = interaction.options.getFocused(true);
const filteredContainers = runningContainers.filter(container => container.startsWith(focusedValue.value));
//slice if more than 25
let sliced;
if (filteredContainers.length > 25) {
sliced = filteredContainers.slice(0, 25);
} else {
sliced = filteredContainers;
}
// Respond with filtered list of containers
await interaction.respond(sliced.map(container => ({ name: container, value: container })));
} catch (error) {
// Handle error
console.error(error);
await interaction.reply('An error occurred while getting the list of running containers.');
}
},
async execute(interaction) {
try {
// create docker client
const docker = new Docker({ socketPath: '/var/run/docker.sock' });
// Get container name from options
const container = interaction.options.getString('container');
// Stop container
await interaction.reply(`Stopping container "${container}"...`);
const containers = await docker.container.list({ all: true, filters: { name: [container] } });
if (containers.length === 0) {
await interaction.followUp(`Container "${container}" does not exist.`);
throw new Error(`Container "${container}" does not exist.`);
}
await containers[0].stop();
// Confirm that container was stopped
await interaction.followUp(`Container "${container}" was successfully stopped.`);
} catch (error) {
// Handle error
console.error(error);
await interaction.followUp(`An error occurred while trying to stop the container "${interaction.options.getString('container')}".`);
}
}
};

@ -1,10 +0,0 @@
version: '3'
services:
server-bot:
container_name: server-bot
image: allenrkeen/server-bot:latest
volumes:
- /var/run/docker.sock:/var/run/docker.sock #required
env_file:
- ./.env # environment:

@ -1,89 +0,0 @@
require('dotenv').config();
const fs = require('node:fs');
const path = require('node:path');
const token = process.env.DISCORD_TOKEN;
const clientID = process.env.DISCORD_CLIENT_ID;
// Require the necessary discord.js classes
const { Client, Collection, Events, GatewayIntentBits } = require('discord.js');
// Create a new client instance
const client = new Client({ intents: [GatewayIntentBits.Guilds] });
//run backend/deployCommands.js
const { exec } = require('child_process');
exec('node backend/deployCommands.js', (err, stdout, stderr) => {
if (err) {
//some err occurred
console.error(err);
} else {
// print complete output
console.log(stdout);
}
});
// When the client is ready, run this code
client.once(Events.ClientReady, c => {
console.log(`Ready! Logged in as ${c.user.tag}`);
});
// Log in to Discord with your client's token
client.login(token);
// Create a new collection for commands
client.commands = new Collection();
const commandsPath = path.join(__dirname, 'commands');
const commandFiles = fs.readdirSync(commandsPath).filter(file => file.endsWith('.js'));
for (const file of commandFiles) {
const filePath = path.join(commandsPath, file);
const command = require(filePath);
// Set a new item in the Collection with the key as the name of the command and the value as the exported module
if ('data' in command && 'execute' in command) {
client.commands.set(command.data.name, command);
} else {
console.log(`Command ${file} is missing 'data' or 'execute'`);
}
}
//build and display invite link
const inviteLink = 'https://discord.com/oauth2/authorize?client_id='+clientID+'&permissions=2147534912&scope=bot%20applications.commands';
console.log(`Invite link: ${inviteLink}`);
// execute on slash command
client.on(Events.InteractionCreate, async interaction => {
if (interaction.isChatInputCommand()) {
const command = client.commands.get(interaction.commandName);
if (!command) {
console.error(`No command matching ${interaction.commandName} was found.`);
return;
}
try {
await command.execute(interaction);
} catch (error) {
console.error(error);
// await interaction.reply({ content: 'There was an error while executing this command!', ephemeral: true });
}
} else if (interaction.isAutocomplete()) {
const command = client.commands.get(interaction.commandName);
if (!command) {
console.error(`No command matching ${interaction.commandName} was found.`);
return;
}
try {
await command.autocomplete(interaction);
} catch (error) {
console.error(error);
// await interaction.({ content: 'There was an error while executing this command!', ephemeral: true });
}
}
});

@ -1,723 +0,0 @@
{
"name": "server-bot",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "server-bot",
"version": "1.0.0",
"license": "MIT",
"dependencies": {
"discord.js": "^14.7.1",
"dockerode": "^3.3.4",
"dotenv": "^16.0.3",
"node-docker-api": "^1.1.22"
}
},
"node_modules/@balena/dockerignore": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@balena/dockerignore/-/dockerignore-1.0.2.tgz",
"integrity": "sha512-wMue2Sy4GAVTk6Ic4tJVcnfdau+gx2EnG7S+uAEe+TWJFqE4YoWN4/H8MSLj4eYJKxGg26lZwboEniNiNwZQ6Q=="
},
"node_modules/@discordjs/builders": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/@discordjs/builders/-/builders-1.4.0.tgz",
"integrity": "sha512-nEeTCheTTDw5kO93faM1j8ZJPonAX86qpq/QVoznnSa8WWcCgJpjlu6GylfINTDW6o7zZY0my2SYdxx2mfNwGA==",
"dependencies": {
"@discordjs/util": "^0.1.0",
"@sapphire/shapeshift": "^3.7.1",
"discord-api-types": "^0.37.20",
"fast-deep-equal": "^3.1.3",
"ts-mixer": "^6.0.2",
"tslib": "^2.4.1"
},
"engines": {
"node": ">=16.9.0"
}
},
"node_modules/@discordjs/collection": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/@discordjs/collection/-/collection-1.3.0.tgz",
"integrity": "sha512-ylt2NyZ77bJbRij4h9u/wVy7qYw/aDqQLWnadjvDqW/WoWCxrsX6M3CIw9GVP5xcGCDxsrKj5e0r5evuFYwrKg==",
"engines": {
"node": ">=16.9.0"
}
},
"node_modules/@discordjs/rest": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/@discordjs/rest/-/rest-1.5.0.tgz",
"integrity": "sha512-lXgNFqHnbmzp5u81W0+frdXN6Etf4EUi8FAPcWpSykKd8hmlWh1xy6BmE0bsJypU1pxohaA8lQCgp70NUI3uzA==",
"dependencies": {
"@discordjs/collection": "^1.3.0",
"@discordjs/util": "^0.1.0",
"@sapphire/async-queue": "^1.5.0",
"@sapphire/snowflake": "^3.2.2",
"discord-api-types": "^0.37.23",
"file-type": "^18.0.0",
"tslib": "^2.4.1",
"undici": "^5.13.0"
},
"engines": {
"node": ">=16.9.0"
}
},
"node_modules/@discordjs/util": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/@discordjs/util/-/util-0.1.0.tgz",
"integrity": "sha512-e7d+PaTLVQav6rOc2tojh2y6FE8S7REkqLldq1XF4soCx74XB/DIjbVbVLtBemf0nLW77ntz0v+o5DytKwFNLQ==",
"engines": {
"node": ">=16.9.0"
}
},
"node_modules/@sapphire/async-queue": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/@sapphire/async-queue/-/async-queue-1.5.0.tgz",
"integrity": "sha512-JkLdIsP8fPAdh9ZZjrbHWR/+mZj0wvKS5ICibcLrRI1j84UmLMshx5n9QmL8b95d4onJ2xxiyugTgSAX7AalmA==",
"engines": {
"node": ">=v14.0.0",
"npm": ">=7.0.0"
}
},
"node_modules/@sapphire/shapeshift": {
"version": "3.8.1",
"resolved": "https://registry.npmjs.org/@sapphire/shapeshift/-/shapeshift-3.8.1.tgz",
"integrity": "sha512-xG1oXXBhCjPKbxrRTlox9ddaZTvVpOhYLmKmApD/vIWOV1xEYXnpoFs68zHIZBGbqztq6FrUPNPerIrO1Hqeaw==",
"dependencies": {
"fast-deep-equal": "^3.1.3",
"lodash": "^4.17.21"
},
"engines": {
"node": ">=v14.0.0",
"npm": ">=7.0.0"
}
},
"node_modules/@sapphire/snowflake": {
"version": "3.4.0",
"resolved": "https://registry.npmjs.org/@sapphire/snowflake/-/snowflake-3.4.0.tgz",
"integrity": "sha512-zZxymtVO6zeXVMPds+6d7gv/OfnCc25M1Z+7ZLB0oPmeMTPeRWVPQSS16oDJy5ZsyCOLj7M6mbZml5gWXcVRNw==",
"engines": {
"node": ">=v14.0.0",
"npm": ">=7.0.0"
}
},
"node_modules/@tokenizer/token": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/@tokenizer/token/-/token-0.3.0.tgz",
"integrity": "sha512-OvjF+z51L3ov0OyAU0duzsYuvO01PH7x4t6DJx+guahgTnBHkhJdG7soQeTSFLWN3efnHyibZ4Z8l2EuWwJN3A=="
},
"node_modules/@types/node": {
"version": "18.11.18",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.11.18.tgz",
"integrity": "sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA=="
},
"node_modules/@types/ws": {
"version": "8.5.3",
"resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.3.tgz",
"integrity": "sha512-6YOoWjruKj1uLf3INHH7D3qTXwFfEsg1kf3c0uDdSBJwfa/llkwIjrAGV7j7mVgGNbzTQ3HiHKKDXl6bJPD97w==",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/asn1": {
"version": "0.2.6",
"resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz",
"integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==",
"dependencies": {
"safer-buffer": "~2.1.0"
}
},
"node_modules/base64-js": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/bcrypt-pbkdf": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz",
"integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==",
"dependencies": {
"tweetnacl": "^0.14.3"
}
},
"node_modules/bl": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz",
"integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==",
"dependencies": {
"buffer": "^5.5.0",
"inherits": "^2.0.4",
"readable-stream": "^3.4.0"
}
},
"node_modules/buffer": {
"version": "5.7.1",
"resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz",
"integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"dependencies": {
"base64-js": "^1.3.1",
"ieee754": "^1.1.13"
}
},
"node_modules/buildcheck": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/buildcheck/-/buildcheck-0.0.3.tgz",
"integrity": "sha512-pziaA+p/wdVImfcbsZLNF32EiWyujlQLwolMqUQE8xpKNOH7KmZQaY8sXN7DGOEzPAElo9QTaeNRfGnf3iOJbA==",
"optional": true,
"engines": {
"node": ">=10.0.0"
}
},
"node_modules/busboy": {
"version": "1.6.0",
"resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz",
"integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==",
"dependencies": {
"streamsearch": "^1.1.0"
},
"engines": {
"node": ">=10.16.0"
}
},
"node_modules/chownr": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz",
"integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg=="
},
"node_modules/core-util-is": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz",
"integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ=="
},
"node_modules/cpu-features": {
"version": "0.0.4",
"resolved": "https://registry.npmjs.org/cpu-features/-/cpu-features-0.0.4.tgz",
"integrity": "sha512-fKiZ/zp1mUwQbnzb9IghXtHtDoTMtNeb8oYGx6kX2SYfhnG0HNdBEBIzB9b5KlXu5DQPhfy3mInbBxFcgwAr3A==",
"hasInstallScript": true,
"optional": true,
"dependencies": {
"buildcheck": "0.0.3",
"nan": "^2.15.0"
},
"engines": {
"node": ">=10.0.0"
}
},
"node_modules/debug": {
"version": "4.3.4",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
"integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
"dependencies": {
"ms": "2.1.2"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/discord-api-types": {
"version": "0.37.24",
"resolved": "https://registry.npmjs.org/discord-api-types/-/discord-api-types-0.37.24.tgz",
"integrity": "sha512-1+Fb4huJCihdbkJLcq2p7nBmtlmAryNwjefT8wwJnL8c7bc7WA87Oaa5mbLe96QvZyfwnwRCDX40H0HhcVV50g=="
},
"node_modules/discord.js": {
"version": "14.7.1",
"resolved": "https://registry.npmjs.org/discord.js/-/discord.js-14.7.1.tgz",
"integrity": "sha512-1FECvqJJjjeYcjSm0IGMnPxLqja/pmG1B0W2l3lUY2Gi4KXiyTeQmU1IxWcbXHn2k+ytP587mMWqva2IA87EbA==",
"dependencies": {
"@discordjs/builders": "^1.4.0",
"@discordjs/collection": "^1.3.0",
"@discordjs/rest": "^1.4.0",
"@discordjs/util": "^0.1.0",
"@sapphire/snowflake": "^3.2.2",
"@types/ws": "^8.5.3",
"discord-api-types": "^0.37.20",
"fast-deep-equal": "^3.1.3",
"lodash.snakecase": "^4.1.1",
"tslib": "^2.4.1",
"undici": "^5.13.0",
"ws": "^8.11.0"
},
"engines": {
"node": ">=16.9.0"
}
},
"node_modules/docker-modem": {
"version": "3.0.6",
"resolved": "https://registry.npmjs.org/docker-modem/-/docker-modem-3.0.6.tgz",
"integrity": "sha512-h0Ow21gclbYsZ3mkHDfsYNDqtRhXS8fXr51bU0qr1dxgTMJj0XufbzX+jhNOvA8KuEEzn6JbvLVhXyv+fny9Uw==",
"dependencies": {
"debug": "^4.1.1",
"readable-stream": "^3.5.0",
"split-ca": "^1.0.1",
"ssh2": "^1.11.0"
},
"engines": {
"node": ">= 8.0"
}
},
"node_modules/dockerode": {
"version": "3.3.4",
"resolved": "https://registry.npmjs.org/dockerode/-/dockerode-3.3.4.tgz",
"integrity": "sha512-3EUwuXnCU+RUlQEheDjmBE0B7q66PV9Rw5NiH1sXwINq0M9c5ERP9fxgkw36ZHOtzf4AGEEYySnkx/sACC9EgQ==",
"dependencies": {
"@balena/dockerignore": "^1.0.2",
"docker-modem": "^3.0.0",
"tar-fs": "~2.0.1"
},
"engines": {
"node": ">= 8.0"
}
},
"node_modules/dotenv": {
"version": "16.0.3",
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.0.3.tgz",
"integrity": "sha512-7GO6HghkA5fYG9TYnNxi14/7K9f5occMlp3zXAuSxn7CKCxt9xbNWG7yF8hTCSUchlfWSe3uLmlPfigevRItzQ==",
"engines": {
"node": ">=12"
}
},
"node_modules/end-of-stream": {
"version": "1.4.4",
"resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
"integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
"dependencies": {
"once": "^1.4.0"
}
},
"node_modules/fast-deep-equal": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="
},
"node_modules/file-type": {
"version": "18.0.0",
"resolved": "https://registry.npmjs.org/file-type/-/file-type-18.0.0.tgz",
"integrity": "sha512-jjMwFpnW8PKofLE/4ohlhqwDk5k0NC6iy0UHAJFKoY1fQeGMN0GDdLgHQrvCbSpMwbqzoCZhRI5dETCZna5qVA==",
"dependencies": {
"readable-web-to-node-stream": "^3.0.2",
"strtok3": "^7.0.0",
"token-types": "^5.0.1"
},
"engines": {
"node": ">=14.16"
},
"funding": {
"url": "https://github.com/sindresorhus/file-type?sponsor=1"
}
},
"node_modules/fs-constants": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz",
"integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow=="
},
"node_modules/ieee754": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz",
"integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"node_modules/isarray": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
"integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ=="
},
"node_modules/jsonparse": {
"version": "0.0.5",
"resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-0.0.5.tgz",
"integrity": "sha512-fw7Q/8gFR8iSekUi9I+HqWIap6mywuoe7hQIg3buTVjuZgALKj4HAmm0X6f+TaL4c9NJbvyFQdaI2ppr5p6dnQ==",
"engines": [
"node >= 0.2.0"
]
},
"node_modules/JSONStream": {
"version": "0.10.0",
"resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-0.10.0.tgz",
"integrity": "sha512-8XbSFFd43EG+1thjLNFIzCBlwXti0yKa7L+ak/f0T/pkC+31b7G41DXL/JzYpAoYWZ2eCPiu4IIqzijM8N0a/w==",
"dependencies": {
"jsonparse": "0.0.5",
"through": ">=2.2.7 <3"
},
"bin": {
"JSONStream": "index.js"
},
"engines": {
"node": "*"
}
},
"node_modules/lodash": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
},
"node_modules/lodash.snakecase": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/lodash.snakecase/-/lodash.snakecase-4.1.1.tgz",
"integrity": "sha512-QZ1d4xoBHYUeuouhEq3lk3Uq7ldgyFXGBhg04+oRLnIz8o9T65Eh+8YdroUwn846zchkA9yDsDl5CVVaV2nqYw=="
},
"node_modules/memorystream": {
"version": "0.3.1",
"resolved": "https://registry.npmjs.org/memorystream/-/memorystream-0.3.1.tgz",
"integrity": "sha512-S3UwM3yj5mtUSEfP41UZmt/0SCoVYUcU1rkXv+BQ5Ig8ndL4sPoJNBUJERafdPb5jjHJGuMgytgKvKIf58XNBw==",
"engines": {
"node": ">= 0.10.0"
}
},
"node_modules/mkdirp-classic": {
"version": "0.5.3",
"resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz",
"integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A=="
},
"node_modules/ms": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
},
"node_modules/nan": {
"version": "2.17.0",
"resolved": "https://registry.npmjs.org/nan/-/nan-2.17.0.tgz",
"integrity": "sha512-2ZTgtl0nJsO0KQCjEpxcIr5D+Yv90plTitZt9JBfQvVJDS5seMl3FOvsh3+9CoYWXf/1l5OaZzzF6nDm4cagaQ==",
"optional": true
},
"node_modules/node-docker-api": {
"version": "1.1.22",
"resolved": "https://registry.npmjs.org/node-docker-api/-/node-docker-api-1.1.22.tgz",
"integrity": "sha512-8xfOiuLDJQw+l58i66lUNQhRhS5fAExqQbLolmyqMucrsDON7k7eLMIHphcBwwB7utwCHCQkcp73gSAmzSiAiw==",
"dependencies": {
"docker-modem": "^0.3.1",
"memorystream": "^0.3.1"
}
},
"node_modules/node-docker-api/node_modules/debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"dependencies": {
"ms": "2.0.0"
}
},
"node_modules/node-docker-api/node_modules/docker-modem": {
"version": "0.3.7",
"resolved": "https://registry.npmjs.org/docker-modem/-/docker-modem-0.3.7.tgz",
"integrity": "sha512-4Xn4ZVtc/2DEFtxY04lOVeF7yvxwXGVo0sN8FKRBnLhBcwQ78Hb56j+Z5yAXXUhoweVhzGeBeGWahS+af0/mcg==",
"dependencies": {
"debug": "^2.6.0",
"JSONStream": "0.10.0",
"readable-stream": "~1.0.26-4",
"split-ca": "^1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/node-docker-api/node_modules/ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
},
"node_modules/node-docker-api/node_modules/readable-stream": {
"version": "1.0.34",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.34.tgz",
"integrity": "sha512-ok1qVCJuRkNmvebYikljxJA/UEsKwLl2nI1OmaqAu4/UE+h0wKCHok4XkL/gvi39OacXvw59RJUOFUkDib2rHg==",
"dependencies": {
"core-util-is": "~1.0.0",
"inherits": "~2.0.1",
"isarray": "0.0.1",
"string_decoder": "~0.10.x"
}
},
"node_modules/node-docker-api/node_modules/string_decoder": {
"version": "0.10.31",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz",
"integrity": "sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ=="
},
"node_modules/once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
"dependencies": {
"wrappy": "1"
}
},
"node_modules/peek-readable": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/peek-readable/-/peek-readable-5.0.0.tgz",
"integrity": "sha512-YtCKvLUOvwtMGmrniQPdO7MwPjgkFBtFIrmfSbYmYuq3tKDV/mcfAhBth1+C3ru7uXIZasc/pHnb+YDYNkkj4A==",
"engines": {
"node": ">=14.16"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/Borewit"
}
},
"node_modules/pump": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
"integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
"dependencies": {
"end-of-stream": "^1.1.0",
"once": "^1.3.1"
}
},
"node_modules/readable-stream": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz",
"integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==",
"dependencies": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/readable-web-to-node-stream": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/readable-web-to-node-stream/-/readable-web-to-node-stream-3.0.2.tgz",
"integrity": "sha512-ePeK6cc1EcKLEhJFt/AebMCLL+GgSKhuygrZ/GLaKZYEecIgIECf4UaUuaByiGtzckwR4ain9VzUh95T1exYGw==",
"dependencies": {
"readable-stream": "^3.6.0"
},
"engines": {
"node": ">=8"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/Borewit"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
},
"node_modules/split-ca": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/split-ca/-/split-ca-1.0.1.tgz",
"integrity": "sha512-Q5thBSxp5t8WPTTJQS59LrGqOZqOsrhDGDVm8azCqIBjSBd7nd9o2PM+mDulQQkh8h//4U6hFZnc/mul8t5pWQ=="
},
"node_modules/ssh2": {
"version": "1.11.0",
"resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.11.0.tgz",
"integrity": "sha512-nfg0wZWGSsfUe/IBJkXVll3PEZ//YH2guww+mP88gTpuSU4FtZN7zu9JoeTGOyCNx2dTDtT9fOpWwlzyj4uOOw==",
"hasInstallScript": true,
"dependencies": {
"asn1": "^0.2.4",
"bcrypt-pbkdf": "^1.0.2"
},
"engines": {
"node": ">=10.16.0"
},
"optionalDependencies": {
"cpu-features": "~0.0.4",
"nan": "^2.16.0"
}
},
"node_modules/streamsearch": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz",
"integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==",
"engines": {
"node": ">=10.0.0"
}
},
"node_modules/string_decoder": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
"integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
"dependencies": {
"safe-buffer": "~5.2.0"
}
},
"node_modules/strtok3": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/strtok3/-/strtok3-7.0.0.tgz",
"integrity": "sha512-pQ+V+nYQdC5H3Q7qBZAz/MO6lwGhoC2gOAjuouGf/VO0m7vQRh8QNMl2Uf6SwAtzZ9bOw3UIeBukEGNJl5dtXQ==",
"dependencies": {
"@tokenizer/token": "^0.3.0",
"peek-readable": "^5.0.0"
},
"engines": {
"node": ">=14.16"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/Borewit"
}
},
"node_modules/tar-fs": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.0.1.tgz",
"integrity": "sha512-6tzWDMeroL87uF/+lin46k+Q+46rAJ0SyPGz7OW7wTgblI273hsBqk2C1j0/xNadNLKDTUL9BukSjB7cwgmlPA==",
"dependencies": {
"chownr": "^1.1.1",
"mkdirp-classic": "^0.5.2",
"pump": "^3.0.0",
"tar-stream": "^2.0.0"
}
},
"node_modules/tar-stream": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz",
"integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==",
"dependencies": {
"bl": "^4.0.3",
"end-of-stream": "^1.4.1",
"fs-constants": "^1.0.0",
"inherits": "^2.0.3",
"readable-stream": "^3.1.1"
},
"engines": {
"node": ">=6"
}
},
"node_modules/through": {
"version": "2.3.8",
"resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
"integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg=="
},
"node_modules/token-types": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/token-types/-/token-types-5.0.1.tgz",
"integrity": "sha512-Y2fmSnZjQdDb9W4w4r1tswlMHylzWIeOKpx0aZH9BgGtACHhrk3OkT52AzwcuqTRBZtvvnTjDBh8eynMulu8Vg==",
"dependencies": {
"@tokenizer/token": "^0.3.0",
"ieee754": "^1.2.1"
},
"engines": {
"node": ">=14.16"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/Borewit"
}
},
"node_modules/ts-mixer": {
"version": "6.0.2",
"resolved": "https://registry.npmjs.org/ts-mixer/-/ts-mixer-6.0.2.tgz",
"integrity": "sha512-zvHx3VM83m2WYCE8XL99uaM7mFwYSkjR2OZti98fabHrwkjsCvgwChda5xctein3xGOyaQhtTeDq/1H/GNvF3A=="
},
"node_modules/tslib": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.1.tgz",
"integrity": "sha512-tGyy4dAjRIEwI7BzsB0lynWgOpfqjUdq91XXAlIWD2OwKBH7oCl/GZG/HT4BOHrTlPMOASlMQ7veyTqpmRcrNA=="
},
"node_modules/tweetnacl": {
"version": "0.14.5",
"resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz",
"integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA=="
},
"node_modules/undici": {
"version": "5.14.0",
"resolved": "https://registry.npmjs.org/undici/-/undici-5.14.0.tgz",
"integrity": "sha512-yJlHYw6yXPPsuOH0x2Ib1Km61vu4hLiRRQoafs+WUgX1vO64vgnxiCEN9dpIrhZyHFsai3F0AEj4P9zy19enEQ==",
"dependencies": {
"busboy": "^1.6.0"
},
"engines": {
"node": ">=12.18"
}
},
"node_modules/util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="
},
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="
},
"node_modules/ws": {
"version": "8.11.0",
"resolved": "https://registry.npmjs.org/ws/-/ws-8.11.0.tgz",
"integrity": "sha512-HPG3wQd9sNQoT9xHyNCXoDUa+Xw/VevmY9FoHyQ+g+rrMn4j6FB4np7Z0OhdTgjx6MgQLK7jwSy1YecU1+4Asg==",
"engines": {
"node": ">=10.0.0"
},
"peerDependencies": {
"bufferutil": "^4.0.1",
"utf-8-validate": "^5.0.2"
},
"peerDependenciesMeta": {
"bufferutil": {
"optional": true
},
"utf-8-validate": {
"optional": true
}
}
}
}
}

@ -1,30 +0,0 @@
{
"name": "server-bot",
"version": "1.0.0",
"description": "Discord bot to remotely monitor and control a docker based server",
"main": "index.js",
"scripts": {
"start": "nodemon index.js"
},
"repository": {
"type": "git",
"url": "git+https://github.com/allenrkeen/server-bot.git"
},
"keywords": [
"discord",
"docker",
"linux",
"selfhost"
],
"author": "allenrkeen",
"license": "MIT",
"bugs": {
"url": "https://github.com/allenrkeen/server-bot/issues"
},
"homepage": "https://github.com/allenrkeen/server-bot#readme",
"dependencies": {
"discord.js": "^14.7.1",
"dotenv": "^16.0.3",
"node-docker-api": "^1.1.22"
}
}

@ -1,72 +0,0 @@
"""
Paper Swarm
1. Scrape https://huggingface.co/papers for all papers by searching for links containing /papers/, then follow each link to get the header, the abstract,
and various other links, and write them to a txt file for each paper on https://huggingface.co/papers.
2. Feed prompts iteratively into Anthropic for summarization plus value scores on impact, reliability, novelty, and other paper-ranking mechanisms
3. Store papers in a database with metadata. Agents can use retrieval
4. Discord Bot // Twitter Bot
"""
import requests
from bs4 import BeautifulSoup
import os
class Paper:
def __init__(self, title, date, authors, abstract):
self.title = title
self.date = date
self.authors = authors
self.abstract = abstract
class Scraper:
def __init__(self, url):
self.url = url
def get_paper_links(self):
response = requests.get(self.url)
soup = BeautifulSoup(response.text, "html.parser")
links = [
a["href"] for a in soup.find_all("a", href=True) if "/papers/" in a["href"]
]
return links
def get_paper_details(self, link):
response = requests.get(self.url + link)
soup = BeautifulSoup(response.text, "html.parser")
title = soup.find("h1").text
date_tag = soup.find("time")
date = date_tag.text if date_tag else "Unknown"
authors = [author.text for author in soup.find_all("span", class_="author")]
abstract_tag = soup.find("div", class_="abstract")
abstract = abstract_tag.text if abstract_tag else "Abstract not found"
return Paper(title, date, authors, abstract)
class FileWriter:
def __init__(self, directory):
self.directory = directory
def write_paper(self, paper):
with open(os.path.join(self.directory, paper.title + ".txt"), "w") as f:
f.write(f"h1: {paper.title}\n")
f.write(f"Published on {paper.date}\n")
f.write("Authors:\n")
for author in paper.authors:
f.write(f"{author}\n")
f.write("Abstract\n")
f.write(paper.abstract)
scraper = Scraper("https://huggingface.co/papers")
file_writer = FileWriter("images")
links = scraper.get_paper_links()
for link in links:
paper = scraper.get_paper_details(link)
file_writer.write_paper(paper)

@ -1,29 +0,0 @@
To count tokens you can use Swarms events and the `TokenCounter` util:
```python
from swarms import utils
from swarms.events import (
StartPromptEvent, FinishPromptEvent,
)
from swarms.structures import Agent
token_counter = utils.TokenCounter()
agent = Agent(
event_listeners={
StartPromptEvent: [
lambda e: token_counter.add_tokens(e.token_count)
],
FinishPromptEvent: [
lambda e: token_counter.add_tokens(e.token_count)
],
}
)
agent.run("tell me about large language models")
agent.run("tell me about GPT")
print(f"total tokens: {token_counter.tokens}")
```

@ -0,0 +1,117 @@
# `MPT7B`
==============================================
The `MPT7B` class is a powerful tool for generating text using pre-trained models. It leverages the `transformers` library from Hugging Face to load models and tokenizers, and to perform the text generation. The class is designed to be flexible and easy to use, with a variety of methods for generating text both synchronously and asynchronously.
## Class Definition
----------------
```
class MPT7B:
def __init__(self, model_name: str, tokenizer_name: str, max_tokens: int = 100)
def run(self, task: str, *args, **kwargs) -> str
async def run_async(self, task: str, *args, **kwargs) -> str
def generate(self, prompt: str) -> str
async def generate_async(self, prompt: str) -> str
def __call__(self, task: str, *args, **kwargs) -> str
async def __call_async__(self, task: str, *args, **kwargs) -> str
def batch_generate(self, prompts: list, temperature: float = 1.0) -> list
def unfreeze_model(self)
```
## Class Parameters
----------------
| Parameter | Type | Description |
| --- | --- | --- |
| `model_name` | str | Name of the pre-trained model to use. |
| `tokenizer_name` | str | Name of the tokenizer to use. |
| `max_tokens` | int | Maximum number of tokens to generate. Default is 100. |
## Class Methods
-------------
| Method | Returns | Description |
| --- | --- | --- |
| `run(task: str, *args, **kwargs)` | str | Run the model with the specified task and arguments. |
| `run_async(task: str, *args, **kwargs)` | str | Run the model asynchronously with the specified task and arguments. |
| `generate(prompt: str)` | str | Generate text from the given prompt. |
| `generate_async(prompt: str)` | str | Generate text asynchronously from the given prompt. |
| `__call__(task: str, *args, **kwargs)` | str | Call the model with the specified task and arguments. |
| `__call_async__(task: str, *args, **kwargs)` | str | Call the model asynchronously with the specified task and arguments. |
| `batch_generate(prompts: list, temperature: float = 1.0)` | list | Generate text for a batch of prompts. |
| `unfreeze_model()` | None | Unfreeze the model for fine-tuning. |
## Usage Examples
--------------
### Example 1: Basic Text Generation
```python
from swarms.models import MPT7B
# Initialize the MPT7B class
mpt = MPT7B('mosaicml/mpt-7b-storywriter', 'EleutherAI/gpt-neox-20b', max_tokens=150)
# Generate text
output = mpt.run('generate', 'Once upon a time in a land far, far away...')
print(output)
```
### Example 2: Batch Text Generation
```python
from swarms.models import MPT7B
# Initialize the MPT7B class
mpt = MPT7B('mosaicml/mpt-7b-storywriter', 'EleutherAI/gpt-neox-20b', max_tokens=150)
# Generate text for a batch of prompts
prompts = ['In the deep jungles,', 'At the heart of the city,']
outputs = mpt.batch_generate(prompts, temperature=0.7)
print(outputs)
```
### Example 3: Asynchronous Text Generation
```python
import asyncio
from swarms.models import MPT7B
# Initialize the MPT7B class
mpt = MPT7B('mosaicml/mpt-7b-storywriter', 'EleutherAI/gpt-neox-20b', max_tokens=150)
# Generate text asynchronously
output = asyncio.run(mpt.run_async('generate', 'Once upon a time in a land far, far away...'))
print(output)
```
## Additional Information
----------------------------------
The `batch_generate` method allows for generating text for multiple prompts at once. This can be more efficient than generating text for each prompt individually, especially when working with a large number of prompts.
The `unfreeze_model` method is used to unfreeze the model for fine-tuning. By default, the model parameters are frozen to prevent them from being updated during training. Unfreezing the model allows the parameters to be updated, which can be useful for fine-tuning the model on a specific task.
The `__call__` and `__call_async__` methods are convenience methods that allow the class instance to be called like a function. They simply call the `run` and `run_async` methods, respectively.
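Below is a minimal, hypothetical sketch of how these convenience methods and `unfreeze_model` might be used together, assuming the same constructor arguments as the examples above; the fine-tuning loop itself is out of scope here.
```python
import asyncio

from swarms.models import MPT7B

mpt = MPT7B('mosaicml/mpt-7b-storywriter', 'EleutherAI/gpt-neox-20b', max_tokens=150)

# Calling the instance directly delegates to `run`.
print(mpt('generate', 'The explorers reached the edge of the map and'))

# The asynchronous counterpart delegates to `run_async`.
print(asyncio.run(mpt.__call_async__('generate', 'The explorers reached the edge of the map and')))

# Unfreeze the parameters before plugging the model into a fine-tuning loop (not shown here).
mpt.unfreeze_model()
```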
## Architecture and Working
------------------------
The `MPT7B` class is designed to be a simple and flexible interface for text generation with pre-trained models. It encapsulates the complexity of loading models and tokenizers, setting up the text generation pipeline, and generating text.
The class uses the `AutoModelForCausalLM` and `AutoTokenizer` classes from the `transformers` library to load the pre-trained model and tokenizer. The `pipeline` function is used to create a text generation pipeline with the loaded model and tokenizer. This pipeline is used to generate text from prompts.
The `run` and `run_async` methods are the main entry points for using the class. They accept a task name and arbitrary arguments, and call the appropriate method based on the task name. The `generate` and `generate_async` methods perform the actual text generation.
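For a concrete picture of the flow described above, the following is a rough, simplified sketch of how such a wrapper could wire `AutoModelForCausalLM`, `AutoTokenizer`, and `pipeline` together. The helper class name, attributes, and defaults are illustrative assumptions, not the actual `MPT7B` implementation.
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline


class CausalLMWrapper:
    """Illustrative stand-in mirroring the loading flow described above (not the real MPT7B)."""

    def __init__(self, model_name: str, tokenizer_name: str, max_tokens: int = 100):
        self.max_tokens = max_tokens
        # Load the pre-trained tokenizer and causal language model from the Hugging Face Hub.
        # Note: MPT checkpoints may additionally require trust_remote_code=True.
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
        self.model = AutoModelForCausalLM.from_pretrained(model_name)
        # Build a text-generation pipeline around the loaded model and tokenizer.
        self.pipe = pipeline("text-generation", model=self.model, tokenizer=self.tokenizer)

    def generate(self, prompt: str) -> str:
        # Return the generated text of the first (and only) pipeline result.
        return self.pipe(prompt, max_new_tokens=self.max_tokens)[0]["generated_text"]
```
The real class layers the asynchronous, batch, and freezing/unfreezing behaviour described above on top of this core loading step.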
## Conclusion
----------
The `MPT7B` class provides a powerful and flexible interface for text generation with pre-trained models. It encapsulates the complexity of loading models and tokenizers, setting up the text generation pipeline, and generating text, making it easy to generate high-quality text with just a few lines of code. Whether you're generating text for a single prompt, a batch of prompts, or fine-tuning the model on a specific task, the `MPT7B` class has you covered.

@ -19,6 +19,6 @@ node = Worker(
temperature=0.5, temperature=0.5,
) )
task = "Create an entirely new board game around riddles for physics" task = "Locate 5 trending topics on healthy living, locate a website like NYTimes, and then generate an image of people doing those topics."
response = node.run(task) response = node.run(task)
print(response) print(response)

@ -0,0 +1,27 @@
from swarms.models import OpenAIChat
from swarms.structs import Flow
api_key = ""
# Initialize the language model;
# this model can be swapped out for Anthropic or Hugging Face models like Mistral, etc.
llm = OpenAIChat(
openai_api_key=api_key,
temperature=0.5,
max_tokens=100,
)
# Initialize the flow
flow = Flow(
llm=llm,
max_loops=5,
# system_prompt=SYSTEM_PROMPT,
# retry_interval=1,
)
out = flow.run("Generate a 10,000 word blog, say Stop when done")
print(out)
# # Now save the flow
# flow.save("flow.yaml")

@ -97,6 +97,7 @@ nav:
- OpenAI: "swarms/models/openai.md" - OpenAI: "swarms/models/openai.md"
- Zephyr: "swarms/models/zephyr.md" - Zephyr: "swarms/models/zephyr.md"
- BioGPT: "swarms/models/biogpt.md" - BioGPT: "swarms/models/biogpt.md"
- MPT7B: "swarms/models/mpt.md"
- MultiModal: - MultiModal:
- Fuyu: "swarms/models/fuyu.md" - Fuyu: "swarms/models/fuyu.md"
- Vilt: "swarms/models/vilt.md" - Vilt: "swarms/models/vilt.md"
@ -121,7 +122,6 @@ nav:
- Worker: - Worker:
- Basic: "examples/worker.md" - Basic: "examples/worker.md"
- StackedWorker: "examples/stacked_worker.md" - StackedWorker: "examples/stacked_worker.md"
- Applications: - Applications:
- CustomerSupport: - CustomerSupport:
- Overview: "applications/customer_support.md" - Overview: "applications/customer_support.md"

@ -1,227 +0,0 @@
*****TASK LIST*****
1: Make a todo list
*****NEXT TASK*****
1: Make a todo list
message='Request to OpenAI API' method=post path=https://api.openai.com/v1/engines/text-embedding-ada-002/embeddings
api_version=None data='{"input": ["\\nPlease make a web GUI for using HTTP API server. \\nThe name of it is Swarms. \\nYou can check the server code at ./main.py. \\nThe server is served on localhost:8000. \\nUsers should be able to write text input as \'query\' and url array as \'files\', and check the response. \\nUsers input form should be delivered in JSON format. \\nI want it to have neumorphism-style. Serve it on port 4500.\\n\\n"], "encoding_format": "base64"}' message='Post details'
Converted retries value: 2 -> Retry(total=2, connect=None, read=None, redirect=None, status=None)
Starting new HTTPS connection (1): api.openai.com:443
https://api.openai.com:443 "POST /v1/engines/text-embedding-ada-002/embeddings HTTP/1.1" 200 None
message='OpenAI API response' path=https://api.openai.com/v1/engines/text-embedding-ada-002/embeddings processing_ms=168 request_id=6b1f8e81a95d5f4ec48a65a2b0bc7a29 response_code=200
> Entering new chain...
message='Request to OpenAI API' method=post path=https://api.openai.com/v1/completions
api_version=None data='{"prompt": ["You are an Boss in a swarm who performs one task based on the following objective: \\nPlease make a web GUI for using HTTP API server. \\nThe name of it is Swarms. \\nYou can check the server code at ./main.py. \\nThe server is served on localhost:8000. \\nUsers should be able to write text input as \'query\' and url array as \'files\', and check the response. \\nUsers input form should be delivered in JSON format. \\nI want it to have neumorphism-style. Serve it on port 4500.\\n\\n. Take into account these previously completed tasks: .\\n \\n\\nTODO: useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!\\nWorkerNode AI Agent: Input: an objective with a todo list for that objective. Output: your task completed: Please be very clear what the objective and task instructions are. The Swarm worker agent is Useful for when you need to spawn an autonomous agent instance as a worker to accomplish any complex tasks, it can search the internet or write code or spawn child multi-modality models to process and generate images and text or audio and so on\\n\\nUse the following format:\\n\\nQuestion: the input question you must answer\\nThought: you should always think about what to do\\nAction: the action to take, should be one of [TODO, WorkerNode AI Agent]\\nAction Input: the input to the action\\nObservation: the result of the action\\n... (this Thought/Action/Action Input/Observation can repeat N times)\\nThought: I now know the final answer\\nFinal Answer: the final answer to the original input question\\n\\nQuestion: Make a todo list\\n"], "model": "text-davinci-003", "temperature": 0.5, "max_tokens": 256, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "stop": ["\\nObservation:", "\\n\\tObservation:"]}' message='Post details'
https://api.openai.com:443 "POST /v1/completions HTTP/1.1" 200 None
message='OpenAI API response' path=https://api.openai.com/v1/completions processing_ms=3128 request_id=802132ab4ecdd632506f28a95d3c185a response_code=200
Thought: What do I need to do?
Action: TODO
Action Input: Make a web GUI for using HTTP API server. The name of it is Swarms. You can check the server code at ./main.py. The server is served on localhost:8000. Users should be able to write text input as 'query' and url array as 'files', and check the response. Users input form should be delivered in JSON format. I want it to have neumorphism-style. Serve it on port 4500.message='Request to OpenAI API' method=post path=https://api.openai.com/v1/completions
api_version=None data='{"prompt": ["You are a boss planer in a swarm who is an expert at coming up with a todo list for a given objective and then creating an worker to help you accomplish your task. Come up with a todo list for this objective: Make a web GUI for using HTTP API server. The name of it is Swarms. You can check the server code at ./main.py. The server is served on localhost:8000. Users should be able to write text input as \'query\' and url array as \'files\', and check the response. Users input form should be delivered in JSON format. I want it to have neumorphism-style. Serve it on port 4500. and then spawn a worker agent to complete the task for you."], "model": "text-davinci-003", "temperature": 0.5, "max_tokens": 256, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": {}}' message='Post details'
https://api.openai.com:443 "POST /v1/completions HTTP/1.1" 200 None
message='OpenAI API response' path=https://api.openai.com/v1/completions processing_ms=6119 request_id=9ec8b1aad373258657cdf5663721f398 response_code=200
Observation:
Todo List:
1. Create a basic HTML page with a simple form for user input
2. Add a text input field and url array field to the form
3. Create a JavaScript function to capture and format the user input into JSON
4. Create a JavaScript function to make an AJAX call to the HTTP API server
5. Create a JavaScript function to process the response from the server and display it to the user
6. Add neumorphism-style styling to the HTML page
7. Create a Python script to run the server on port 4500
8. Create a worker agent to complete the task
Once the worker agent is created, it will be responsible for completing the task by following the todo list. It will be able to execute the HTML page, JavaScript functions, and Python script to make the web GUI for using the HTTP API server.
Thought:message='Request to OpenAI API' method=post path=https://api.openai.com/v1/completions
api_version=None data='{"prompt": ["You are an Boss in a swarm who performs one task based on the following objective: \\nPlease make a web GUI for using HTTP API server. \\nThe name of it is Swarms. \\nYou can check the server code at ./main.py. \\nThe server is served on localhost:8000. \\nUsers should be able to write text input as \'query\' and url array as \'files\', and check the response. \\nUsers input form should be delivered in JSON format. \\nI want it to have neumorphism-style. Serve it on port 4500.\\n\\n. Take into account these previously completed tasks: .\\n \\n\\nTODO: useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!\\nWorkerNode AI Agent: Input: an objective with a todo list for that objective. Output: your task completed: Please be very clear what the objective and task instructions are. The Swarm worker agent is Useful for when you need to spawn an autonomous agent instance as a worker to accomplish any complex tasks, it can search the internet or write code or spawn child multi-modality models to process and generate images and text or audio and so on\\n\\nUse the following format:\\n\\nQuestion: the input question you must answer\\nThought: you should always think about what to do\\nAction: the action to take, should be one of [TODO, WorkerNode AI Agent]\\nAction Input: the input to the action\\nObservation: the result of the action\\n... (this Thought/Action/Action Input/Observation can repeat N times)\\nThought: I now know the final answer\\nFinal Answer: the final answer to the original input question\\n\\nQuestion: Make a todo list\\nThought: What do I need to do?\\nAction: TODO\\nAction Input: Make a web GUI for using HTTP API server. The name of it is Swarms. You can check the server code at ./main.py. The server is served on localhost:8000. Users should be able to write text input as \'query\' and url array as \'files\', and check the response. Users input form should be delivered in JSON format. I want it to have neumorphism-style. Serve it on port 4500.\\nObservation: \\n\\nTodo List:\\n\\n1. Create a basic HTML page with a simple form for user input\\n2. Add a text input field and url array field to the form\\n3. Create a JavaScript function to capture and format the user input into JSON\\n4. Create a JavaScript function to make an AJAX call to the HTTP API server\\n5. Create a JavaScript function to process the response from the server and display it to the user\\n6. Add neumorphism-style styling to the HTML page\\n7. Create a Python script to run the server on port 4500\\n8. Create a worker agent to complete the task\\n\\nOnce the worker agent is created, it will be responsible for completing the task by following the todo list. It will be able to execute the HTML page, JavaScript functions, and Python script to make the web GUI for using the HTTP API server.\\nThought:"], "model": "text-davinci-003", "temperature": 0.5, "max_tokens": 256, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "stop": ["\\nObservation:", "\\n\\tObservation:"]}' message='Post details'
https://api.openai.com:443 "POST /v1/completions HTTP/1.1" 200 None
message='OpenAI API response' path=https://api.openai.com/v1/completions processing_ms=1153 request_id=3af455f0446ff21257b66034aa4671d2 response_code=200
I now know the final answer
Final Answer: Create a todo list for making a web GUI for using HTTP API server with neumorphism-style styling served on port 4500.
> Finished chain.
*****TASK RESULT*****
Create a todo list for making a web GUI for using HTTP API server with neumorphism-style styling served on port 4500.
message='Request to OpenAI API' method=post path=https://api.openai.com/v1/engines/text-embedding-ada-002/embeddings
api_version=None data='{"input": ["Create a todo list for making a web GUI for using HTTP API server with neumorphism-style styling served on port 4500."], "encoding_format": "base64"}' message='Post details'
https://api.openai.com:443 "POST /v1/engines/text-embedding-ada-002/embeddings HTTP/1.1" 200 None
message='OpenAI API response' path=https://api.openai.com/v1/engines/text-embedding-ada-002/embeddings processing_ms=202 request_id=9325c5cc5cb825438e25d8f5618b2774 response_code=200
message='Request to OpenAI API' method=post path=https://api.openai.com/v1/completions
api_version=None data='{"prompt": ["You are an task creation AI that uses the result of an execution agent to create new tasks with the following objective: \\nPlease make a web GUI for using HTTP API server. \\nThe name of it is Swarms. \\nYou can check the server code at ./main.py. \\nThe server is served on localhost:8000. \\nUsers should be able to write text input as \'query\' and url array as \'files\', and check the response. \\nUsers input form should be delivered in JSON format. \\nI want it to have neumorphism-style. Serve it on port 4500.\\n\\n, The last completed task has the result: Create a todo list for making a web GUI for using HTTP API server with neumorphism-style styling served on port 4500.. This result was based on this task description: Make a todo list. These are incomplete tasks: . Based on the result, create new tasks to be completed by the AI system that do not overlap with incomplete tasks. Return the tasks as an array."], "model": "text-davinci-003", "temperature": 0.5, "max_tokens": 256, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": {}}' message='Post details'
https://api.openai.com:443 "POST /v1/completions HTTP/1.1" 200 None
message='OpenAI API response' path=https://api.openai.com/v1/completions processing_ms=4343 request_id=ddd5dc301576bac56271e4a666194222 response_code=200
message='Request to OpenAI API' method=post path=https://api.openai.com/v1/completions
api_version=None data='{"prompt": ["You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing the following tasks: Tasks: , 1. Create the HTML structure for the web GUI. , 2. Design the web GUI with neumorphism-style styling. , 3. Create the JavaScript code to capture user input., 4. Create the JavaScript code to send the user input to the server., 5. Create the JavaScript code to capture the server response., 6. Create the JavaScript code to display the response to the user., 7. Test the web GUI on port 4500.. Consider the ultimate objective of your team: \\nPlease make a web GUI for using HTTP API server. \\nThe name of it is Swarms. \\nYou can check the server code at ./main.py. \\nThe server is served on localhost:8000. \\nUsers should be able to write text input as \'query\' and url array as \'files\', and check the response. \\nUsers input form should be delivered in JSON format. \\nI want it to have neumorphism-style. Serve it on port 4500.\\n\\n. Do not remove any tasks. Return the result as a numbered list, like: #. First task #. Second task Start the task list with number 2."], "model": "text-davinci-003", "temperature": 0.5, "max_tokens": 256, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": {}}' message='Post details'
https://api.openai.com:443 "POST /v1/completions HTTP/1.1" 200 None
message='OpenAI API response' path=https://api.openai.com/v1/completions processing_ms=3085 request_id=a4981199d3ca81a0251b3e60a1410c48 response_code=200
*****TASK LIST*****
1: Create the JavaScript code to capture user input.
2: Create the JavaScript code to send the user input to the server.
3: Create the JavaScript code to capture the server response.
4: Create the JavaScript code to display the response to the user.
5: Test the web GUI on port 4500.
6: Create the HTML structure for the web GUI.
7: Design the web GUI with neumorphism-style styling.
*****NEXT TASK*****
1: Create the JavaScript code to capture user input.
message='Request to OpenAI API' method=post path=https://api.openai.com/v1/engines/text-embedding-ada-002/embeddings
api_version=None data='{"input": ["\\nPlease make a web GUI for using HTTP API server. \\nThe name of it is Swarms. \\nYou can check the server code at ./main.py. \\nThe server is served on localhost:8000. \\nUsers should be able to write text input as \'query\' and url array as \'files\', and check the response. \\nUsers input form should be delivered in JSON format. \\nI want it to have neumorphism-style. Serve it on port 4500.\\n\\n"], "encoding_format": "base64"}' message='Post details'
https://api.openai.com:443 "POST /v1/engines/text-embedding-ada-002/embeddings HTTP/1.1" 200 None
message='OpenAI API response' path=https://api.openai.com/v1/engines/text-embedding-ada-002/embeddings processing_ms=37 request_id=46c1a49e639c2221cde74735999d1ff2 response_code=200
> Entering new chain...
message='Request to OpenAI API' method=post path=https://api.openai.com/v1/completions
api_version=None data='{"prompt": ["You are an Boss in a swarm who performs one task based on the following objective: \\nPlease make a web GUI for using HTTP API server. \\nThe name of it is Swarms. \\nYou can check the server code at ./main.py. \\nThe server is served on localhost:8000. \\nUsers should be able to write text input as \'query\' and url array as \'files\', and check the response. \\nUsers input form should be delivered in JSON format. \\nI want it to have neumorphism-style. Serve it on port 4500.\\n\\n. Take into account these previously completed tasks: Make a todo list.\\n \\n\\nTODO: useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!\\nWorkerNode AI Agent: Input: an objective with a todo list for that objective. Output: your task completed: Please be very clear what the objective and task instructions are. The Swarm worker agent is Useful for when you need to spawn an autonomous agent instance as a worker to accomplish any complex tasks, it can search the internet or write code or spawn child multi-modality models to process and generate images and text or audio and so on\\n\\nUse the following format:\\n\\nQuestion: the input question you must answer\\nThought: you should always think about what to do\\nAction: the action to take, should be one of [TODO, WorkerNode AI Agent]\\nAction Input: the input to the action\\nObservation: the result of the action\\n... (this Thought/Action/Action Input/Observation can repeat N times)\\nThought: I now know the final answer\\nFinal Answer: the final answer to the original input question\\n\\nQuestion: Create the JavaScript code to capture user input.\\n"], "model": "text-davinci-003", "temperature": 0.5, "max_tokens": 256, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "stop": ["\\nObservation:", "\\n\\tObservation:"]}' message='Post details'
https://api.openai.com:443 "POST /v1/completions HTTP/1.1" 200 None
message='OpenAI API response' path=https://api.openai.com/v1/completions processing_ms=3908 request_id=29c7aa85577bb9a38555c4462f3c0fa5 response_code=200
Thought: I need to create a web GUI for user input.
Action: TODO
Action Input: Create a web GUI for using HTTP API server. The name of it is Swarms. Users should be able to write text input as 'query' and url array as 'files', and check the response. Users input form should be delivered in JSON format. I want it to have neumorphism-style. Serve it on port 4500.message='Request to OpenAI API' method=post path=https://api.openai.com/v1/completions
api_version=None data='{"prompt": ["You are a boss planer in a swarm who is an expert at coming up with a todo list for a given objective and then creating an worker to help you accomplish your task. Come up with a todo list for this objective: Create a web GUI for using HTTP API server. The name of it is Swarms. Users should be able to write text input as \'query\' and url array as \'files\', and check the response. Users input form should be delivered in JSON format. I want it to have neumorphism-style. Serve it on port 4500. and then spawn a worker agent to complete the task for you."], "model": "text-davinci-003", "temperature": 0.5, "max_tokens": 256, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": {}}' message='Post details'
https://api.openai.com:443 "POST /v1/completions HTTP/1.1" 200 None
message='OpenAI API response' path=https://api.openai.com/v1/completions processing_ms=8743 request_id=6d79af97400c608b91d7402f5641fdb2 response_code=200
Observation:
Todo List:
1. Research neumorphism-style and decide on best design for the web GUI.
2. Create HTML/CSS files for the web GUI.
3. Create JavaScript files for the web GUI.
4. Create a server to host the web GUI on port 4500.
5. Create an HTTP API server to handle user input and deliver JSON response.
6. Create a worker agent to handle the server side logic.
7. Test the web GUI for functionality and performance.
8. Deploy the web GUI on port 4500.
Thought:message='Request to OpenAI API' method=post path=https://api.openai.com/v1/completions
api_version=None data='{"prompt": ["You are an Boss in a swarm who performs one task based on the following objective: \\nPlease make a web GUI for using HTTP API server. \\nThe name of it is Swarms. \\nYou can check the server code at ./main.py. \\nThe server is served on localhost:8000. \\nUsers should be able to write text input as \'query\' and url array as \'files\', and check the response. \\nUsers input form should be delivered in JSON format. \\nI want it to have neumorphism-style. Serve it on port 4500.\\n\\n. Take into account these previously completed tasks: Make a todo list.\\n \\n\\nTODO: useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!\\nWorkerNode AI Agent: Input: an objective with a todo list for that objective. Output: your task completed: Please be very clear what the objective and task instructions are. The Swarm worker agent is Useful for when you need to spawn an autonomous agent instance as a worker to accomplish any complex tasks, it can search the internet or write code or spawn child multi-modality models to process and generate images and text or audio and so on\\n\\nUse the following format:\\n\\nQuestion: the input question you must answer\\nThought: you should always think about what to do\\nAction: the action to take, should be one of [TODO, WorkerNode AI Agent]\\nAction Input: the input to the action\\nObservation: the result of the action\\n... (this Thought/Action/Action Input/Observation can repeat N times)\\nThought: I now know the final answer\\nFinal Answer: the final answer to the original input question\\n\\nQuestion: Create the JavaScript code to capture user input.\\nThought: I need to create a web GUI for user input.\\nAction: TODO\\nAction Input: Create a web GUI for using HTTP API server. The name of it is Swarms. Users should be able to write text input as \'query\' and url array as \'files\', and check the response. Users input form should be delivered in JSON format. I want it to have neumorphism-style. Serve it on port 4500.\\nObservation: \\n\\nTodo List:\\n\\n1. Research neumorphism-style and decide on best design for the web GUI.\\n\\n2. Create HTML/CSS files for the web GUI.\\n\\n3. Create JavaScript files for the web GUI.\\n\\n4. Create a server to host the web GUI on port 4500.\\n\\n5. Create an HTTP API server to handle user input and deliver JSON response.\\n\\n6. Create a worker agent to handle the server side logic.\\n\\n7. Test the web GUI for functionality and performance.\\n\\n8. Deploy the web GUI on port 4500.\\nThought:"], "model": "text-davinci-003", "temperature": 0.5, "max_tokens": 256, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "stop": ["\\nObservation:", "\\n\\tObservation:"]}' message='Post details'
https://api.openai.com:443 "POST /v1/completions HTTP/1.1" 200 None
message='OpenAI API response' path=https://api.openai.com/v1/completions processing_ms=1278 request_id=01dc3693e3251ae99575abd4e6577ee8 response_code=200
I now know the final answer
Final Answer: Create the JavaScript code to capture user input and deploy the web GUI on port 4500.
> Finished chain.
*****TASK RESULT*****
Create the JavaScript code to capture user input and deploy the web GUI on port 4500.
message='Request to OpenAI API' method=post path=https://api.openai.com/v1/engines/text-embedding-ada-002/embeddings
api_version=None data='{"input": ["Create the JavaScript code to capture user input and deploy the web GUI on port 4500."], "encoding_format": "base64"}' message='Post details'
https://api.openai.com:443 "POST /v1/engines/text-embedding-ada-002/embeddings HTTP/1.1" 200 None
message='OpenAI API response' path=https://api.openai.com/v1/engines/text-embedding-ada-002/embeddings processing_ms=40 request_id=d5225adb1208b38e2639a23afcccf29d response_code=200
An error occurred in run: Tried to add ids that already exist: {'result_1'}
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ /content/swarms/example.py:26 in <module> │
│ │
│ 23 """ │
│ 24 │
│ 25 # Run Swarms │
│ ❱ 26 task = swarm.run(objective) │
│ 27 │
│ 28 print(task) │
│ 29 │
│ │
│ /content/swarms/swarms/swarms.py:79 in run │
│ │
│ 76 │ │ │ boss_node = self.initialize_boss_node(vectorstore, worker_node) │
│ 77 │ │ │ │
│ 78 │ │ │ task = boss_node.create_task(objective) │
│ ❱ 79 │ │ │ return boss_node.execute_task(task) │
│ 80 │ │ except Exception as e: │
│ 81 │ │ │ logging.error(f"An error occurred in run: {e}") │
│ 82 │ │ │ raise │
│ │
│ /content/swarms/swarms/agents/boss/boss_agent.py:27 in execute_task │
│ │
│ 24 │ │ return {"objective": objective} │
│ 25 │ │
│ 26 │ def execute_task(self, task): │
│ ❱ 27 │ │ self.baby_agi(task) │
│ 28 │
│ │
│ /usr/local/lib/python3.10/dist-packages/langchain/chains/base.py:181 in __call__ │
│ │
│ 178 │ │ │ ) │
│ 179 │ │ except (KeyboardInterrupt, Exception) as e: │
│ 180 │ │ │ run_manager.on_chain_error(e) │
│ ❱ 181 │ │ │ raise e │
│ 182 │ │ run_manager.on_chain_end(outputs) │
│ 183 │ │ final_outputs: Dict[str, Any] = self.prep_outputs( │
│ 184 │ │ │ inputs, outputs, return_only_outputs │
│ │
│ /usr/local/lib/python3.10/dist-packages/langchain/chains/base.py:175 in __call__ │
│ │
│ 172 │ │ ) │
│ 173 │ │ try: │
│ 174 │ │ │ outputs = ( │
│ ❱ 175 │ │ │ │ self._call(inputs, run_manager=run_manager) │
│ 176 │ │ │ │ if new_arg_supported │
│ 177 │ │ │ │ else self._call(inputs) │
│ 178 │ │ │ ) │
│ │
│ /usr/local/lib/python3.10/dist-packages/langchain/experimental/autonomous_agents/baby_agi/baby_a │
│ gi.py:142 in _call │
│ │
│ 139 │ │ │ │ │
│ 140 │ │ │ │ # Step 3: Store the result in Pinecone │
│ 141 │ │ │ │ result_id = f"result_{task['task_id']}" │
│ ❱ 142 │ │ │ │ self.vectorstore.add_texts( │
│ 143 │ │ │ │ │ texts=[result], │
│ 144 │ │ │ │ │ metadatas=[{"task": task["task_name"]}], │
│ 145 │ │ │ │ │ ids=[result_id], │
│ │
│ /usr/local/lib/python3.10/dist-packages/langchain/vectorstores/faiss.py:150 in add_texts │
│ │
│ 147 │ │ │ ) │
│ 148 │ │ # Embed and create the documents. │
│ 149 │ │ embeddings = [self.embedding_function(text) for text in texts] │
│ ❱ 150 │ │ return self.__add(texts, embeddings, metadatas=metadatas, ids=ids, **kwargs) │
│ 151 │ │
│ 152 │ def add_embeddings( │
│ 153 │ │ self, │
│ │
│ /usr/local/lib/python3.10/dist-packages/langchain/vectorstores/faiss.py:121 in __add │
│ │
│ 118 │ │ # Get list of index, id, and docs. │
│ 119 │ │ full_info = [(starting_len + i, ids[i], doc) for i, doc in enumerate(documents)] │
│ 120 │ │ # Add information to docstore and index. │
│ ❱ 121 │ │ self.docstore.add({_id: doc for _, _id, doc in full_info}) │
│ 122 │ │ index_to_id = {index: _id for index, _id, _ in full_info} │
│ 123 │ │ self.index_to_docstore_id.update(index_to_id) │
│ 124 │ │ return [_id for _, _id, _ in full_info] │
│ │
│ /usr/local/lib/python3.10/dist-packages/langchain/docstore/in_memory.py:19 in add │
│ │
│ 16 │ │ """Add texts to in memory dictionary.""" │
│ 17 │ │ overlapping = set(texts).intersection(self._dict) │
│ 18 │ │ if overlapping: │
│ ❱ 19 │ │ │ raise ValueError(f"Tried to add ids that already exist: {overlapping}") │
│ 20 │ │ self._dict = {**self._dict, **texts} │
│ 21 │ │
│ 22 │ def search(self, search: str) -> Union[str, Document]: │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
ValueError: Tried to add ids that already exist: {'result_1'}
/content/swarms#
[0] 0:bash* "802396df995c" 18:44 06-Jul-23

File diff suppressed because one or more lines are too long

@ -23,7 +23,6 @@ classifiers = [
[tool.poetry.dependencies] [tool.poetry.dependencies]
python = "^3.8.1" python = "^3.8.1"
revChatGPT = "*"
transformers = "*" transformers = "*"
openai = "*" openai = "*"
langchain = "*" langchain = "*"
@ -62,8 +61,6 @@ dalle3 = "*"
soundfile = "*" soundfile = "*"
torchvision = "*" torchvision = "*"
rich = "*" rich = "*"
EdgeGPT = "*"
[tool.poetry.dev-dependencies] [tool.poetry.dev-dependencies]
first_dependency = {git = "https://github.com/IDEA-Research/GroundingDINO.git"} first_dependency = {git = "https://github.com/IDEA-Research/GroundingDINO.git"}

@ -7,6 +7,8 @@ from swarms.agents.omni_modal_agent import OmniModalAgent
from swarms.models import OpenAIChat from swarms.models import OpenAIChat
from swarms.tools.autogpt import tool from swarms.tools.autogpt import tool
from swarms.workers import Worker from swarms.workers import Worker
from swarms.prompts.task_assignment_prompt import task_planner_prompt
# Initialize API Key # Initialize API Key
api_key = "" api_key = ""
@ -17,6 +19,7 @@ api_key = ""
llm = OpenAIChat( llm = OpenAIChat(
openai_api_key=api_key, openai_api_key=api_key,
temperature=0.5, temperature=0.5,
max_tokens=200,
) )
@ -35,6 +38,17 @@ def hf_agent(task: str = None):
return response return response
@tool
def task_planner_worker_agent(task: str):
"""
Task planner tool that creates a plan for a given task.
Input: an objective to create a todo list for. Output: a todo list for that objective.
"""
task = task_planner_prompt(task)
return llm(task)
# wrap a function with the tool decorator to make it a tool # wrap a function with the tool decorator to make it a tool
@tool @tool
def omni_agent(task: str = None): def omni_agent(task: str = None):
@ -94,7 +108,8 @@ def compile(task: str):
# Append tools to an list # Append tools to an list
tools = [hf_agent, omni_agent, compile] # tools = [hf_agent, omni_agent, compile]
tools = [task_planner_worker_agent]
# Initialize a single Worker node with previously defined tools in addition to it's # Initialize a single Worker node with previously defined tools in addition to it's
@ -110,7 +125,7 @@ node = Worker(
) )
# Specify task # Specify task
task = "Create a neural network using the interpreter tool" task = "Use the task planner to agent to create a plan to Locate 5 trending topics on healthy living, locate a website like NYTimes, and then generate an image of people doing those topics."
# Run the node on the task # Run the node on the task
response = node.run(task) response = node.run(task)

@ -8,7 +8,6 @@ warnings.filterwarnings("ignore", category=UserWarning)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
from swarms.workers import * from swarms.workers import *
from swarms.workers.worker import Worker from swarms.workers.worker import Worker
from swarms.chunkers import * from swarms.chunkers import *
@ -18,4 +17,4 @@ from swarms.swarms import *
from swarms.agents import * from swarms.agents import *
from swarms.logo import print_colored_logo from swarms.logo import print_colored_logo
print_colored_logo() print_colored_logo()

@ -1,157 +0,0 @@
import logging
import os
import faiss
from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain_experimental.autonomous_agents import BabyAGI
from pydantic import ValidationError
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
# ---------- Boss Node ----------
class Boss:
"""
    The Boss class is responsible for creating and executing tasks using the BabyAGI model.
It takes a language model (llm), a vectorstore for memory, an agent_executor for task execution, and a maximum number of iterations for the BabyAGI model.
# Setup
api_key = "YOUR_OPENAI_API_KEY" # Replace with your OpenAI API Key.
os.environ["OPENAI_API_KEY"] = api_key
# Objective for the Boss
objective = "Analyze website user behavior patterns over the past month."
    # Create a Boss instance
    boss = Boss(
objective=objective,
boss_system_prompt="You are the main controller of a data analysis swarm...",
api_key=api_key,
worker_node=WorkerNode
)
# Run the Bose to process the objective
boss.run()
"""
def __init__(
self,
objective: str,
api_key=None,
max_iterations=5,
human_in_the_loop=None,
        boss_system_prompt="You are a boss planner in a swarm...",
llm_class=OpenAI,
worker_node=None,
verbose=False,
):
# Store parameters
self.api_key = api_key or os.getenv("OPENAI_API_KEY")
self.objective = objective
self.max_iterations = max_iterations
self.boss_system_prompt = boss_system_prompt
self.llm_class = llm_class
self.verbose = verbose
# Initialization methods
self.llm = self._initialize_llm()
self.vectorstore = self._initialize_vectorstore()
self.task = self._create_task(self.objective)
self.agent_executor = self._initialize_agent_executor(worker_node)
self.baby_agi = self._initialize_baby_agi(human_in_the_loop)
def _initialize_llm(self):
"""
Init LLM
Params:
llm_class(class): The Language model class. Default is OpenAI.
temperature (float): The Temperature for the language model. Default is 0.5
"""
try:
return self.llm_class(openai_api_key=self.api_key, temperature=0.5)
except Exception as e:
logging.error(f"Failed to initialize language model: {e}")
raise e
def _initialize_vectorstore(self):
try:
embeddings_model = OpenAIEmbeddings(openai_api_key=self.api_key)
            embedding_size = 1536  # text-embedding-ada-002 returns 1536-dimensional embeddings
index = faiss.IndexFlatL2(embedding_size)
return FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
except Exception as e:
logging.error(f"Failed to initialize vector store: {e}")
raise e
def _initialize_agent_executor(self, worker_node):
todo_prompt = PromptTemplate.from_template(self.boss_system_prompt)
todo_chain = LLMChain(llm=self.llm, prompt=todo_prompt)
tools = [
Tool(
name="Goal Decomposition Tool",
func=todo_chain.run,
description="Use Case: Decompose ambitious goals into as many explicit and well defined tasks for an AI agent to follow. Rules and Regulations, don't use this tool too often only in the beginning when the user grants you a mission.",
),
Tool(
name="Swarm Worker Agent",
func=worker_node,
description="Use Case: When you want to delegate and assign the decomposed goal sub tasks to a worker agent in your swarm, Rules and Regulations, Provide a task specification sheet to the worker agent. It can use the browser, process csvs and generate content",
),
]
suffix = """Question: {task}\n{agent_scratchpad}"""
prefix = """You are a Boss in a swarm who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}.\n """
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["objective", "task", "context", "agent_scratchpad"],
)
llm_chain = LLMChain(llm=self.llm, prompt=prompt)
        agent = ZeroShotAgent(
            llm_chain=llm_chain,
            allowed_tools=[tool.name for tool in tools],  # allowed_tools expects tool names, not Tool objects
        )
return AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=self.verbose
)
def _initialize_baby_agi(self, human_in_the_loop):
try:
return BabyAGI.from_llm(
llm=self.llm,
vectorstore=self.vectorstore,
task_execution_chain=self.agent_executor,
max_iterations=self.max_iterations,
human_in_the_loop=human_in_the_loop,
)
except ValidationError as e:
logging.error(f"Validation Error while initializing BabyAGI: {e}")
raise
except Exception as e:
logging.error(f"Unexpected Error while initializing BabyAGI: {e}")
raise
def _create_task(self, objective):
if not objective:
logging.error("Objective cannot be empty.")
raise ValueError("Objective cannot be empty.")
return {"objective": objective}
def run(self):
if not self.task:
logging.error("Task cannot be empty.")
raise ValueError("Task cannot be empty.")
try:
self.baby_agi(self.task)
except Exception as e:
logging.error(f"Error while executing task: {e}")
raise

@ -1 +1,2 @@
from swarms.embeddings.pegasus import PegasusEmbedding # from swarms.embeddings.pegasus import PegasusEmbedding
from swarms.embeddings.simple_ada import get_ada_embeddings

@ -0,0 +1,27 @@
import openai
from dotenv import load_dotenv
load_dotenv()
from os import getenv
def get_ada_embeddings(text: str, model: str = "text-embedding-ada-002"):
"""
Simple function to get embeddings from ada
Usage:
>>> get_ada_embeddings("Hello World")
>>> get_ada_embeddings("Hello World", model="text-embedding-ada-001")
"""
openai.api_key = getenv("OPENAI_API_KEY")
text = text.replace("\n", " ")
    return openai.Embedding.create(
        input=[text],
        model=model,
    )["data"][0]["embedding"]

@ -3,6 +3,7 @@ from rich.markdown import Markdown
from rich.rule import Rule from rich.rule import Rule
from termcolor import colored, cprint from termcolor import colored, cprint
def display_markdown_message(message): def display_markdown_message(message):
""" """
Display markdown message. Works with multiline strings with lots of indentation. Display markdown message. Works with multiline strings with lots of indentation.
@ -23,7 +24,6 @@ def display_markdown_message(message):
print("") print("")
logo = """ logo = """
________ _ _______ _______ _____ ______ ________ _ _______ _______ _____ ______
/ ___/\ \/ \/ /\__ \\_ __ \/ \ / ___/ / ___/\ \/ \/ /\__ \\_ __ \/ \ / ___/
@ -45,10 +45,11 @@ logo2 = """
def print_colored_logo(): def print_colored_logo():
with open('swarms/logo.txt', 'r') as file: with open("swarms/logo.txt", "r") as file:
logo = file.read() logo = file.read()
text = colored(logo, 'red') text = colored(logo, "red")
print(text) print(text)
# # Call the function # # Call the function
# print_colored_logo() # print_colored_logo()

@ -1,6 +1,6 @@
from swarms.memory.vector_stores.pinecone import PineconeVector from swarms.memory.pinecone import PineconeVector
from swarms.memory.vector_stores.base import BaseVectorStore from swarms.memory.base import BaseVectorStore
from swarms.memory.vector_stores.pg import PgVectorVectorStore from swarms.memory.pg import PgVectorVectorStore
from swarms.memory.ocean import OceanDB from swarms.memory.ocean import OceanDB
__all__ = [ __all__ = [

@ -0,0 +1,703 @@
from __future__ import annotations
import logging
import uuid
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
)
import numpy as np
from swarms.structs.document import Document
from swarms.embeddings.base import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import xor_args
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
import chromadb
import chromadb.config
from chromadb.api.types import ID, OneOrMany, Where, WhereDocument
logger = logging.getLogger()
DEFAULT_K = 4 # Number of Documents to return.
def _results_to_docs(results: Any) -> List[Document]:
return [doc for doc, _ in _results_to_docs_and_scores(results)]
def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]:
return [
# TODO: Chroma can do batch querying,
# we shouldn't hard code to the 1st result
(Document(page_content=result[0], metadata=result[1] or {}), result[2])
for result in zip(
results["documents"][0],
results["metadatas"][0],
results["distances"][0],
)
]
class Chroma(VectorStore):
"""`ChromaDB` vector store.
To use, you should have the ``chromadb`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = Chroma("langchain_store", embeddings)
"""
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
def __init__(
self,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
embedding_function: Optional[Embeddings] = None,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
collection_metadata: Optional[Dict] = None,
client: Optional[chromadb.Client] = None,
relevance_score_fn: Optional[Callable[[float], float]] = None,
) -> None:
"""Initialize with a Chroma client."""
try:
import chromadb
import chromadb.config
except ImportError:
raise ImportError(
"Could not import chromadb python package. "
"Please install it with `pip install chromadb`."
)
if client is not None:
self._client_settings = client_settings
self._client = client
self._persist_directory = persist_directory
else:
if client_settings:
# If client_settings is provided with persist_directory specified,
# then it is "in-memory and persisting to disk" mode.
client_settings.persist_directory = (
persist_directory or client_settings.persist_directory
)
if client_settings.persist_directory is not None:
# Maintain backwards compatibility with chromadb < 0.4.0
major, minor, _ = chromadb.__version__.split(".")
if int(major) == 0 and int(minor) < 4:
client_settings.chroma_db_impl = "duckdb+parquet"
_client_settings = client_settings
elif persist_directory:
# Maintain backwards compatibility with chromadb < 0.4.0
major, minor, _ = chromadb.__version__.split(".")
if int(major) == 0 and int(minor) < 4:
_client_settings = chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
)
else:
_client_settings = chromadb.config.Settings(is_persistent=True)
_client_settings.persist_directory = persist_directory
else:
_client_settings = chromadb.config.Settings()
self._client_settings = _client_settings
self._client = chromadb.Client(_client_settings)
self._persist_directory = (
_client_settings.persist_directory or persist_directory
)
self._embedding_function = embedding_function
self._collection = self._client.get_or_create_collection(
name=collection_name,
embedding_function=self._embedding_function.embed_documents
if self._embedding_function is not None
else None,
metadata=collection_metadata,
)
self.override_relevance_score_fn = relevance_score_fn
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding_function
@xor_args(("query_texts", "query_embeddings"))
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Query the chroma collection."""
try:
import chromadb # noqa: F401
except ImportError:
raise ValueError(
"Could not import chromadb python package. "
"Please install it with `pip install chromadb`."
)
return self._collection.query(
query_texts=query_texts,
query_embeddings=query_embeddings,
n_results=n_results,
where=where,
where_document=where_document,
**kwargs,
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
Returns:
List[str]: List of IDs of the added texts.
"""
# TODO: Handle the case where the user doesn't provide ids on the Collection
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
embeddings = None
texts = list(texts)
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(texts)
if metadatas:
# fill metadatas with empty dicts if somebody
# did not specify metadata for all texts
length_diff = len(texts) - len(metadatas)
if length_diff:
metadatas = metadatas + [{}] * length_diff
empty_ids = []
non_empty_ids = []
for idx, m in enumerate(metadatas):
if m:
non_empty_ids.append(idx)
else:
empty_ids.append(idx)
if non_empty_ids:
metadatas = [metadatas[idx] for idx in non_empty_ids]
texts_with_metadatas = [texts[idx] for idx in non_empty_ids]
embeddings_with_metadatas = (
[embeddings[idx] for idx in non_empty_ids] if embeddings else None
)
ids_with_metadata = [ids[idx] for idx in non_empty_ids]
try:
self._collection.upsert(
metadatas=metadatas,
embeddings=embeddings_with_metadatas,
documents=texts_with_metadatas,
ids=ids_with_metadata,
)
except ValueError as e:
if "Expected metadata value to be" in str(e):
msg = (
"Try filtering complex metadata from the document using "
"langchain.vectorstores.utils.filter_complex_metadata."
)
raise ValueError(e.args[0] + "\n\n" + msg)
else:
raise e
if empty_ids:
texts_without_metadatas = [texts[j] for j in empty_ids]
embeddings_without_metadatas = (
[embeddings[j] for j in empty_ids] if embeddings else None
)
ids_without_metadatas = [ids[j] for j in empty_ids]
self._collection.upsert(
embeddings=embeddings_without_metadatas,
documents=texts_without_metadatas,
ids=ids_without_metadatas,
)
else:
self._collection.upsert(
embeddings=embeddings,
documents=texts,
ids=ids,
)
return ids
def similarity_search(
self,
query: str,
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with Chroma.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of documents most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, k, filter=filter)
return [doc for doc, _ in docs_and_scores]
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding (List[float]): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
results = self.__query_collection(
query_embeddings=embedding,
n_results=k,
where=filter,
where_document=where_document,
)
return _results_to_docs(results)
def similarity_search_by_vector_with_relevance_scores(
self,
embedding: List[float],
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""
Return docs most similar to embedding vector and similarity score.
Args:
embedding (List[float]): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to
the query text and cosine distance in float for each.
Lower score represents more similarity.
"""
results = self.__query_collection(
query_embeddings=embedding,
n_results=k,
where=filter,
where_document=where_document,
)
return _results_to_docs_and_scores(results)
def similarity_search_with_score(
self,
query: str,
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Run similarity search with Chroma with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to
the query text and cosine distance in float for each.
Lower score represents more similarity.
"""
if self._embedding_function is None:
results = self.__query_collection(
query_texts=[query],
n_results=k,
where=filter,
where_document=where_document,
)
else:
query_embedding = self._embedding_function.embed_query(query)
results = self.__query_collection(
query_embeddings=[query_embedding],
n_results=k,
where=filter,
where_document=where_document,
)
return _results_to_docs_and_scores(results)
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn:
return self.override_relevance_score_fn
distance = "l2"
distance_key = "hnsw:space"
metadata = self._collection.metadata
if metadata and distance_key in metadata:
distance = metadata[distance_key]
if distance == "cosine":
return self._cosine_relevance_score_fn
elif distance == "l2":
return self._euclidean_relevance_score_fn
elif distance == "ip":
return self._max_inner_product_relevance_score_fn
else:
raise ValueError(
"No supported normalization function"
f" for distance metric of type: {distance}."
"Consider providing relevance_score_fn to Chroma constructor."
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = DEFAULT_K,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents selected by maximal marginal relevance.
"""
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
where_document=where_document,
include=["metadatas", "documents", "distances", "embeddings"],
)
mmr_selected = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32),
results["embeddings"][0],
k=k,
lambda_mult=lambda_mult,
)
candidates = _results_to_docs(results)
selected_results = [r for i, r in enumerate(candidates) if i in mmr_selected]
return selected_results
def max_marginal_relevance_search(
self,
query: str,
k: int = DEFAULT_K,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self._embedding_function is None:
raise ValueError(
"For MMR search, you must specify an embedding function on" "creation."
)
embedding = self._embedding_function.embed_query(query)
docs = self.max_marginal_relevance_search_by_vector(
embedding,
k,
fetch_k,
lambda_mult=lambda_mult,
filter=filter,
where_document=where_document,
)
return docs
def delete_collection(self) -> None:
"""Delete the collection."""
self._client.delete_collection(self._collection.name)
def get(
self,
ids: Optional[OneOrMany[ID]] = None,
where: Optional[Where] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: Optional[WhereDocument] = None,
include: Optional[List[str]] = None,
) -> Dict[str, Any]:
"""Gets the collection.
Args:
ids: The ids of the embeddings to get. Optional.
where: A Where type dict used to filter results by.
E.g. `{"color" : "red", "price": 4.20}`. Optional.
limit: The number of documents to return. Optional.
offset: The offset to start returning results from.
Useful for paging results with limit. Optional.
where_document: A WhereDocument type dict used to filter by the documents.
                E.g. `{"$contains": "hello"}`. Optional.
include: A list of what to include in the results.
Can contain `"embeddings"`, `"metadatas"`, `"documents"`.
Ids are always included.
Defaults to `["metadatas", "documents"]`. Optional.
"""
kwargs = {
"ids": ids,
"where": where,
"limit": limit,
"offset": offset,
"where_document": where_document,
}
if include is not None:
kwargs["include"] = include
return self._collection.get(**kwargs)
def persist(self) -> None:
"""Persist the collection.
This can be used to explicitly persist the data to disk.
It will also be called automatically when the object is destroyed.
"""
if self._persist_directory is None:
raise ValueError(
"You must specify a persist_directory on"
"creation to persist the collection."
)
import chromadb
# Maintain backwards compatibility with chromadb < 0.4.0
major, minor, _ = chromadb.__version__.split(".")
if int(major) == 0 and int(minor) < 4:
self._client.persist()
def update_document(self, document_id: str, document: Document) -> None:
"""Update a document in the collection.
Args:
document_id (str): ID of the document to update.
document (Document): Document to update.
"""
return self.update_documents([document_id], [document])
def update_documents(self, ids: List[str], documents: List[Document]) -> None:
"""Update a document in the collection.
Args:
ids (List[str]): List of ids of the document to update.
documents (List[Document]): List of documents to update.
"""
text = [document.page_content for document in documents]
metadata = [document.metadata for document in documents]
if self._embedding_function is None:
raise ValueError(
"For update, you must specify an embedding function on creation."
)
embeddings = self._embedding_function.embed_documents(text)
if hasattr(
self._collection._client, "max_batch_size"
): # for Chroma 0.4.10 and above
from chromadb.utils.batch_utils import create_batches
for batch in create_batches(
api=self._collection._client,
ids=ids,
metadatas=metadata,
documents=text,
embeddings=embeddings,
):
self._collection.update(
ids=batch[0],
embeddings=batch[1],
documents=batch[3],
metadatas=batch[2],
)
else:
self._collection.update(
ids=ids,
embeddings=embeddings,
documents=text,
metadatas=metadata,
)
@classmethod
def from_texts(
cls: Type[Chroma],
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
client: Optional[chromadb.Client] = None,
collection_metadata: Optional[Dict] = None,
**kwargs: Any,
) -> Chroma:
"""Create a Chroma vectorstore from a raw documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
texts (List[str]): List of texts to add to the collection.
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
collection_metadata (Optional[Dict]): Collection configurations.
Defaults to None.
Returns:
Chroma: Chroma vectorstore.
"""
chroma_collection = cls(
collection_name=collection_name,
embedding_function=embedding,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
collection_metadata=collection_metadata,
**kwargs,
)
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if hasattr(
chroma_collection._client, "max_batch_size"
): # for Chroma 0.4.10 and above
from chromadb.utils.batch_utils import create_batches
for batch in create_batches(
api=chroma_collection._client,
ids=ids,
metadatas=metadatas,
documents=texts,
):
chroma_collection.add_texts(
texts=batch[3] if batch[3] else [],
metadatas=batch[2] if batch[2] else None,
ids=batch[0],
)
else:
chroma_collection.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return chroma_collection
@classmethod
def from_documents(
cls: Type[Chroma],
documents: List[Document],
embedding: Optional[Embeddings] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
        client: Optional[chromadb.Client] = None,
collection_metadata: Optional[Dict] = None,
**kwargs: Any,
) -> Chroma:
"""Create a Chroma vectorstore from a list of documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
collection_metadata (Optional[Dict]): Collection configurations.
Defaults to None.
Returns:
Chroma: Chroma vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
collection_metadata=collection_metadata,
**kwargs,
)
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
self._collection.delete(ids=ids)
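# Example usage (illustrative sketch, not part of the class; assumes an `Embeddings`
# implementation named `embeddings` and a list of `Document` objects named `docs`):
#
# db = Chroma.from_documents(
#     docs,
#     embedding=embeddings,
#     collection_name="swarms_docs",
#     persist_directory="./chroma_db",
#     collection_metadata={"hnsw:space": "cosine"},  # makes relevance scores use cosine
# )
# hits = db.similarity_search("What is a swarm?", k=4)
# diverse_hits = db.max_marginal_relevance_search("What is a swarm?", k=4, fetch_k=20)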

@ -0,0 +1,75 @@
"""Math utils."""
import logging
from typing import List, Optional, Tuple, Union
import numpy as np
logger = logging.getLogger(__name__)
Matrix = Union[List[List[float]], List[np.ndarray], np.ndarray]
def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray:
"""Row-wise cosine similarity between two equal-width matrices."""
if len(X) == 0 or len(Y) == 0:
return np.array([])
X = np.array(X)
Y = np.array(Y)
if X.shape[1] != Y.shape[1]:
raise ValueError(
f"Number of columns in X and Y must be the same. X has shape {X.shape} "
f"and Y has shape {Y.shape}."
)
try:
import simsimd as simd
X = np.array(X, dtype=np.float32)
Y = np.array(Y, dtype=np.float32)
Z = 1 - simd.cdist(X, Y, metric="cosine")
if isinstance(Z, float):
return np.array([Z])
return Z
except ImportError:
logger.info(
"Unable to import simsimd, defaulting to NumPy implementation. If you want "
"to use simsimd please install with `pip install simsimd`."
)
X_norm = np.linalg.norm(X, axis=1)
Y_norm = np.linalg.norm(Y, axis=1)
        # Ignore divide-by-zero and invalid-value warnings; they are handled below.
with np.errstate(divide="ignore", invalid="ignore"):
similarity = np.dot(X, Y.T) / np.outer(X_norm, Y_norm)
similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0
return similarity
def cosine_similarity_top_k(
X: Matrix,
Y: Matrix,
top_k: Optional[int] = 5,
score_threshold: Optional[float] = None,
) -> Tuple[List[Tuple[int, int]], List[float]]:
"""Row-wise cosine similarity with optional top-k and score threshold filtering.
Args:
X: Matrix.
Y: Matrix, same width as X.
top_k: Max number of results to return.
score_threshold: Minimum cosine similarity of results.
Returns:
Tuple of two lists. First contains two-tuples of indices (X_idx, Y_idx),
second contains corresponding cosine similarities.
"""
if len(X) == 0 or len(Y) == 0:
return [], []
score_array = cosine_similarity(X, Y)
score_threshold = score_threshold or -1.0
score_array[score_array < score_threshold] = 0
top_k = min(top_k or len(score_array), np.count_nonzero(score_array))
top_k_idxs = np.argpartition(score_array, -top_k, axis=None)[-top_k:]
top_k_idxs = top_k_idxs[np.argsort(score_array.ravel()[top_k_idxs])][::-1]
ret_idxs = np.unravel_index(top_k_idxs, score_array.shape)
scores = score_array.ravel()[top_k_idxs].tolist()
return list(zip(*ret_idxs)), scores # type: ignore
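# Example usage (illustrative sketch with tiny hand-made vectors):
#
# X = [[1.0, 0.0], [0.0, 1.0]]
# Y = [[1.0, 0.0], [1.0, 1.0]]
# print(cosine_similarity(X, Y))  # 2x2 matrix of row-wise cosine similarities
# idxs, scores = cosine_similarity_top_k(X, Y, top_k=2, score_threshold=0.5)
# print(idxs, scores)  # (X_row, Y_row) index pairs sorted by similarity; (0, 0) comes first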

@ -142,3 +142,12 @@ class OceanDB:
        except Exception as e:
            logging.error(f"Failed to query the collection. Error {e}")
            raise
# Example
# ocean = OceanDB()
# collection = ocean.create_collection("test", "text")
# ocean.append_document(collection, "hello world", "1")
# ocean.add_documents(collection, ["hello world", "hello world"], ["2", "3"])
# results = ocean.query(collection, ["hello world"], 3)
# print(results)

@ -0,0 +1,74 @@
"""Utility functions for working with vectors and vectorstores."""
from enum import Enum
from typing import List, Tuple, Type
import numpy as np
from swarms.structs.document import Document
from swarms.memory.cosine_similarity import cosine_similarity
class DistanceStrategy(str, Enum):
"""Enumerator of the Distance strategies for calculating distances
between vectors."""
EUCLIDEAN_DISTANCE = "EUCLIDEAN_DISTANCE"
MAX_INNER_PRODUCT = "MAX_INNER_PRODUCT"
DOT_PRODUCT = "DOT_PRODUCT"
JACCARD = "JACCARD"
COSINE = "COSINE"
def maximal_marginal_relevance(
query_embedding: np.ndarray,
embedding_list: list,
lambda_mult: float = 0.5,
k: int = 4,
) -> List[int]:
"""Calculate maximal marginal relevance."""
if min(k, len(embedding_list)) <= 0:
return []
if query_embedding.ndim == 1:
query_embedding = np.expand_dims(query_embedding, axis=0)
similarity_to_query = cosine_similarity(query_embedding, embedding_list)[0]
most_similar = int(np.argmax(similarity_to_query))
idxs = [most_similar]
selected = np.array([embedding_list[most_similar]])
while len(idxs) < min(k, len(embedding_list)):
best_score = -np.inf
idx_to_add = -1
similarity_to_selected = cosine_similarity(embedding_list, selected)
for i, query_score in enumerate(similarity_to_query):
if i in idxs:
continue
redundant_score = max(similarity_to_selected[i])
equation_score = (
lambda_mult * query_score - (1 - lambda_mult) * redundant_score
)
if equation_score > best_score:
best_score = equation_score
idx_to_add = i
idxs.append(idx_to_add)
selected = np.append(selected, [embedding_list[idx_to_add]], axis=0)
return idxs
def filter_complex_metadata(
documents: List[Document],
*,
allowed_types: Tuple[Type, ...] = (str, bool, int, float)
) -> List[Document]:
"""Filter out metadata types that are not supported for a vector store."""
updated_documents = []
for document in documents:
filtered_metadata = {}
for key, value in document.metadata.items():
if not isinstance(value, allowed_types):
continue
filtered_metadata[key] = value
document.metadata = filtered_metadata
updated_documents.append(document)
return updated_documents
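# Example usage (illustrative sketch with toy embeddings; the Document fields shown
# are assumptions for demonstration only):
#
# import numpy as np
# query = np.array([1.0, 0.0])
# candidates = [[1.0, 0.0], [0.9, 0.1], [0.0, 1.0]]
# picked = maximal_marginal_relevance(query, candidates, lambda_mult=0.5, k=2)
# print(picked)  # indices of the selected embeddings, e.g. [0, 2]
#
# docs = [Document(page_content="hello", metadata={"source": "a.txt", "tags": ["x"]})]
# clean = filter_complex_metadata(docs)  # drops the list-valued "tags" entry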

@ -0,0 +1,4 @@
"""
Implement a retriever for the vector store.
"""

@ -6,6 +6,9 @@ from swarms.models.openai_models import OpenAI, AzureOpenAI, OpenAIChat
from swarms.models.zephyr import Zephyr
from swarms.models.biogpt import BioGPT
from swarms.models.huggingface import HuggingfaceLLM
from swarms.models.wizard_storyteller import WizardLLMStoryTeller
from swarms.models.mpt import MPT7B
# MultiModal Models
@ -37,4 +40,6 @@ __all__ = [
"LayoutLMDocumentQA", "LayoutLMDocumentQA",
"BioGPT", "BioGPT",
"HuggingfaceLLM", "HuggingfaceLLM",
"MPT7B",
"WizardLLMStoryTeller",
]

@ -1,66 +0,0 @@
"""EdgeGPT model by OpenAI"""
import asyncio
import json
from pathlib import Path
from EdgeGPT.EdgeGPT import Chatbot, ConversationStyle
from EdgeGPT.EdgeUtils import Cookie, ImageQuery, Query
from EdgeGPT.ImageGen import ImageGen
class BingChat:
"""
    A wrapper around Microsoft Bing Chat, accessed through the EdgeGPT library.
Parameters
----------
cookies_path : str
Path to the cookies.json necessary for authenticating with EdgeGPT
Examples
--------
>>> edgegpt = BingChat(cookies_path="./path/to/cookies.json")
>>> response = edgegpt("Hello, my name is ChatGPT")
>>> image_path = edgegpt.create_img("Sunset over mountains")
"""
def __init__(self, cookies_path: str):
        with open(cookies_path, encoding="utf-8") as f:
            self.cookies = json.load(f)
self.bot = asyncio.run(Chatbot.create(cookies=self.cookies))
def __call__(
self, prompt: str, style: ConversationStyle = ConversationStyle.creative
) -> str:
"""
Get a text response using the EdgeGPT model based on the provided prompt.
"""
response = asyncio.run(
self.bot.ask(
prompt=prompt, conversation_style=style, simplify_response=True
)
)
return response["text"]
def create_img(
self, prompt: str, output_dir: str = "./output", auth_cookie: str = None
) -> str:
"""
Generate an image based on the provided prompt and save it in the given output directory.
Returns the path of the generated image.
"""
if not auth_cookie:
raise ValueError("Auth cookie is required for image generation.")
image_generator = ImageGen(auth_cookie, quiet=True)
images = image_generator.get_images(prompt)
image_generator.save_images(images, output_dir=output_dir)
return Path(output_dir) / images[0]["path"]
@staticmethod
def set_cookie_dir_path(path: str):
"""
Set the directory path for managing cookies.
"""
Cookie.dir_path = Path(path)
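# Example usage (illustrative sketch; the cookie paths and the auth cookie value are
# placeholders you must provide yourself):
#
# BingChat.set_cookie_dir_path("./cookies")
# chat = BingChat(cookies_path="./cookies.json")
# print(chat("Summarize the swarms framework in one sentence."))
# image_path = chat.create_img("A swarm of bees at sunset", auth_cookie="<your _U cookie>")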

@ -1,951 +0,0 @@
from __future__ import annotations
import logging
import sys
import warnings
from typing import (
AbstractSet,
Any,
AsyncIterator,
Callable,
Collection,
Dict,
Iterator,
List,
Literal,
Mapping,
Optional,
Set,
Tuple,
Union,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import BaseLLM, create_base_retry_decorator
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import Generation, LLMResult
from langchain.schema.output import GenerationChunk
from langchain.utils import get_from_dict_or_env, get_pydantic_field_names
from langchain.utils.utils import build_extra_kwargs
logger = logging.getLogger(__name__)
def update_token_usage(
keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
"""Update token usage."""
_keys_to_use = keys.intersection(response["usage"])
for _key in _keys_to_use:
if _key not in token_usage:
token_usage[_key] = response["usage"][_key]
else:
token_usage[_key] += response["usage"][_key]
def _stream_response_to_generation_chunk(
stream_response: Dict[str, Any],
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
return GenerationChunk(
text=stream_response["choices"][0]["text"],
generation_info=dict(
finish_reason=stream_response["choices"][0].get("finish_reason", None),
logprobs=stream_response["choices"][0].get("logprobs", None),
),
)
def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:
"""Update response from the stream response."""
response["choices"][0]["text"] += stream_response["choices"][0]["text"]
response["choices"][0]["finish_reason"] = stream_response["choices"][0].get(
"finish_reason", None
)
response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"]
def _streaming_response_template() -> Dict[str, Any]:
return {
"choices": [
{
"text": "",
"finish_reason": None,
"logprobs": None,
}
]
}
def _create_retry_decorator(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
    import openai
    errors = [
        openai.error.Timeout,
        openai.error.APIError,
        openai.error.APIConnectionError,
        openai.error.RateLimitError,
        openai.error.ServiceUnavailableError,
    ]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
def completion_with_retry(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return llm.client.create(**kwargs)
return _completion_with_retry(**kwargs)
async def acompletion_with_retry(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
class BaseOpenAI(BaseLLM):
"""Base OpenAI large language model class."""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"openai_api_key": "OPENAI_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
client: Any = None #: :meta private:
model_name: str = Field(default="text-davinci-003", alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
    the model's maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
best_of: int = 1
"""Generates best_of completions server-side and returns the "best"."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
openai_api_base: Optional[str] = None
openai_organization: Optional[str] = None
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
batch_size: int = 20
"""Batch size to use when passing multiple documents to generate."""
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to OpenAI completion API. Default is 600 seconds."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
"""Set of special tokens that are allowed。"""
disallowed_special: Union[Literal["all"], Collection[str]] = "all"
"""Set of special tokens that are not allowed。"""
tiktoken_model_name: Optional[str] = None
"""The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of tokens in documents to constrain
them to be under a certain limit. By default, when set to None, this will
be the same as the embedding model name. However, there are some cases
where you may want to use this Embedding class with a model name not
supported by tiktoken. This can include when using Azure embeddings or
when using one of the many model providers that expose an OpenAI-like
API but with different models. In those cases, in order to avoid erroring
when tiktoken is called, you can specify a model name to use here."""
def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore
"""Initialize the OpenAI object."""
model_name = data.get("model_name", "")
if (
model_name.startswith("gpt-3.5-turbo") or model_name.startswith("gpt-4")
) and "-instruct" not in model_name:
warnings.warn(
"You are trying to use a chat model. This way of initializing it is "
"no longer supported. Instead, please use: "
"`from langchain.chat_models import ChatOpenAI`"
)
return OpenAIChat(**data)
return super().__new__(cls)
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
values["model_kwargs"] = build_extra_kwargs(
extra, values, all_required_field_names
)
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
values["openai_api_base"] = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
values["openai_organization"] = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
            import openai
            values["client"] = openai.Completion
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if values["streaming"] and values["n"] > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
raise ValueError("Cannot stream results when best_of > 1.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
normal_params = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"request_timeout": self.request_timeout,
"logit_bias": self.logit_bias,
}
# Azure gpt-35-turbo doesn't support best_of
# don't specify best_of if it is 1
if self.best_of > 1:
normal_params["best_of"] = self.best_of
return {**normal_params, **self.model_kwargs}
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = {**self._invocation_params, **kwargs, "stream": True}
self.get_sub_prompts(params, [prompt], stop) # this mutates params
for stream_resp in completion_with_retry(
self, prompt=prompt, run_manager=run_manager, **params
):
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
logprobs=chunk.generation_info["logprobs"]
if chunk.generation_info
else None,
)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
params = {**self._invocation_params, **kwargs, "stream": True}
        self.get_sub_prompts(params, [prompt], stop)  # this mutates params
async for stream_resp in await acompletion_with_retry(
self, prompt=prompt, run_manager=run_manager, **params
):
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
logprobs=chunk.generation_info["logprobs"]
if chunk.generation_info
else None,
)
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to OpenAI's endpoint with k unique prompts.
Args:
prompts: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
Example:
.. code-block:: python
response = openai.generate(["Tell me a joke."])
"""
# TODO: write a unit test for this
params = self._invocation_params
params = {**params, **kwargs}
sub_prompts = self.get_sub_prompts(params, prompts, stop)
choices = []
token_usage: Dict[str, int] = {}
# Get the token usage from the response.
# Includes prompt, completion, and total tokens used.
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"text": generation.text,
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
else:
response = completion_with_retry(
self, prompt=_prompts, run_manager=run_manager, **params
)
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
return self.create_llm_result(choices, prompts, token_usage)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to OpenAI's endpoint async with k unique prompts."""
params = self._invocation_params
params = {**params, **kwargs}
sub_prompts = self.get_sub_prompts(params, prompts, stop)
choices = []
token_usage: Dict[str, int] = {}
# Get the token usage from the response.
# Includes prompt, completion, and total tokens used.
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(
_prompts[0], stop, run_manager, **kwargs
):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"text": generation.text,
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
else:
response = await acompletion_with_retry(
self, prompt=_prompts, run_manager=run_manager, **params
)
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
return self.create_llm_result(choices, prompts, token_usage)
def get_sub_prompts(
self,
params: Dict[str, Any],
prompts: List[str],
stop: Optional[List[str]] = None,
) -> List[List[str]]:
"""Get the sub prompts for llm call."""
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
if params["max_tokens"] == -1:
if len(prompts) != 1:
raise ValueError(
"max_tokens set to -1 not supported for multiple inputs."
)
params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
sub_prompts = [
prompts[i : i + self.batch_size]
for i in range(0, len(prompts), self.batch_size)
]
return sub_prompts
def create_llm_result(
self, choices: Any, prompts: List[str], token_usage: Dict[str, int]
) -> LLMResult:
"""Create the LLMResult from the choices and prompts."""
generations = []
for i, _ in enumerate(prompts):
sub_choices = choices[i * self.n : (i + 1) * self.n]
generations.append(
[
Generation(
text=choice["text"],
generation_info=dict(
finish_reason=choice.get("finish_reason"),
logprobs=choice.get("logprobs"),
),
)
for choice in sub_choices
]
)
llm_output = {"token_usage": token_usage, "model_name": self.model_name}
return LLMResult(generations=generations, llm_output=llm_output)
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
openai_creds: Dict[str, Any] = {
"api_key": self.openai_api_key,
"api_base": self.openai_api_base,
"organization": self.openai_organization,
}
if self.openai_proxy:
            import openai
            openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy}  # type: ignore[assignment] # noqa: E501
return {**openai_creds, **self._default_params}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "openai"
def get_token_ids(self, text: str) -> List[int]:
"""Get the token IDs using the tiktoken package."""
# tiktoken NOT supported for Python < 3.8
if sys.version_info[1] < 8:
            return super().get_token_ids(text)
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
)
model_name = self.tiktoken_model_name or self.model_name
try:
enc = tiktoken.encoding_for_model(model_name)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
enc = tiktoken.get_encoding(model)
return enc.encode(
text,
allowed_special=self.allowed_special,
disallowed_special=self.disallowed_special,
)
@staticmethod
def modelname_to_contextsize(modelname: str) -> int:
"""Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Example:
.. code-block:: python
max_tokens = openai.modelname_to_contextsize("text-davinci-003")
"""
model_token_mapping = {
"gpt-4": 8192,
"gpt-4-0314": 8192,
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
"gpt-4-32k-0314": 32768,
"gpt-4-32k-0613": 32768,
"gpt-3.5-turbo": 4096,
"gpt-3.5-turbo-0301": 4096,
"gpt-3.5-turbo-0613": 4096,
"gpt-3.5-turbo-16k": 16385,
"gpt-3.5-turbo-16k-0613": 16385,
"gpt-3.5-turbo-instruct": 4096,
"text-ada-001": 2049,
"ada": 2049,
"text-babbage-001": 2040,
"babbage": 2049,
"text-curie-001": 2049,
"curie": 2049,
"davinci": 2049,
"text-davinci-003": 4097,
"text-davinci-002": 4097,
"code-davinci-002": 8001,
"code-davinci-001": 8001,
"code-cushman-002": 2048,
"code-cushman-001": 2048,
}
# handling finetuned models
if "ft-" in modelname:
modelname = modelname.split(":")[0]
context_size = model_token_mapping.get(modelname, None)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid OpenAI model name."
"Known models are: " + ", ".join(model_token_mapping.keys())
)
return context_size
@property
def max_context_size(self) -> int:
"""Get max context size for this model."""
return self.modelname_to_contextsize(self.model_name)
def max_tokens_for_prompt(self, prompt: str) -> int:
"""Calculate the maximum number of tokens possible to generate for a prompt.
Args:
prompt: The prompt to pass into the model.
Returns:
The maximum number of tokens to generate for a prompt.
Example:
.. code-block:: python
max_tokens = openai.max_token_for_prompt("Tell me a joke.")
"""
num_tokens = self.get_num_tokens(prompt)
return self.max_context_size - num_tokens
class OpenAI(BaseOpenAI):
"""OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import OpenAI
openai = OpenAI(model_name="text-davinci-003")
"""
@property
def _invocation_params(self) -> Dict[str, Any]:
return {**{"model": self.model_name}, **super()._invocation_params}
class AzureOpenAI(BaseOpenAI):
"""Azure-specific OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import AzureOpenAI
openai = AzureOpenAI(model_name="text-davinci-003")
"""
deployment_name: str = ""
"""Deployment name to use."""
openai_api_type: str = ""
openai_api_version: str = ""
@root_validator()
def validate_azure_settings(cls, values: Dict) -> Dict:
values["openai_api_version"] = get_from_dict_or_env(
values,
"openai_api_version",
"OPENAI_API_VERSION",
)
values["openai_api_type"] = get_from_dict_or_env(
values, "openai_api_type", "OPENAI_API_TYPE", "azure"
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {
**{"deployment_name": self.deployment_name},
**super()._identifying_params,
}
@property
def _invocation_params(self) -> Dict[str, Any]:
openai_params = {
"engine": self.deployment_name,
"api_type": self.openai_api_type,
"api_version": self.openai_api_version,
}
return {**openai_params, **super()._invocation_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "azure"
class OpenAIChat(BaseLLM):
"""OpenAI Chat large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import OpenAIChat
openaichat = OpenAIChat(model_name="gpt-3.5-turbo")
"""
client: Any #: :meta private:
model_name: str = "gpt-3.5-turbo"
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
openai_api_base: Optional[str] = None
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
max_retries: int = 6
"""Maximum number of retries to make when generating."""
prefix_messages: List = Field(default_factory=list)
"""Series of messages for Chat input."""
streaming: bool = False
"""Whether to stream the results or not."""
allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
"""Set of special tokens that are allowed。"""
disallowed_special: Union[Literal["all"], Collection[str]] = "all"
"""Set of special tokens that are not allowed。"""
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_api_base = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
openai_proxy = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
openai_organization = get_from_dict_or_env(
values, "openai_organization", "OPENAI_ORGANIZATION", default=""
)
try:
            import openai
            openai.api_key = openai_api_key
            if openai_api_base:
                openai.api_base = openai_api_base
            if openai_organization:
                openai.organization = openai_organization
            if openai_proxy:
                openai.proxy = {"http": openai_proxy, "https": openai_proxy}  # type: ignore[assignment] # noqa: E501
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = llm.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
warnings.warn(
"You are trying to use a chat model. This way of initializing it is "
"no longer supported. Instead, please use: "
"`from langchain.chat_models import ChatOpenAI`"
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return self.model_kwargs
def _get_chat_params(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> Tuple:
if len(prompts) > 1:
raise ValueError(
f"OpenAIChat currently only supports single prompt, got {prompts}"
)
messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}]
params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
if params.get("max_tokens") == -1:
# for ChatGPT api, omitting max_tokens is equivalent to having no limit
del params["max_tokens"]
return messages, params
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
messages, params = self._get_chat_params([prompt], stop)
params = {**params, **kwargs, "stream": True}
for stream_resp in completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
token = stream_resp["choices"][0]["delta"].get("content", "")
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
run_manager.on_llm_new_token(token, chunk=chunk)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
messages, params = self._get_chat_params([prompt], stop)
params = {**params, **kwargs, "stream": True}
async for stream_resp in await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
token = stream_resp["choices"][0]["delta"].get("content", "")
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(token, chunk=chunk)
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
if self.streaming:
generation: Optional[GenerationChunk] = None
for chunk in self._stream(prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
messages, params = self._get_chat_params(prompts, stop)
params = {**params, **kwargs}
full_response = completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
)
llm_output = {
"token_usage": full_response["usage"],
"model_name": self.model_name,
}
return LLMResult(
generations=[
[Generation(text=full_response["choices"][0]["message"]["content"])]
],
llm_output=llm_output,
)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
if self.streaming:
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
messages, params = self._get_chat_params(prompts, stop)
params = {**params, **kwargs}
full_response = await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
)
llm_output = {
"token_usage": full_response["usage"],
"model_name": self.model_name,
}
return LLMResult(
generations=[
[Generation(text=full_response["choices"][0]["message"]["content"])]
],
llm_output=llm_output,
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "openai-chat"
def get_token_ids(self, text: str) -> List[int]:
"""Get the token IDs using the tiktoken package."""
# tiktoken NOT supported for Python < 3.8
if sys.version_info[1] < 8:
return super().get_token_ids(text)
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
)
enc = tiktoken.encoding_for_model(self.model_name)
return enc.encode(
text,
allowed_special=self.allowed_special,
disallowed_special=self.disallowed_special,
)
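# Example usage (illustrative sketch; assumes OPENAI_API_KEY is set in the environment
# and a pre-1.0 `openai` package is installed):
#
# llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0.7)
# print(llm("Tell me a joke."))
#
# chat = OpenAIChat(model_name="gpt-3.5-turbo")
# print(chat("Explain maximal marginal relevance in one sentence."))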

@ -157,6 +157,29 @@ class HuggingfaceLLM:
        except Exception as e:
            self.logger.error(f"Failed to generate the text: {e}")
            raise
async def run_async(self, task: str, *args, **kwargs) -> str:
"""
Run the model asynchronously
Args:
task (str): Task to run.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
        Examples:
            >>> # illustrative only; assumes `llm` is an initialized HuggingfaceLLM
            >>> await llm.run_async("Generate a 10000 word blog on health and wellness.")
"""
# Wrapping synchronous calls with async
return self.run(task, *args, **kwargs)
    def __call__(self, prompt_text: str):
        """
@ -212,3 +235,23 @@ class HuggingfaceLLM:
        except Exception as e:
            self.logger.error(f"Failed to generate the text: {e}")
            raise
async def __call_async__(self, task: str, *args, **kwargs) -> str:
"""Call the model asynchronously""" ""
return await self.run_async(task, *args, **kwargs)
def save_model(self, path: str):
self.model.save_pretrained(path)
self.tokenizer.save_pretrained(path)
def gpu_available(self) -> bool:
return torch.cuda.is_available()
def memory_consumption(self) -> dict:
if self.gpu_available():
torch.cuda.synchronize()
allocated = torch.cuda.memory_allocated()
reserved = torch.cuda.memory_reserved()
            return {"allocated": allocated, "reserved": reserved}
        else:
            return {"error": "GPU not available"}
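# Example usage of the helpers above (illustrative sketch; assumes `llm` is an
# already-initialized HuggingfaceLLM instance):
#
# llm.save_model("./checkpoints/my-model")  # writes model and tokenizer to disk
# print(llm.gpu_available())                # True when CUDA is visible
# print(llm.memory_consumption())           # {'allocated': ..., 'reserved': ...} on GPU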

@ -0,0 +1,177 @@
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import logging
class MPT7B:
"""
MPT class for generating text using a pre-trained model.
Args:
model_name (str): Name of the model to use.
tokenizer_name (str): Name of the tokenizer to use.
max_tokens (int): Maximum number of tokens to generate.
Attributes:
model_name (str): Name of the model to use.
tokenizer_name (str): Name of the tokenizer to use.
tokenizer (transformers.AutoTokenizer): Tokenizer object.
model (transformers.AutoModelForCausalLM): Model object.
pipe (transformers.pipelines.TextGenerationPipeline): Text generation pipeline.
max_tokens (int): Maximum number of tokens to generate.
    Examples:
        >>> mpt_instance = MPT7B("mosaicml/mpt-7b-storywriter", "EleutherAI/gpt-neox-20b", max_tokens=150)
        >>> mpt_instance("generate", "Once upon a time in a land far, far away...")
"""
def __init__(self, model_name: str, tokenizer_name: str, max_tokens: int = 100):
# Loading model and tokenizer details
self.model_name = model_name
self.tokenizer_name = tokenizer_name
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
# Setup logging
logging.basicConfig(level=logging.INFO)
self.logger = logging.getLogger(__name__)
config = AutoModelForCausalLM.from_pretrained(
model_name, trust_remote_code=True
).config
self.model = AutoModelForCausalLM.from_pretrained(
model_name, config=config, trust_remote_code=True
)
# Initializing a text-generation pipeline
self.pipe = pipeline(
"text-generation",
model=self.model,
tokenizer=self.tokenizer,
device="cuda:0",
)
self.max_tokens = max_tokens
def run(self, task: str, *args, **kwargs) -> str:
"""
Run the model
Args:
task (str): Task to run.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Examples:
            >>> mpt_instance = MPT7B('mosaicml/mpt-7b-storywriter', "EleutherAI/gpt-neox-20b", max_tokens=150)
>>> mpt_instance("generate", "Once upon a time in a land far, far away...")
'Once upon a time in a land far, far away...'
>>> mpt_instance.batch_generate(["In the deep jungles,", "At the heart of the city,"], temperature=0.7)
['In the deep jungles,',
'At the heart of the city,']
>>> mpt_instance.freeze_model()
>>> mpt_instance.unfreeze_model()
"""
if task == "generate":
return self.generate(*args, **kwargs)
else:
raise ValueError(f"Task '{task}' not recognized!")
async def run_async(self, task: str, *args, **kwargs) -> str:
"""
Run the model asynchronously
Args:
task (str): Task to run.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Examples:
            >>> mpt_instance = MPT7B('mosaicml/mpt-7b-storywriter', "EleutherAI/gpt-neox-20b", max_tokens=150)
>>> mpt_instance("generate", "Once upon a time in a land far, far away...")
'Once upon a time in a land far, far away...'
>>> mpt_instance.batch_generate(["In the deep jungles,", "At the heart of the city,"], temperature=0.7)
['In the deep jungles,',
'At the heart of the city,']
>>> mpt_instance.freeze_model()
>>> mpt_instance.unfreeze_model()
"""
# Wrapping synchronous calls with async
return self.run(task, *args, **kwargs)
def generate(self, prompt: str) -> str:
"""
Generate Text
Args:
prompt (str): Prompt to generate text from.
Examples:
"""
with torch.autocast("cuda", dtype=torch.bfloat16):
return self.pipe(
prompt, max_new_tokens=self.max_tokens, do_sample=True, use_cache=True
)[0]["generated_text"]
async def generate_async(self, prompt: str) -> str:
"""Generate Async"""
return self.generate(prompt)
def __call__(self, task: str, *args, **kwargs) -> str:
"""Call the model"""
return self.run(task, *args, **kwargs)
async def __call_async__(self, task: str, *args, **kwargs) -> str:
"""Call the model asynchronously""" ""
return await self.run_async(task, *args, **kwargs)
def batch_generate(self, prompts: list, temperature: float = 1.0) -> list:
"""Batch generate text"""
self.logger.info(f"Generating text for {len(prompts)} prompts...")
results = []
with torch.autocast("cuda", dtype=torch.bfloat16):
for prompt in prompts:
result = self.pipe(
prompt,
max_new_tokens=self.max_tokens,
do_sample=True,
use_cache=True,
temperature=temperature,
)
results.append(result[0]["generated_text"])
return results
def unfreeze_model(self):
"""Unfreeze the model"""
for param in self.model.parameters():
param.requires_grad = True
self.logger.info("Model has been unfrozen.")
# Example usage:
# mpt_instance = MPT7B(
#     "mosaicml/mpt-7b-storywriter", "EleutherAI/gpt-neox-20b", max_tokens=150
# )
#
# # For synchronous calls
# print(mpt_instance("generate", "Once upon a time in a land far, far away..."))
# print(mpt_instance.batch_generate(["In the deep jungles,", "At the heart of the city,"], temperature=0.7))
#
# # For asynchronous calls, use an event loop or a similar async framework, e.g.:
# # import asyncio
# # asyncio.run(mpt_instance.__call_async__("generate", "Once upon a time in a land far, far away..."))
#
# # Freezing and unfreezing the model
# mpt_instance.freeze_model()
# mpt_instance.unfreeze_model()

File diff suppressed because it is too large

@ -0,0 +1,214 @@
import logging
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
class WizardLLMStoryTeller:
"""
A class for running inference on a given model.
Attributes:
model_id (str): The ID of the model.
device (str): The device to run the model on (either 'cuda' or 'cpu').
max_length (int): The maximum length of the output sequence.
quantize (bool, optional): Whether to use quantization. Defaults to False.
quantization_config (dict, optional): The configuration for quantization.
verbose (bool, optional): Whether to print verbose logs. Defaults to False.
logger (logging.Logger, optional): The logger to use. Defaults to a basic logger.
    # Usage
    ```
    from swarms.models.wizard_storyteller import WizardLLMStoryTeller
    llm = WizardLLMStoryTeller()  # uses the default model_id
    prompt_text = "Once upon a time"
    generated_text = llm(prompt_text)
    print(generated_text)
    ```
"""
def __init__(
self,
model_id: str = "TheBloke/WizardLM-Uncensored-SuperCOT-StoryTelling-30B-GGUF",
device: str = None,
max_length: int = 500,
quantize: bool = False,
quantization_config: dict = None,
verbose=False,
# logger=None,
distributed=False,
decoding=False,
):
self.logger = logging.getLogger(__name__)
self.device = (
device if device else ("cuda" if torch.cuda.is_available() else "cpu")
)
self.model_id = model_id
self.max_length = max_length
self.verbose = verbose
self.distributed = distributed
self.decoding = decoding
self.quantization_config = quantization_config  # stored so load_model() can rebuild the quantization config
self.model, self.tokenizer = None, None
# self.log = Logging()
if self.distributed:
assert (
torch.cuda.device_count() > 1
), "You need more than 1 gpu for distributed processing"
bnb_config = None
if quantize:
if not quantization_config:
quantization_config = {
"load_in_4bit": True,
"bnb_4bit_use_double_quant": True,
"bnb_4bit_quant_type": "nf4",
"bnb_4bit_compute_dtype": torch.bfloat16,
}
bnb_config = BitsAndBytesConfig(**quantization_config)
try:
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
self.model = AutoModelForCausalLM.from_pretrained(
self.model_id, quantization_config=bnb_config
)
self.model # .to(self.device)
except Exception as e:
self.logger.error(f"Failed to load the model or the tokenizer: {e}")
raise
def load_model(self):
"""Load the model"""
if not self.model or not self.tokenizer:
try:
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
bnb_config = (
BitsAndBytesConfig(**self.quantization_config)
if self.quantization_config
else None
)
self.model = AutoModelForCausalLM.from_pretrained(
self.model_id, quantization_config=bnb_config
).to(self.device)
if self.distributed:
self.model = DDP(self.model)
except Exception as error:
self.logger.error(f"Failed to load the model or the tokenizer: {error}")
raise
def run(self, prompt_text: str):
"""
Generate a response based on the prompt text.
Args:
- prompt_text (str): Text to prompt the model.
- max_length (int): Maximum length of the response.
Returns:
- Generated text (str).
"""
self.load_model()
max_length = self.max_length
try:
inputs = self.tokenizer.encode(prompt_text, return_tensors="pt").to(
self.device
)
# self.log.start()
if self.decoding:
with torch.no_grad():
for _ in range(max_length):
output_sequence = []
outputs = self.model.generate(
inputs, max_length=len(inputs) + 1, do_sample=True
)
output_tokens = outputs[0][-1]
output_sequence.append(output_tokens.item())
# print token in real-time
print(
self.tokenizer.decode(
[output_tokens], skip_special_tokens=True
),
end="",
flush=True,
)
inputs = outputs
else:
with torch.no_grad():
outputs = self.model.generate(
inputs, max_length=max_length, do_sample=True
)
del inputs
return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
except Exception as e:
self.logger.error(f"Failed to generate the text: {e}")
raise
def __call__(self, prompt_text: str):
"""
Generate a response based on the prompt text.
Args:
- prompt_text (str): Text to prompt the model.
- max_length (int): Maximum length of the response.
Returns:
- Generated text (str).
"""
self.load_model()
max_length = self.max_length
try:
inputs = self.tokenizer.encode(prompt_text, return_tensors="pt").to(
self.device
)
# self.log.start()
if self.decoding:
with torch.no_grad():
for _ in range(max_length):
output_sequence = []
outputs = self.model.generate(
inputs, max_length=len(inputs) + 1, do_sample=True
)
output_tokens = outputs[0][-1]
output_sequence.append(output_tokens.item())
# print token in real-time
print(
self.tokenizer.decode(
[output_tokens], skip_special_tokens=True
),
end="",
flush=True,
)
inputs = outputs
else:
with torch.no_grad():
outputs = self.model.generate(
inputs, max_length=max_length, do_sample=True
)
del inputs
return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
except Exception as e:
self.logger.error(f"Failed to generate the text: {e}")
raise
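A minimal usage sketch for the class above (the quantization and streaming flags shown are optional; fitting the default 30B checkpoint on the available GPU is a strong assumption):

```
# Sketch only: instantiate the storyteller defined above and stream tokens as they are generated.
teller = WizardLLMStoryTeller(
    model_id="TheBloke/WizardLM-Uncensored-SuperCOT-StoryTelling-30B-GGUF",
    max_length=200,
    quantize=True,   # use the default 4-bit BitsAndBytes config defined in __init__
    decoding=True,   # print tokens in real time inside run()
)

story = teller("Write the opening paragraph of a space-opera novel.")
print(story)
```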

@ -27,7 +27,13 @@ class HumanMessage(Message):
    A Message from a human.
    """

-    def __init__(self, content: str, role: str = "Human", additional_kwargs: Dict = None, example: bool = False):
+    def __init__(
+        self,
+        content: str,
+        role: str = "Human",
+        additional_kwargs: Dict = None,
+        example: bool = False,
+    ):
        super().__init__(content, role, additional_kwargs)
        self.example = example

@ -40,7 +46,13 @@ class AIMessage(Message):
    A Message from an AI.
    """

-    def __init__(self, content: str, role: str = "AI", additional_kwargs: Dict = None, example: bool = False):
+    def __init__(
+        self,
+        content: str,
+        role: str = "AI",
+        additional_kwargs: Dict = None,
+        example: bool = False,
+    ):
        super().__init__(content, role, additional_kwargs)
        self.example = example

@ -54,7 +66,9 @@ class SystemMessage(Message):
    of input messages.
    """

-    def __init__(self, content: str, role: str = "System", additional_kwargs: Dict = None):
+    def __init__(
+        self, content: str, role: str = "System", additional_kwargs: Dict = None
+    ):
        super().__init__(content, role, additional_kwargs)

    def get_type(self) -> str:

@ -66,7 +80,13 @@ class FunctionMessage(Message):
    A Message for passing the result of executing a function back to a model.
    """

-    def __init__(self, content: str, role: str = "Function", name: str, additional_kwargs: Dict = None):
+    def __init__(
+        self,
+        content: str,
+        role: str = "Function",
+        name: str = None,
+        additional_kwargs: Dict = None,
+    ):
        super().__init__(content, role, additional_kwargs)
        self.name = name

@ -0,0 +1,13 @@
def task_planner_prompt(objective):
    return f"""
    You are a planner who is an expert at coming up with a todo list for a given objective.
    Useful for when you need to come up with todo lists.

    Input: an objective to create a todo list for. Output: a todo list for that objective. For the main objective,
    lay out each important subtask that needs to be accomplished, and rank all subtasks with a ranking system that
    prioritizes the subtasks most likely to accomplish the main objective. Use the following ranking system:
    0.0 -> 1.0, with 1.0 being the most important subtask.

    Please be very clear about what the objective is! Come up with a todo list for this objective: {objective}
    """

@ -1,7 +1,5 @@
 from swarms.structs.workflow import Workflow
 from swarms.structs.task import Task
+from swarms.structs.flow import Flow

-__all__ = [
-    "Workflow",
-    "Task",
-]
+__all__ = ["Workflow", "Task", "Flow"]

@ -0,0 +1,91 @@
from __future__ import annotations
import asyncio
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Literal, Sequence
from langchain.load.serializable import Serializable
from pydantic import Field
class Document(Serializable):
"""Class for storing a piece of text and associated metadata."""
page_content: str
"""String text."""
metadata: dict = Field(default_factory=dict)
"""Arbitrary metadata about the page content (e.g., source, relationships to other
documents, etc.).
"""
type: Literal["Document"] = "Document"
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
class BaseDocumentTransformer(ABC):
"""Abstract base class for document transformation systems.
A document transformation system takes a sequence of Documents and returns a
sequence of transformed Documents.
Example:
.. code-block:: python
class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
embeddings: Embeddings
similarity_fn: Callable = cosine_similarity
similarity_threshold: float = 0.95
class Config:
arbitrary_types_allowed = True
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
included_idxs = _filter_similar_embeddings(
embedded_documents, self.similarity_fn, self.similarity_threshold
)
return [stateful_documents[i] for i in sorted(included_idxs)]
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
""" # noqa: E501
@abstractmethod
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Transform a list of documents.
Args:
documents: A sequence of Documents to be transformed.
Returns:
A list of transformed Documents.
"""
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Asynchronously transform a list of documents.
Args:
documents: A sequence of Documents to be transformed.
Returns:
A list of transformed Documents.
"""
return await asyncio.get_running_loop().run_in_executor(
None, partial(self.transform_documents, **kwargs), documents
)
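As a quick illustration of how the abstract base above is meant to be subclassed, here is a minimal, hypothetical transformer; the class name and truncation behavior are illustrative only and not part of this diff:

```
from typing import Any, Sequence


class TruncatingTransformer(BaseDocumentTransformer):
    """Illustrative subclass: clip each document's text to at most max_chars characters."""

    def __init__(self, max_chars: int = 1000):
        self.max_chars = max_chars

    def transform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        return [
            Document(
                page_content=doc.page_content[: self.max_chars],
                metadata=doc.metadata,
            )
            for doc in documents
        ]


docs = [Document(page_content="swarms " * 500)]
clipped = TruncatingTransformer(max_chars=50).transform_documents(docs)
print(len(clipped[0].page_content))  # 50
```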

@ -1,7 +1,51 @@
"""
Flow,
A chain-like structure from LangChain that gives language models the autonomy
to generate sequential responses.

Features:
* User-defined queries
* Dynamically keep generating until <DONE> is output by the agent
* Interactive mode: the AI generates, then waits for user input
* Message history and performance history fed into the context
* Ability to save and load flows
* Ability to provide feedback on responses
* Ability to provide a stopping condition
* Ability to provide a retry mechanism
* Ability to provide a loop interval
----------------------------------
Example:
from swarms.models import OpenAIChat
from swarms.structs import Flow
# Initialize the language model; this can be swapped out for Anthropic,
# Hugging Face models such as Mistral, etc.
llm = OpenAIChat(
openai_api_key=api_key,
temperature=0.5,
)
# Initialize the flow
flow = Flow(
llm=llm, max_loops=5,
#system_prompt=SYSTEM_PROMPT,
#retry_interval=1,
)
flow.run("Generate a 10,000 word blog")
# Now save the flow
flow.save("path/flow.yaml")
"""
import json
import logging
import time
-from typing import Any, Callable, Dict, List, Optional, Path, Union
+from typing import Any, Callable, Dict, List, Optional, Union
+from pathlib import Path

import yaml

@ -12,86 +56,11 @@ def stop_when_repeats(response: str) -> bool:
    return "Stop" in response.lower()
# class Flow:
# def __init__(
# self,
# llm: Any,
# template: str,
# max_loops: int = 1,
# stopping_condition: Optional[Callable[[str], bool]] = None,
# **kwargs: Any
# ):
# self.llm = llm
# self.template = template
# self.max_loops = max_loops
# self.stopping_condition = stopping_condition
# self.feedback = []
# self.history = []
# def __call__(
# self,
# prompt,
# **kwargs
# ) -> str:
# """Invoke the flow by providing a template and it's variables"""
# response = self.llm(prompt, **kwargs)
# return response
# def _check_stopping_condition(self, response: str) -> bool:
# """Check if the stopping condition is met"""
# if self.stopping_condition:
# return self.stopping_condition(response)
# return False
# def provide_feedback(self, feedback: str) -> None:
# """Allow users to to provide feedback on the responses"""
# feedback = self.feedback.append(feedback)
# return feedback
# def format_prompt(self, **kwargs: Any) -> str:
# """Format the template with the provided kwargs using f string interpolation"""
# return self.template.format(**kwargs)
# def _generate(self, formatted_prompts: str) -> str:
# """
# Generate a result using the lm
# """
# return self.llm(formatted_prompts)
# def run(self, **kwargs: Any) -> str:
# """Generate a result using the provided keyword args"""
# prompt = self.format_prompt(**kwargs)
# response = self._generate(prompt)
# return response
# def bulk_run(
# self,
# inputs: List[Dict[str, Any]]
# ) -> List[str]:
# """Generate responses for multiple input sets"""
# return [self.run(**input_data) for input_data in inputs]
# @staticmethod
# def from_llm_and_template(llm: Any, template: str) -> "Flow":
# """Create FlowStream from LLM and a string template"""
# return Flow(llm=llm, template=template)
# @staticmethod
# def from_llm_and_template_file(llm: Any, template_file: str) -> "Flow":
# """Create FlowStream from LLM and a template file"""
# with open(template_file, "r") as f:
# template = f.read()
# return Flow(llm=llm, template=template)
class Flow:
    def __init__(
        self,
        llm: Any,
-        template: str,
+        # template: str,
        max_loops: int = 1,
        stopping_condition: Optional[Callable[[str], bool]] = None,
        loop_interval: int = 1,

@ -100,7 +69,7 @@ class Flow:
        **kwargs: Any,
    ):
        self.llm = llm
-        self.template = template
+        # self.template = template
        self.max_loops = max_loops
        self.stopping_condition = stopping_condition
        self.loop_interval = loop_interval

@ -125,15 +94,15 @@ class Flow:
        response = self.llm(prompt, **kwargs)
        return response

-    def format_prompt(self, **kwargs: Any) -> str:
+    def format_prompt(self, template, **kwargs: Any) -> str:
        """Format the template with the provided kwargs using f-string interpolation."""
-        return self.template.format(**kwargs)
+        return template.format(**kwargs)

-    def _generate(self, task: str, formatted_prompts: str) -> str:
+    def run(self, task: str):  # formatted_prompts: str) -> str:
        """
        Generate a result using the lm with optional query loops and stopping conditions.
        """
-        response = formatted_prompts
+        response = task
        history = [task]
        for _ in range(self.max_loops):
            if self._check_stopping_condition(response):

@ -152,7 +121,7 @@ class Flow:
            time.sleep(self.loop_interval)
        return response, history

-    def run(self, **kwargs: Any) -> str:
+    def _run(self, **kwargs: Any) -> str:
        """Generate a result using the provided keyword args."""
        task = self.format_prompt(**kwargs)
        response, history = self._generate(task, task)

@ -175,7 +144,7 @@ class Flow:
            template = f.read()
        return Flow(llm=llm, template=template)

-    def save(self, file_path: Union[Path, str]) -> None:
+    def save(self, file_path) -> None:
        """Save the flow.

        Expects `Flow._flow_type` property to be implemented and for memory to be
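With the template removed, the new `run` takes the task string directly and, per the hunk above, returns the final response together with the loop history. A minimal sketch of the post-diff call pattern, assuming an OpenAI key is available and reusing the `OpenAIChat` wrapper from the module docstring:

```
# Sketch only: exercise the post-diff Flow API, where run(task) returns (response, history).
import os
from swarms.models import OpenAIChat
from swarms.structs import Flow

llm = OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"], temperature=0.5)
flow = Flow(llm=llm, max_loops=3, loop_interval=1)

response, history = flow.run("Draft a short release note for the new Flow structure")
print(response)
print(f"Loop history has {len(history)} entries")
```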

@ -24,7 +24,7 @@ class BaseTask(ABC):
        self.parent_ids: List[str] = []
        self.child_ids: List[str] = []
        self.output: Optional[Union[Artifact, ErrorArtifact]] = None
-        self.structure: Optional["Structure"] = None
+        self.structure = None

    @property
    @abstractmethod

@ -45,7 +45,7 @@ class BaseTask(ABC):
    def __lshift__(self, child: BaseTask) -> BaseTask:
        return self.add_parent(child)

-    def preprocess(self, structure: "Structure") -> BaseTask:
+    def preprocess(self, structure) -> BaseTask:
        self.structure = structure
        return self

@ -0,0 +1,187 @@
from swarms.embeddings.simple_ada import get_ada_embeddings
import chromadb
from swarms.models.openai_models import OpenAIChat
# Vectordb
client = chromadb.Client()
collection = client.create_collection(name="swarm")
def add_to_vectordb(task):
"""
Add some text documents to the collection
Chroma will store your text, and handle tokenization, embedding, and indexing automatically.
"""
docs = collection.add(documents=[task], metadatas=[{"source": "agent1"}], ids=["1"])
return docs
def query_vectordb(query: str):
results = collection.query(query_texts=[query], n_results=1)
return results
# Test
TASK_TEXT = """
11.3.1 Einsteins A and B Coefficients
Picture a container of atoms, of them in the lower state , and of them in the upper state . Let A be the spontaneous emission rate,14 so that the number of particles leaving the upper state by this process, per unit time, is .15 The transition rate for stimulated emission, as we have seen (Equation 11.54), is proportional to the energy density of the electromagnetic field: , where ; the number of particles leaving the upper state by this mechanism, per unit time, is . The absorption rate is likewise proportional to call it ; the number of particles per unit time joining the upper level is therefore . All told, then,
(11.55) Suppose these atoms are in thermal equilibrium with the ambient field, so that the number of particles in
each level is constant. In that case , and it follows that
(11.56) On the other hand, we know from statistical mechanics16 that the number of particles with energy E, in
thermal equilibrium at temperature T, is proportional to the Boltzmann factor, , so
(11.57)
and hence
But Plancks blackbody formula17 tells us the energy density of thermal radiation:
comparing the two expressions, we conclude that
and
(11.58)
(11.59)
(11.60)
(11.61)
Equation 11.60 confirms what we already knew: the transition rate for stimulated emission is the same as for absorption. But it was an astonishing result in 1917indeed, Einstein was forced to invent stimulated emission in order to reproduce Plancks formula. Our present attention, however, focuses on Equation 11.61, for this tells us the spontaneous emission rate which is what we are looking forin terms of the stimulated emission rate which we already know. From Equation 11.54 we read off
(11.62)
530
and it follows that the spontaneous emission rate is
(11.63)
Problem 11.10 As a mechanism for downward transitions, spontaneous emission competes with thermally stimulated emission (stimulated emission for which
blackbody radiation is the source). Show that at room temperature ( thermal stimulation dominates for frequencies well below spontaneous emission dominates for frequencies well above mechanism dominates for visible light?
K) Hz, whereas Hz. Which
Problem 11.11 You could derive the spontaneous emission rate (Equation 11.63) without the detour through Einsteins A and B coefficients if you knew the ground state energy density of the electromagnetic field, , for then it would simply be a case of stimulated emission (Equation 11.54). To do this honestly would require quantum electrodynamics, but if you are prepared to believe that the ground state consists of one photon in each classical mode, then the derivation is fairly simple:
(a)
To obtain the classical modes, consider an empty cubical box, of side l, with one corner at the origin. Electromagnetic fields (in vacuum) satisfy the classical wave equation18
where f stands for any component of E or of B. Show that separation of variables, and the imposition of the boundary condition on all six surfaces yields the standing wave patterns
with
There are two modes for each triplet of positive integers , corresponding to the two polarization states.
The energy of a photon is (Equation 4.92), so the energy in the mode is
What, then, is the total energy per unit volume in the frequency range 531
(b)
(c)
What, then, is the total energy per unit volume in the frequency range , if each mode gets one photon? Express your answer in the form
and read off . Hint: refer to Figure 5.3.
Use your result, together with Equation 11.54, to obtain the spontaneous
emission rate. Compare Equation 11.63.
532
11.3.2 The Lifetime of an Excited State
Equation 11.63 is our fundamental result; it gives the transition rate for spontaneous emission. Suppose, now, that you have somehow pumped a large number of atoms into the excited state. As a result of spontaneous emission, this number will decrease as time goes on; specifically, in a time interval dt you will lose a fraction A dt of them:
(assuming there is no mechanism to replenish the supply).19 Solving for , we find:
evidently the number remaining in the excited state decreases exponentially, with a time constant
We call this the lifetime of the statetechnically, it is the time it takes for to reach initial value.
(11.64)
(11.65)
(11.66) of its
I have assumed all along that there are only two states for the system, but this was just for notational simplicitythe spontaneous emission formula (Equation 11.63) gives the transition rate for
regardless of what other states may be accessible (see Problem 11.24). Typically, an excited atom has many different decay modes (that is: can decay to a large number of different lower-energy states, , , , ...). In that case the transition rates add, and the net lifetime is
(11.67)
Example 11.1
Suppose a charge q is attached to a spring and constrained to oscillate along the x axis. Say it starts out in the state (Equation 2.68), and decays by spontaneous emission to state . From Equation 11.51 we have
You calculated the matrix elements of x back in Problem 3.39:
where ω is the natural frequency of the oscillator (I no longer need this letter for the frequency of the stimulating radiation). But were talking about emission, so must be lower than n; for our purposes, then,
(11.68)
Evidently transitions occur only to states one step lower on the ladder, and the frequency of the 533
Evidently transitions occur only to states one step lower on the ladder, and the frequency of the photon emitted is
(11.69) Not surprisingly, the system radiates at the classical oscillator frequency. The transition rate
(Equation 11.63) is
and the lifetime of the nth stationary state is
Meanwhile, each radiated photon carries an energy , so the power radiated is :
(11.70)
(11.71)
(11.72)
or, since the energy of an oscillator in the nth state is
,
This is the average power radiated by a quantum oscillator with (initial) energy E.
For comparison, lets determine the average power radiated by a classical oscillator with the same energy. According to classical electrodynamics, the power radiated by an accelerating charge q is given
by the Larmor formula:20
(11.73)
For a harmonic oscillator with amplitude , , and the acceleration is . Averaging over a full cycle, then,
But the energy of the oscillator is , so , and hence
(11.74)
This is the average power radiated by a classical oscillator with energy E. In the classical limit
the classical and quantum formulas agree;21 however, the quantum formula (Equation 11.72) protects the ground state: If the oscillator does not radiate.
534
Problem 11.12 The half-life of an excited state is the time it would take for half the atoms in a large sample to make a transition. Find the relation between
and τ (the lifetime of the state).
Problem 11.13 Calculate the lifetime (in seconds) for each of the four
states of hydrogen. Hint: Youll need to evaluate matrix elements of the form , , and so on. Remember that ,
, and . Most of these integrals are zero, so inspect them closely before you start calculating. Answer: seconds for all except , which is infinite.
535
11.3.3 Selection Rules
The calculation of spontaneous emission rates has been reduced to a matter of evaluating matrix elements of the form
As you will have discovered if you worked Problem 11.13, (if you didnt, go back right now and do so!) these quantities are very often zero, and it would be helpful to know in advance when this is going to happen, so we dont waste a lot of time evaluating unnecessary integrals. Suppose we are interested in systems like hydrogen, for which the Hamiltonian is spherically symmetrical. In that case we can specify the states with the usual quantum numbers n, , and m, and the matrix elements are
Now, r is a vector operator, and we can invoke the results of Chapter 6 to obtain the selection rules22 (11.75)
These conditions follow from symmetry alone. If they are not met, then the matrix element is zero, and the transition is said to be forbidden. Moreover, it follows from Equations 6.566.58 that
(11.76)
So it is never necessary to compute the matrix elements of both x and y; you can always get one from the other.
Evidently not all transitions to lower-energy states can proceed by electric dipole radiation; most are forbidden by the selection rules. The scheme of allowed transitions for the first four Bohr levels in hydrogen is shown in Figure 11.9. Notice that the state is stuck: it cannot decay, because there is no lower- energy state with . It is called a metastable state, and its lifetime is indeed much longer than that of, for example, the states , , and . Metastable states do eventually decay, by collisions, or by forbidden transitions (Problem 11.31), or by multiphoton emission.
Figure 11.9: Allowed decays for the first four Bohr levels in hydrogen. 536
Problem 11.14 From the commutators of with x, y, and z (Equation 4.122): (11.77)
obtain the selection rule for and Equation 11.76. Hint: Sandwich each commutator between and .
Problem 11.15 Obtain the selection rule for as follows:
(a)
Derive the commutation relation
Hint: First show that
Use this, and (in the final step) the fact that demonstrate that
The generalization from z to r is trivial. Sandwich this commutator between
the implications.
(b)
(11.78)
, to
and , and work out
Problem 11.16 An electron in the , ,
by a sequence of (electric dipole) transitions to the ground state.
(a)
(b) (c)
state of hydrogen decays What decay routes are open to it? Specify them in the following way:
If you had a bottle full of atoms in this state, what fraction of them would decay via each route?
What is the lifetime of this state? Hint: Once its made the first transition, its no longer in the state , so only the first step in each sequence is relevant in computing the lifetime.
537
11.4 Fermis Golden Rule
In the previous sections we considered transitions between two discrete energy states, such as two bound states of an atom. We saw that such a transition was most likely when the final energy satisfied the resonance condition: , where ω is the frequency associated with the perturbation. I now want to look at the case where falls in a continuum of states (Figure 11.10). To stick close to the example of Section 11.2, if the radiation is energetic enough it can ionize the atomthe photoelectric effectexciting the electron from a bound state into the continuum of scattering states.
Figure 11.10: A transition (a) between two discrete states and (b) between a discrete state and a continuum of states.
We cant talk about a transition to a precise state in that continuum (any more than we can talk about someone being precisely 16 years old), but we can compute the probability that the system makes a transition to a state with an energy in some finite range about . That is given by the integral of Equation 11.35 over all the final states:
(11.79)
where . The quantity is the number of states with energy between E and ; is called the density of states, and Ill show you how its calculated in Example 11.2.
At short times, Equation 11.79 leads to a transition probability proportional to , just as for a transition between discrete states. On the other hand, at long times the quantity in curly brackets in Equation 11.79 is sharply peaked: as a function of its maximum occurs at and the central peak has a width of . For sufficiently large t, we can therefore approximate Equation 11.79 as23
The remaining integral was already evaluated in Section 11.2.3:
The oscillatory behavior of P has again been washed out, giving a constant transition rate:24 538
(11.80)
Equation 11.81 is known as Fermis Golden Rule.25 Apart from the factor of , it says that the transition rate is the square of the matrix element (this encapsulates all the relevant information about the dynamics of the process) times the density of states (how many final states are accessible, given the energy supplied by the perturbationthe more roads are open, the faster the traffic will flow). It makes sense.
Example 11.2
Use Fermis Golden Rule to obtain the differential scattering cross-section for a particle of mass m and incident wave vector scattering from a potential (Figure 11.11).
Figure11.11: Aparticlewithincidentwavevector isscatteredintoastatewithwavevectork. Solution:
We take our initial and final states to be plane waves:
(11.82)
Here Ive used a technique called box normalization; I place the whole setup inside a box of length l on a side. This makes the free-particle states normalizable and countable. Formally, we want the limit
; in practice l will drop out of our final expression. Using periodic boundary conditions,26 the allowed values of are
(11.83)
for integers , , and . Our pertu
"""
# insert into vectordb
added = add_to_vectordb(TASK_TEXT)
print(f"added to db: {added}")
# # Init LLM
# llm = OpenAIChat(
# openai_api_key=""
# )
# Query vectordb
query = "What are einsteins coefficients?"
task = str(query_vectordb(query)["documents"][0])
print(f"task: {task}")
# # # Send the query back into the llm
# response = llm(task)
# print(response)
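The commented-out lines above gesture at the final retrieval-augmented step; a hedged, end-to-end sketch of that step (API-key handling and prompt wording are assumptions, and the retrieval reuses the helpers defined earlier in this file):

```
# Sketch only: feed the retrieved chunk back into the LLM wrapper imported above.
import os

llm = OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"])
context = str(query_vectordb("What are Einstein's A and B coefficients?")["documents"][0])
response = llm(
    f"Using this excerpt:\n{context}\n\nExplain Einstein's A and B coefficients briefly."
)
print(response)
```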

@ -1,4 +1,4 @@
from swarms.utils.display_markdown import display_markdown_message
from swarms.utils.futures import execute_futures_dict
from swarms.utils.code_interpreter import SubprocessCodeInterpreter
from swarms.utils.parse_code import extract_code_in_backticks_in_string

@ -7,5 +7,5 @@ def extract_code_in_backticks_in_string(message: str) -> str:
""" """
pattern = r"`` ``(.*?)`` " # Non-greedy match between six backticks pattern = r"`` ``(.*?)`` " # Non-greedy match between six backticks
match = re.search(pattern, message, re.DOTALL) # re.DOTALL to match newline chars match = re.search(pattern, message, re.DOTALL) # re.DOTALL to match newline chars
return match.group(1).strip() if match else None return match.group(1).strip() if match else None

@ -0,0 +1,68 @@
import pytest
from transformers import PreTrainedModel, PreTrainedTokenizerBase
from swarms.models.mpt import MPT7B
def test_mpt7b_init():
mpt = MPT7B(
"mosaicml/mpt-7b-storywriter", "EleutherAI/gpt-neox-20b", max_tokens=150
)
assert isinstance(mpt, MPT7B)
assert mpt.model_name == "mosaicml/mpt-7b-storywriter"
assert mpt.tokenizer_name == "EleutherAI/gpt-neox-20b"
    # The Auto* classes are factories, so check against the concrete base classes instead.
    assert isinstance(mpt.tokenizer, PreTrainedTokenizerBase)
    assert isinstance(mpt.model, PreTrainedModel)
assert mpt.max_tokens == 150
def test_mpt7b_run():
mpt = MPT7B(
"mosaicml/mpt-7b-storywriter", "EleutherAI/gpt-neox-20b", max_tokens=150
)
output = mpt.run("generate", "Once upon a time in a land far, far away...")
assert isinstance(output, str)
assert output.startswith("Once upon a time in a land far, far away...")
def test_mpt7b_run_invalid_task():
mpt = MPT7B(
"mosaicml/mpt-7b-storywriter", "EleutherAI/gpt-neox-20b", max_tokens=150
)
with pytest.raises(ValueError):
mpt.run("invalid_task", "Once upon a time in a land far, far away...")
def test_mpt7b_generate():
mpt = MPT7B(
"mosaicml/mpt-7b-storywriter", "EleutherAI/gpt-neox-20b", max_tokens=150
)
output = mpt.generate("Once upon a time in a land far, far away...")
assert isinstance(output, str)
assert output.startswith("Once upon a time in a land far, far away...")
def test_mpt7b_batch_generate():
mpt = MPT7B(
"mosaicml/mpt-7b-storywriter", "EleutherAI/gpt-neox-20b", max_tokens=150
)
prompts = ["In the deep jungles,", "At the heart of the city,"]
outputs = mpt.batch_generate(prompts, temperature=0.7)
assert isinstance(outputs, list)
assert len(outputs) == len(prompts)
for output in outputs:
assert isinstance(output, str)
def test_mpt7b_unfreeze_model():
mpt = MPT7B(
"mosaicml/mpt-7b-storywriter", "EleutherAI/gpt-neox-20b", max_tokens=150
)
mpt.unfreeze_model()
for param in mpt.model.parameters():
assert param.requires_grad
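Each test above constructs a fresh MPT7B, which reloads the model weights every time; a module-scoped pytest fixture (a sketch reusing the same constructor arguments as the tests) would amortize that cost:

```
import pytest

from swarms.models.mpt import MPT7B


@pytest.fixture(scope="module")
def mpt():
    # Load the model once per test module instead of once per test.
    return MPT7B(
        "mosaicml/mpt-7b-storywriter", "EleutherAI/gpt-neox-20b", max_tokens=150
    )


def test_generate_with_fixture(mpt):
    output = mpt.generate("Once upon a time in a land far, far away...")
    assert isinstance(output, str)
```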