Queue, Conversations

pull/3/head
killian 12 months ago
parent 036ee268dd
commit 54a2236b6c

@@ -17,6 +17,21 @@
         ws = new WebSocket("ws://localhost:8000/");
         ws.onopen = function(event) {
            console.log("Connected to WebSocket server.");
+           ws.onmessage = function (event) {
+               if (lastMessageElement == null) {
+                   lastMessageElement = document.createElement('p');
+                   document.getElementById('messages').appendChild(lastMessageElement);
+               }
+               var data = JSON.parse(event.data);
+               if (data.hasOwnProperty('content')) {
+                   if (data.type == 'code') {
+                       lastMessageElement.innerHTML += '<pre>' + data.content + '</pre>';
+                   } else {
+                       lastMessageElement.innerHTML += data.content;
+                   }
+               }
+           };
         };
         ws.onerror = function(error) {
            console.log("WebSocket error: ", error);
@@ -28,13 +43,7 @@
         }
         connectWebSocket();
         var lastMessageElement = null;
-        ws.onmessage = function (event) {
-           if (lastMessageElement == null) {
-               lastMessageElement = document.createElement('p');
-               document.getElementById('messages').appendChild(lastMessageElement);
-           }
-           lastMessageElement.innerHTML += event.data;
-        };
         function sendMessage(event) {
            event.preventDefault();
            var input = document.getElementById("messageInput");
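
For quick testing outside the browser, a small Python client can stand in for this page. This is a sketch, not part of the commit: it assumes the third-party websockets package, and it mirrors the JS handler above, since the server streams LMC chunks as JSON after this change.

    import asyncio
    import json

    import websockets  # pip install websockets

    async def main():
        async with websockets.connect("ws://localhost:8000/") as ws:
            await ws.send("hello")
            # Print streamed assistant message content as it arrives.
            while True:
                chunk = json.loads(await ws.recv())
                if chunk.get("type") == "message" and "content" in chunk:
                    print(chunk["content"], end="", flush=True)
                if "end" in chunk:  # the server flags the end of a message
                    break

    asyncio.run(main())
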

@@ -0,0 +1 @@
+[{"role": "user", "type": "message", "content": "uh"}, {"role": "assistant", "type": "message", "content": "Hello! How can I assist you today?"}, {"role": "user", "type": "message", "content": "hello"}, {"role": "assistant", "type": "message", "content": "Hi there! What can I help you with today? Can we start by planning your day or there's something specific you have in mind?"}]

@@ -14,7 +14,7 @@ import os
 import glob

 def check_queue():
-    queue_files = glob.glob("/queue/*.json")
+    queue_files = glob.glob("interpreter/queue/*.json")
     if queue_files:
         with open(queue_files[0], 'r') as file:
             data = json.load(file)
@@ -24,15 +24,15 @@ def check_queue():
     return None

 def save_conversation(messages):
-    with open('/conversations/user.json', 'w') as file:
+    with open('interpreter/conversations/user.json', 'w') as file:
         json.dump(messages, file)

 def load_conversation():
     try:
-        with open('/conversations/user.json', 'r') as file:
+        with open('interpreter/conversations/user.json', 'r') as file:
             messages = json.load(file)
             return messages
-    except FileNotFoundError:
+    except (FileNotFoundError, json.JSONDecodeError):
         return []

 def main(interpreter):
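
check_queue() polls interpreter/queue/ for JSON files, so any local process can inject a message into a running session by writing one there. A minimal producer sketch follows; the filename scheme is an assumption, and this hunk doesn't show whether check_queue() deletes the file after reading it:

    import json
    import os
    import time

    # Hypothetical producer: drop an LMC message where check_queue() will find it.
    message = {"role": "computer", "type": "message", "content": "Your 10:00am alarm has gone off."}

    os.makedirs("interpreter/queue", exist_ok=True)
    with open(f"interpreter/queue/{int(time.time())}.json", "w") as f:
        json.dump(message, f)  # picked up as the first interpreter/queue/*.json match
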
@@ -42,55 +42,72 @@ def main(interpreter):
     @app.websocket("/")
     async def i_test(websocket: WebSocket):
         await websocket.accept()
+        data = None
         while True:
-            data = await websocket.receive_text()
-            while data.strip().lower() != "stop":  # Stop command
-                task = asyncio.create_task(websocket.receive_text())
-                # This would be terrible for production. Just for testing.
-                try:
-                    data_dict = json.loads(data)
-                    if set(data_dict.keys()) == {"role", "content", "type"} or set(
-                        data_dict.keys()
-                    ) == {"role", "content", "type", "format"}:
-                        data = data_dict
-                except json.JSONDecodeError:
-                    pass
-                for response in interpreter.chat(
-                    message=data, stream=True, display=False
-                ):
-                    # Check queue
-                    if task.done():
-                        data = task.result()  # Get the new message
-                        break  # Break the loop and start processing the new message
-                    # Send out assistant message chunks
-                    if (
-                        response.get("type") == "message"
-                        and response["role"] == "assistant"
-                        and "content" in response
-                    ):
-                        await websocket.send_text(response["content"])
-                        await asyncio.sleep(0.01)  # Add a small delay
-                    # If it just finished sending an assistant message, send a newline. Otherwise it looks weird.
-                    if (
-                        response.get("type") == "message"
-                        and response["role"] == "assistant"
-                        and response.get("end") == True
-                    ):
-                        await websocket.send_text("\n")
-                        await asyncio.sleep(0.01)  # Add a small delay
-                if not task.done():
-                    data = (
-                        await task
-                    )  # Wait for the next message if it hasn't arrived yet
+            # This is the task for waiting for the user to send any message at all.
+            task = asyncio.create_task(websocket.receive_text())
+            if data == None:  # Data will have stuff in it if we interrupted it.
+                while True:
+                    # Has the user sent a message?
+                    if task.done():
+                        data = task.result()
+                        break
+                    # Has the queue received a message?
+                    queued_message = check_queue()
+                    if queued_message:
+                        data = queued_message
+                        break
+                    # Wait 0.2 seconds
+                    await asyncio.sleep(0.2)
+            ### FOR DEV ONLY: SIMULATE LMC MESSAGES
+            # This lets users simulate any kind of LMC message by passing a JSON into the textbox in index.html.
+            try:
+                data_dict = json.loads(data)
+                data = data_dict
+            except json.JSONDecodeError:
+                pass
+            ### CONVERSATION / DISC MANAGEMENT
+            user_message = {"role": "user", "type": "message", "content": data}
+            messages = load_conversation()
+            messages.append(user_message)
+            save_conversation(messages)
+            ### RESPONDING
+            # This is the task for waiting for user interruptions.
+            task = asyncio.create_task(websocket.receive_text())
+            for chunk in interpreter.chat(
+                messages, stream=True, display=True
+            ):
+                print(chunk)
+                # Check queue
+                queued_message = check_queue()
+                if queued_message:
+                    data = queued_message
+                    break
+                # Check for new user messages
+                if task.done():
+                    data = task.result()  # Get the new message
+                    break  # Break the loop and start processing the new message
+                # Send out chunks
+                await websocket.send_json(chunk)
+                await asyncio.sleep(0.01)  # Add a small delay
+                # If the interpreter just finished sending a message, save it
+                if "end" in chunk:
+                    save_conversation(interpreter.messages)
+            data = None

     uvicorn.run(app, host="0.0.0.0", port=8000)
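
Because of the dev-only block above, anything typed into the index.html textbox that parses as JSON is treated as a raw LMC message rather than plain user text. For example, pasting the following (the same fixture deleted in the next hunk) simulates a computer-originated event; the equivalent payload could also be dropped into interpreter/queue/ for check_queue() to pick up:

    {"role": "computer", "type": "message", "content": "Your 10:00am alarm has gone off."}
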

@@ -1,5 +0,0 @@
-{
-    "role": "computer",
-    "type": "message",
-    "content": "Your 10:00am alarm has gone off."
-}

@@ -50,7 +50,8 @@ interpreter.system_message = system_message
 for file in glob.glob('/tools/*.py'):
     with open(file, 'r') as f:
-        interpreter.computer.run("python", f.read())
+        for chunk in interpreter.computer.run("python", f.read()):
+            print(chunk)

 ### LLM SETTINGS
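
Each file in /tools/ is plain Python source executed inside the interpreter's Python session at startup, so anything it defines becomes available to the model; the change above just streams and prints the resulting output chunks instead of discarding them. A hypothetical tool file (illustrative only, not part of the commit):

    # /tools/clock.py (hypothetical example)
    import datetime

    def current_time() -> str:
        """Return the current wall-clock time, e.g. for alarm-style reminders."""
        return datetime.datetime.now().strftime("%H:%M")
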

@@ -1358,13 +1358,13 @@ files = [

 [[package]]
 name = "litellm"
-version = "1.20.0"
+version = "1.20.1"
 description = "Library to easily interface with LLM API providers"
 optional = false
 python-versions = ">=3.8, !=2.7.*, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*, !=3.7.*"
 files = [
-    {file = "litellm-1.20.0-py3-none-any.whl", hash = "sha256:c90bb88d30307f67849b9c234de2cfd082ab8a259cacb76079cebba6fa27ce06"},
-    {file = "litellm-1.20.0.tar.gz", hash = "sha256:91793c455d94c6999942765be2fe86ec5fe85615f110f499464e33bea15b82ac"},
+    {file = "litellm-1.20.1-py3-none-any.whl", hash = "sha256:83a63c2fde88d3cd11ba963da79ce18c22deb316bf9579fefea86b3116f743ba"},
+    {file = "litellm-1.20.1.tar.gz", hash = "sha256:cb14c567187e2e6fa06a396111701dfe5a4bd3a0ccebd8104a23d7b87c97b2c4"},
 ]

 [package.dependencies]

@@ -8,8 +8,8 @@ license = "AGPL"

 [tool.poetry.dependencies]
 python = "^3.11"
 open-interpreter = "^0.2.0"
-fastapi = "^0.109.0"
 uvicorn = {extras = ["standard"], version = "^0.27.0"}
+fastapi = "^0.109.0"

 [build-system]
