Pseudocode for new OS structure

pull/3/head
killian 11 months ago
parent b9f4e92ef0
commit 849282ab50

BIN
OS/01/.DS_Store vendored

Binary file not shown.

@ -1 +0,0 @@
The app is responsible for accepting user input, sending it to "/" as an [LMC message](https://docs.openinterpreter.com/protocols/lmc-messages), then receiving [streaming LMC messages](https://docs.openinterpreter.com/guides/streaming-response) and somehow displaying them to the user.

@ -1,61 +0,0 @@
<!DOCTYPE html>
<html>
<head>
    <title>Chat</title>
</head>
<body>
    <form action="" onsubmit="sendMessage(event)">
        <textarea id="messageInput" rows="10" cols="50" autocomplete="off"></textarea>
        <button>Send</button>
    </form>
    <div id="messages"></div>
    <script>
        var ws;
        // Paragraph currently receiving streamed assistant output; null means
        // the next content chunk starts a new paragraph.
        var lastMessageElement = null;

        function connectWebSocket() {
            ws = new WebSocket("ws://localhost:8000/");
            ws.onopen = function(event) {
                console.log("Connected to WebSocket server.");
            };
            // Attached once per socket (the original re-attached this inside
            // onopen on every reconnect; behavior is the same, just tidier).
            ws.onmessage = function(event) {
                if (lastMessageElement == null) {
                    lastMessageElement = document.createElement('p');
                    document.getElementById('messages').appendChild(lastMessageElement);
                }
                var data = JSON.parse(event.data);
                if (data.hasOwnProperty('content')) {
                    // FIX: textContent instead of innerHTML += — streamed LMC
                    // content could otherwise inject markup/script into the page.
                    lastMessageElement.textContent += data.content;
                }
            };
            ws.onerror = function(error) {
                console.log("WebSocket error: ", error);
            };
            ws.onclose = function(event) {
                console.log("WebSocket connection closed. Retrying in 1 second...");
                setTimeout(connectWebSocket, 1000);
            };
        }
        connectWebSocket();

        function sendMessage(event) {
            event.preventDefault();
            var input = document.getElementById("messageInput");
            var message = input.value;
            // Normalize pasted JSON (validates it and strips extra whitespace).
            if (message.startsWith('{') && message.endsWith('}')) {
                message = JSON.stringify(JSON.parse(message));
            }
            ws.send(message);
            // FIX: build the bold element via textContent so user-typed HTML is
            // shown literally instead of being parsed (HTML injection).
            var userMessageElement = document.createElement('p');
            var bold = document.createElement('b');
            bold.textContent = input.value;
            userMessageElement.appendChild(bold);
            userMessageElement.appendChild(document.createElement('br'));
            document.getElementById('messages').appendChild(userMessageElement);
            lastMessageElement = document.createElement('p');
            document.getElementById('messages').appendChild(lastMessageElement);
            input.value = '';
        }
    </script>
</body>
</html>

@ -0,0 +1,28 @@
# Core loop pseudocode: pull messages off the 'to_main' queue, run them
# through the interpreter, and stream chunks (plus TTS audio for each
# completed sentence) out to the IO process.
# NOTE(review): indentation reconstructed from the diff view — confirm.
while True:
    # Block until something arrives on the queue.
    message = None
    while message is None:
        message = get_from_queue('to_main')
    # The start-of-speech marker is a signal, not a message to answer.
    if message == user_start_message:
        continue
    messages = get_conversation_history()
    messages.append(message)
    # FIX: save the full appended list; the original saved only the new
    # `message`, silently dropping the conversation history on disk.
    save_conversation_history(messages)
    sentence = ""
    for chunk in interpreter.chat(messages):
        # A newly queued message interrupts the current response.
        if queue_length() > 0:
            save_conversation_history(interpreter.messages)
            break
        send_to_io(chunk)
        sentence += chunk
        # Speak each sentence as soon as it is complete.
        if is_full_sentence(sentence):
            audio = tts(sentence)
            sentence = ""
            send_to_io(audio)

@ -0,0 +1,5 @@
@app.post("/i/")
async def i(request: Request):
message = await request.json()
message = to_lmc(message)
r.lpush("to_main", message)

@ -1,184 +0,0 @@
"""
Responsible for taking an interpreter, then serving it at "/" as a websocket, accepting and streaming LMC Messages.
https://docs.openinterpreter.com/protocols/lmc-messages
Also needs to be saving conversations, and checking the queue.
"""
from typing import Optional, Tuple
import uvicorn
from fastapi import FastAPI, WebSocket
import asyncio
import json
import os
import glob
from interpreter.core.core import OpenInterpreter
def check_queue() -> Optional[dict]:
    """Pop and return the oldest queued message, or None if the queue is empty.

    Messages are JSON files dropped into interpreter/queue/ by other
    processes; the chosen file is deleted after reading.

    FIXES: annotation was `-> dict` despite the implicit None return, and
    glob order is filesystem-dependent, so the dequeue order was
    nondeterministic. Filenames appear to be unix timestamps (per the
    scheduler's queue writer), so sorting approximates FIFO — TODO confirm.
    """
    queue_files = sorted(glob.glob("interpreter/queue/*.json"))
    if not queue_files:
        return None
    path = queue_files[0]
    with open(path, 'r') as file:
        data = json.load(file)
    os.remove(path)
    return data
def save_conversation(messages):
    """Persist the full message list to the user's conversation file on disk."""
    serialized = json.dumps(messages)
    with open('interpreter/conversations/user.json', 'w') as file:
        file.write(serialized)
def load_conversation():
    """Return the saved conversation list, or [] if the file is missing or corrupt."""
    try:
        with open('interpreter/conversations/user.json', 'r') as file:
            return json.load(file)
    except (FileNotFoundError, json.JSONDecodeError):
        return []
def main(interpreter: OpenInterpreter):
    """Serve `interpreter` at "/" as a websocket speaking LMC messages.

    Accepts user text frames, streams interpreter response chunks back as
    JSON, saves the conversation to disk after each completed message, and
    polls the file-based queue (check_queue) so externally queued messages
    can start or interrupt a turn. Blocks forever in uvicorn on 0.0.0.0:8000.

    NOTE(review): the diff view stripped all indentation; the structure
    below is reconstructed and should be confirmed against the original.
    """
    app = FastAPI()

    @app.websocket("/")
    async def i_test(websocket: WebSocket):
        await websocket.accept()
        # `data` holds the next message to process; non-None means a turn
        # was interrupted and we should respond to it without waiting.
        data = None
        while True:
            # This is the task for waiting for the user to send any message at all.
            # (bare except: on the first iteration `task` is unbound, so
            # task.cancel() raises NameError, which is swallowed here)
            try:
                task.cancel()
                try:
                    await task
                except asyncio.CancelledError:
                    pass
            except:
                pass
            task = asyncio.create_task(websocket.receive_text())
            if data == None:  # Data will have stuff in it if we interrupted it.
                while data == None:
                    # Has the user sent a message?
                    if task.done():
                        try:
                            data = {"role": "user", "type": "message", "content": task.result()}
                        except Exception as e:
                            print(e)
                        task.cancel()
                        try:
                            await task
                        except asyncio.CancelledError:
                            pass
                    # Has the queue received a message?
                    queued_message = check_queue()
                    if queued_message:
                        data = queued_message
                    # Poll both sources at ~5 Hz rather than blocking on either.
                    await asyncio.sleep(0.2)

            ### CONVERSATION / DISC MANAGEMENT
            message = data
            messages = load_conversation()
            messages.append(message)
            save_conversation(messages)

            ### RESPONDING
            # This is the task for waiting for user interruptions.
            try:
                task.cancel()
                try:
                    await task
                except asyncio.CancelledError:
                    pass
            except:
                pass
            task = asyncio.create_task(websocket.receive_text())
            recieved_chunks = []
            for chunk in interpreter.chat(
                messages, stream=True, display=True
            ):
                recieved_chunks.append(chunk)
                # Has the user sent a message?
                if task.done():
                    try:
                        data = {"role": "user", "type": "message", "content": task.result()}
                    except Exception as e:
                        print(e)
                        task.cancel()  # The user didn't interrupt
                        try:
                            await task
                        except asyncio.CancelledError:
                            pass
                    save_conversation(interpreter.messages)
                    break
                # Has the queue received a message?
                queued_message = check_queue()
                if queued_message:
                    data = queued_message
                    save_conversation(interpreter.messages)
                    break
                # Send out chunks
                await websocket.send_json(chunk)
                await asyncio.sleep(0.01)  # Add a small delay
                # If the interpreter just finished sending a message, save it
                if "end" in chunk:
                    save_conversation(interpreter.messages)
                # NOTE(review): reconstructed placement — clears `data` so the
                # next outer iteration waits for new input unless a break above
                # set it to an interrupting message.
                data = None

            # If the whole response contained no code chunk, nudge the model once.
            if not any([message["type"] == "code" for message in recieved_chunks]):
                for chunk in interpreter.chat(
                    "Did you need to run code? It's okay if not, but please do if you did.", stream=True, display=True
                ):
                    # Has the user sent a message?
                    if task.done():
                        try:
                            data = {"role": "user", "type": "message", "content": task.result()}
                        except Exception as e:
                            print(e)
                            task.cancel()  # The user didn't interrupt
                            try:
                                await task
                            except asyncio.CancelledError:
                                pass
                        save_conversation(interpreter.messages)
                        break
                    # Has the queue received a message?
                    queued_message = check_queue()
                    if queued_message:
                        data = queued_message
                        save_conversation(interpreter.messages)
                        break
                    # Send out chunks
                    await websocket.send_json(chunk)
                    await asyncio.sleep(0.01)  # Add a small delay
                    # If the interpreter just finished sending a message, save it
                    if "end" in chunk:
                        save_conversation(interpreter.messages)
                    data = None
            if not task.done():
                task.cancel()  # User didn't interrupt
                try:
                    await task
                except asyncio.CancelledError:
                    pass

    uvicorn.run(app, host="0.0.0.0", port=8000)

@ -0,0 +1,11 @@
# Kernel watcher pseudocode: poll dmesg once per second and forward any
# line that passes the filter to the core process as an LMC message.
last_timestamp = time.time()
while True:
    messages = get_dmesg(after=last_timestamp)
    last_timestamp = time.time()
    for message in messages:
        # FIX: original was missing the colon after the condition.
        if passes_filter(message):
            send_to_main(to_lmc(message))
    time.sleep(1)

3333
OS/01/core/poetry.lock generated

File diff suppressed because it is too large Load Diff

@ -1,17 +0,0 @@
[tool.poetry]
name = "01-core"
version = "0.0.1"
description = "The python at the heart of the 01."
authors = ["Open Interpreter <killian@openinterpreter.com>"]
license = "AGPL"
[tool.poetry.dependencies]
python = "^3.10"
open-interpreter = "^0.2.0"
uvicorn = {extras = ["standard"], version = "^0.27.0"}
fastapi = "*"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

@ -1,8 +0,0 @@
# Launch script for the 01 core services.

### START THE LANGUAGE MODEL
# Disabled as we're starting with hosted models
# python llm/start.py

### START THE INTERPRETER
# Serves the interpreter's websocket at "/" (blocks in the foreground).
python interpreter/start.py

@ -1,8 +1,8 @@
""" """
Responsible for configuring an interpreter, then using main.py to serve it at "/". Responsible for configuring an interpreter, then running core.py.
""" """
from main import main from core import main
from interpreter import interpreter from interpreter import interpreter
import os import os
import glob import glob
@ -80,6 +80,6 @@ with open('interpreter/conversations/user.json', 'w') as file:
json.dump([], file) json.dump([], file)
### SERVE INTERPRETER AT "/" ### START CORE
main(interpreter) main(interpreter)

@ -2,6 +2,10 @@ import threading
from datetime import datetime from datetime import datetime
import json import json
import time import time
import redis
# Connect to Redis
r = redis.Redis()
def add_message_to_queue(message): def add_message_to_queue(message):
@ -13,10 +17,8 @@ def add_message_to_queue(message):
"content": message "content": message
}) })
# Write the JSON data to the file # Add the message to the 'to_main' queue
timestamp = str(int(time.time())) r.rpush('to_main', message_json)
with open(f"interpreter/queue/{timestamp}.json", "w") as file:
file.write(message_json)
def schedule(dt, message): def schedule(dt, message):
# Calculate the delay in seconds # Calculate the delay in seconds

@ -0,0 +1,16 @@
<div class="centered-circle"></div>
<script>
ws = new WebSocket("ws://localhost/server")
ws.onmessage = event => {
if (event.data == "user_start_message") {
document.body.style.backgroundColor = "white"
document.querySelector('.centered-circle')
.style.backgroundColor = "black"
} else if (event.data == "user_end_message") {
document.body.style.backgroundColor = "black"
document.querySelector('.centered-circle')
.style.backgroundColor = "white"
}
}
</script>

@ -0,0 +1,27 @@
# IO loop pseudocode: push-to-talk recording -> STT -> core process, while
# also draining the 'to_io' queue to the display websocket and speaking
# each completed sentence via TTS.
# NOTE(review): indentation reconstructed from the diff view — confirm.
sentence = ""  # FIX: was never initialized before the += below
while True:
    if button.is_pressed():
        send_to_main(user_start_message)
        send_to_websocket(user_start_message)
        # Record audio while the button is held down.
        audio_chunks = []
        for audio_chunk in listen():
            audio_chunks.append(audio_chunk)  # FIX: appended undefined `chunk`
            if not button.is_pressed():
                break
        text = stt(audio_chunks)
        send_to_main(text)
        send_to_websocket(user_end_message)
    chunk = get_from_queue('to_io')
    if chunk:
        send_to_websocket(chunk)
        sentence += chunk["content"]
        if is_full_sentence(sentence):  # FIX: missing colon
            tts(sentence)
            sentence = ""  # FIX: was reset to [] although used as a str
    message = check_websocket()
    if message:
        send_to_main(message)

@ -0,0 +1 @@
pip install git+https://github.com/KillianLucas/open-interpreter.git

@ -1,13 +1,45 @@
### Start whisper.cpp and stuff? ### SETUP
### APP # INSTALL REQUIREMENTS
sudo apt-get update
sudo apt-get install redis-server
pip install -r requirements.txt
# START REDIS
redis-cli -h localhost -p 6379 rpush to_interface ""
redis-cli -h localhost -p 6379 rpush to_core ""
open app/index.html
# ^ This should be to run it in fullscreen / kiosk mode
### CORE ### CORE
cd core/ # START KERNEL WATCHER
pip install poetry
poetry install python core/kernel_watcher.py &
poetry run bash start.sh
# START SST AND TTS SERVICES
# (todo)
# (i think we should start with hosted services)
# START LLM
# (disabled, we'll start with hosted services)
# python core/llm/start.py &
# START CORE
python core/start_core.py &
### INTERFACE
# START INTERFACE
python interface/interface.py &
# START DISPLAY
# (this should be changed to run it in fullscreen / kiosk mode)
open interface/display.html
Loading…
Cancel
Save