import redis
import json
import time

# Set up Redis connection
r = redis.Redis(host='localhost', port=6379, db=0)

def main(interpreter):
    """Core loop: pop user messages off Redis, run them through the
    interpreter, and stream response chunks back to the interface.

    Messages travel through Redis as JSON-encoded LMC dicts (redis-py
    only accepts bytes/str/int/float, never dicts).  Conversation
    history is persisted to conversations/user.json between turns.
    """

    while True:

        # Check 10x a second for new messages; sleep only while empty
        raw = None
        while raw is None:
            raw = r.lpop('to_core')
            if raw is None:
                time.sleep(0.1)

        # lpop returns bytes — decode the JSON-encoded LMC message dict
        # before calling .get() on it (raw bytes have no .get()).
        message = json.loads(raw)

        # Custom stop message will halt us
        content = message.get("content")
        if content and content.lower().strip(".,!") == "stop":
            continue

        # Load, append, and save conversation history
        with open("conversations/user.json", "r") as file:
            messages = json.load(file)
        messages.append(message)
        with open("conversations/user.json", "w") as file:
            json.dump(messages, file)

        for chunk in interpreter.chat(messages):

            # Send it to the interface — serialized, since redis-py
            # rejects dict values.
            r.rpush('to_interface', json.dumps(chunk))

            # If we have a new message, save our progress and go back to
            # the top.  NOTE: this must watch 'to_core' — the interface
            # pushes there; 'to_main' was the pre-Redis queue name and is
            # never written, so the interrupt would never fire.
            if r.llen('to_core') > 0:
                with open("conversations/user.json", "w") as file:
                    json.dump(interpreter.messages, file)
                break
messages -user_start_message = {"role": "user", "type": "message", "start": True} -user_start_message = {"role": "user", "type": "message", "start": True} - # Set up websocket connection websocket = websockets.connect('ws://localhost:8765') @@ -54,9 +50,9 @@ def main(): # If the button is pushed down if not GPIO.input(18): - # Send start message to core and websocket - r.rpush('to_core', user_start_message) - send_to_websocket(user_start_message) + # Tell websocket and core that the user is speaking + send_to_websocket({"role": "user", "type": "message", "start": True}) # Standard start flag, required per streaming LMC protocol (https://docs.openinterpreter.com/guides/streaming-response) + r.rpush('to_core', {"role": "user", "type": "message", "content": "stop"}) # Custom stop message. Core is not streaming LMC (it's static LMC) so doesn't require that ^ flag # Record audio from the microphone in chunks audio_chunks = [] @@ -75,6 +71,9 @@ def main(): # Send message to core and websocket r.rpush('to_core', message) send_to_websocket(message) + + # Send user message end flag to websocket, required per streaming LMC protocol + send_to_websocket({"role": "user", "type": "message", "end": True}) # Send out anything in the to_interface queue chunk = r.lpop('to_interface')