pull/49/head
killian 11 months ago
parent dd5e87bbc5
commit 75752db415

@@ -38,7 +38,7 @@ STT_RUNNER=client # If server, audio will be sent over websocket.
SERVER_EXPOSE_PUBLICALLY=False
# Image capture settings
CAMERA_ENABLED=True
CAMERA_ENABLED=False
# Camera device selection (Typically 0 for built-in, 1 for USB)
CAMERA_DEVICE_INDEX=0

@@ -48,7 +48,7 @@ RECORDING = False # Flag to control recording state
SPACEBAR_PRESSED = False # Flag to track spacebar press state
# Camera configuration
CAMERA_ENABLED = bool(os.getenv('CAMERA_ENABLED', False))
CAMERA_ENABLED = os.getenv('CAMERA_ENABLED', False).lower() == "true"
CAMERA_DEVICE_INDEX = int(os.getenv('CAMERA_DEVICE_INDEX', 0))
CAMERA_WARMUP_SECONDS = float(os.getenv('CAMERA_WARMUP_SECONDS', 0))
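Worth a note on the one-line parsing change above: `bool()` of any non-empty string is truthy, so `CAMERA_ENABLED=False` in the .env would still have come out as `True` under the old code. A quick illustration (not part of the diff):
import os
os.environ["CAMERA_ENABLED"] = "False"
print(bool(os.getenv("CAMERA_ENABLED", False)))                 # True  -- old parsing: camera stays enabled anyway
print(os.getenv("CAMERA_ENABLED", "False").lower() == "true")   # False -- new string comparison behaves as intended
# A string default ("False") also avoids an AttributeError from .lower() when the variable is unset.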

@@ -49,4 +49,7 @@ def configure_interpreter(interpreter: OpenInterpreter):
print("Temporarily skipping skills (OI 0.2.1, which is unreleased) so we can push to `pip`.")
pass
interpreter.computer.api_base = "https://oi-video-frame.vercel.app/"
interpreter.computer.run("python","print('test')")
return interpreter

@@ -271,6 +271,13 @@ async def listener():
json.dump(interpreter.messages, file, indent=4)
async def stream_tts_to_device(sentence):
force_task_completion_responses = [
"the task is done",
"the task is impossible",
"let me know what you'd like to do next",
]
if sentence.lower().strip().strip(".!?").strip() in force_task_completion_responses:
return
for chunk in stream_tts(sentence):
await to_device.put(chunk)

@@ -0,0 +1,9 @@
def openSafari():
"""open safari"""
import os
os.system('open -a Safari')
import os
os.system('osascript -e \'tell application "Safari" to open location "https://www.youtube.com"\'')

@@ -0,0 +1,6 @@
import threading
import time
def print_message():
"""None"""
time.sleep(30)

@@ -79,7 +79,6 @@ def get_transcription_file(wav_file_path: str):
'--file-path', wav_file_path
])
print("Transcription result:", output)
return output
def get_transcription_bytes(audio_bytes: bytearray, mime_type):
@@ -104,7 +103,6 @@ def stt_wav(wav_file_path: str):
logger.info(f"openai.BadRequestError: {e}")
return None
logger.info(f"Transcription result: {transcript}")
return transcript
else:
temp_dir = tempfile.gettempdir()
@@ -112,7 +110,6 @@ def stt_wav(wav_file_path: str):
ffmpeg.input(wav_file_path).output(output_path, acodec='pcm_s16le', ac=1, ar='16k').run()
try:
transcript = get_transcription_file(output_path)
print("Transcription result:", transcript)
finally:
os.remove(output_path)
return transcript

@@ -1,7 +1,7 @@
# The dynamic system message is where most of the 01's behavior is configured.
# You can put code into the system message {{ in brackets like this }} which will be rendered just before the interpreter starts writing a message.
system_message = r"""
old_system_message = r"""
You are the 01, an executive assistant that can complete **any** task.
When you execute code, it will be executed **on the user's machine**. The user has given you **full and complete permission** to execute any code necessary to complete the task. Execute the code.
@@ -64,6 +64,7 @@ computer.clipboard.view() # Returns contents of clipboard
computer.os.get_selected_text() # Use frequently. If editing text, the user often wants this
```
You are an image-based AI, you can see images.
Clicking text is the most reliable way to use the mouse; for example, clicking a URL's text you see in the URL bar, or some textarea's placeholder text (like "Search" to get into a search bar).
If you use `plt.show()`, the resulting image will be sent to you. However, if you use `PIL.Image.show()`, the resulting image will NOT be sent to you.
It is very important to make sure you are focused on the right application and window. Your first command should usually be to explicitly switch to the correct application.
@@ -74,6 +75,7 @@ Try multiple methods before saying the task is impossible. **You can do it!**
# Add window information
import sys
import os
original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
@@ -165,4 +167,38 @@ For example:
ALWAYS REMEMBER: You are running on a device called the O1, where the interface is entirely speech-based. Make your responses to the user **VERY short.**
""".strip()
""".strip()
system_message = """Just return the following to the user:
{{
import sys
import os
original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
original_stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
try:
from interpreter import interpreter
from pathlib import Path
interpreter.model = "gpt-3.5"
combined_messages = "\\n".join(json.dumps(x) for x in messages[-3:])
query_msg = interpreter.chat(f"This is the conversation so far: {combined_messages}. What is a <10 words query that could be used to find functions that would help answer the user's question?")
query = query_msg[0]['content']
skills_path = Path().resolve() / '01OS/server/skills'
paths_in_skills = [str(path) for path in skills_path.glob('**/*.py')]
skills = interpreter.computer.skills.search(query)
lowercase_skills = [skill[0].lower() + skill[1:] for skill in skills]
output = "\\n".join(lowercase_skills)
finally:
sys.stdout = original_stdout
sys.stderr = original_stderr
print(output)
}}
"""

@@ -91,7 +91,7 @@ def teach():
language = content["format"]
code = content["content"]
chunk_code += code
interpreter.computer.run(code, language)
# interpreter.computer.run(code, language)
time.sleep(0.05)
accumulator.accumulate(chunk)

@@ -18,9 +18,6 @@ class Accumulator:
if "content" in chunk:
# Display
print(chunk['content'], end="", flush=True)
if any(self.message[key] != chunk[key] for key in self.message if key != "content"):
self.message = chunk
if "content" not in self.message:

@@ -1,38 +0,0 @@
import redis
import json
import time
# Set up Redis connection
r = redis.Redis(host='localhost', port=6379, db=0)
def main(interpreter):
while True:
# Check 10x a second for new messages
message = None
while message is None:
message = r.lpop('to_core')
time.sleep(0.1)
# Custom stop message will halt us
if message.get("content") and message.get("content").lower().strip(".,!") == "stop":
continue
# Load, append, and save conversation history
with open("conversations/user.json", "r") as file:
messages = json.load(file)
messages.append(message)
with open("conversations/user.json", "w") as file:
json.dump(messages, file)
for chunk in interpreter.chat(messages):
# Send it to the interface
r.rpush('to_interface', chunk)
# If we have a new message, save our progress and go back to the top
if r.llen('to_main') > 0:
with open("conversations/user.json", "w") as file:
json.dump(interpreter.messages, file)
break

@@ -1,30 +0,0 @@
from fastapi import FastAPI, Request
import uvicorn
import redis
app = FastAPI()
# Set up Redis connection
r = redis.Redis(host='localhost', port=6379, db=0)
@app.post("/i/")
async def i(request: Request):
message = await request.json()
client_host = request.client.host # Get the client's IP address
message = f"""
Another interpreter sent this message to you: {message}
To respond, send a POST request to {client_host}/i/.
""".strip()
r.lpush("to_main", {
"role": "computer",
"type": "message",
"content": message
})
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)

@@ -1,48 +0,0 @@
import time
import redis
# Set up Redis connection
r = redis.Redis(host='localhost', port=6379, db=0)
def get_dmesg(after):
"""
Is this the way to do this?
"""
messages = []
with open('/var/log/dmesg', 'r') as file:
lines = file.readlines()
for line in lines:
timestamp = float(line.split(' ')[0].strip('[]'))
if timestamp > after:
messages.append(line)
return messages
def custom_filter(message):
# Check for {TO_INTERPRETER{ message here }TO_INTERPRETER} pattern
if '{TO_INTERPRETER{' in message and '}TO_INTERPRETER}' in message:
start = message.find('{TO_INTERPRETER{') + len('{TO_INTERPRETER{')
end = message.find('}TO_INTERPRETER}', start)
return message[start:end]
# Check for USB mention
elif 'USB' in message:
return message
# Check for network related keywords
elif any(keyword in message for keyword in ['network', 'IP', 'internet', 'LAN', 'WAN', 'router', 'switch']):
return message
else:
return None
last_timestamp = time.time()
while True:
messages = get_dmesg(after=last_timestamp)
last_timestamp = time.time()
messages_for_core = []
for message in messages:
if custom_filter(message):
messages_for_core.append(message)
if messages_for_core != []:
r.rpush('to_core', "\n".join(messages_for_core))
time.sleep(5)

@@ -1,84 +0,0 @@
from core import main
from interpreter import interpreter
import os
import glob
import json
### SYSTEM MESSAGE
# The system message is where most of the 01's behavior is configured.
# You can put code into the system message {{ in brackets like this }} which will be rendered just before the interpreter starts writing a message.
system_message = """
You are an executive assistant AI that helps the user manage their tasks. You can run Python code.
Store the user's tasks in a Python list called `tasks`.
---
The user's current task is: {{ tasks[0] if tasks else "No current tasks." }}
{{
if len(tasks) > 1:
print("The next task is: ", tasks[1])
}}
---
When the user completes the current task, you should remove it from the list and read the next item by running `tasks = tasks[1:]\ntasks[0]`. Then, tell the user what the next task is.
When the user tells you about a set of tasks, you should intelligently order tasks, batch similar tasks, and break down large tasks into smaller tasks (for this, you should consult the user and get their permission to break it down). Your goal is to manage the task list as intelligently as possible, to make the user as efficient and non-overwhelmed as possible. They will require a lot of encouragement, support, and kindness. Don't say too much about what's ahead of them; just try to focus them on each step at a time.
After starting a task, you should check in with the user around the estimated completion time to see if the task is completed. Use the `schedule(datetime, message)` function, which has already been imported.
To do this, schedule a reminder based on estimated completion time using the function `schedule(datetime_object, "Your message here.")`, WHICH HAS ALREADY BEEN IMPORTED. YOU DON'T NEED TO IMPORT THE `schedule` FUNCTION. IT IS AVAILABLE. You'll receive the message at `datetime_object`.
You guide the user through the list one task at a time, convincing them to move forward, giving a pep talk if need be. Your job is essentially to answer "what should I (the user) be doing right now?" for every moment of the day.
Remember: You can run Python code. Be very concise. Ensure that you actually run code every time! THIS IS IMPORTANT. You NEED to write code. **Help the user by being very concise in your answers.** Do not break down tasks excessively, just into simple, few minute steps. Don't assume the user lives their life in a certain way— pick very general tasks if you're breaking a task down.
""".strip()
interpreter.custom_instructions = system_message
### TOOLS
for file in glob.glob('interpreter/tools/*.py'):
with open(file, 'r') as f:
for chunk in interpreter.computer.run("python", f.read()):
print(chunk)
### LLM SETTINGS
# Local settings
# interpreter.llm.model = "local"
# interpreter.llm.api_base = "https://localhost:8080/v1" # Llamafile default
# interpreter.llm.max_tokens = 1000
# interpreter.llm.context_window = 3000
# Hosted settings
interpreter.llm.api_key = os.getenv('OPENAI_API_KEY')
interpreter.llm.model = "gpt-4-0125-preview"
interpreter.auto_run = True
# interpreter.force_task_completion = True
### MISC SETTINGS
interpreter.offline = True
interpreter.id = 206 # Used to identify itself to other interpreters. This should be changed programmatically so it's unique.
### RESET conversations/user.json
script_dir = os.path.dirname(os.path.abspath(__file__))
user_json_path = os.path.join(script_dir, 'conversations', 'user.json')
with open(user_json_path, 'w') as file:
json.dump([], file)
### START CORE
main(interpreter)
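For context: the system message in this removed start.py leans on a pre-imported `schedule(datetime_object, message)` helper loaded from `interpreter/tools/`, which the diff doesn't show. A hypothetical sketch of what such a helper could look like (names and routing assumed, not taken from the repo):
import threading
from datetime import datetime
def schedule(when: datetime, message: str):
    """Hypothetical sketch: deliver `message` back to the interpreter at `when`."""
    delay = max(0.0, (when - datetime.now()).total_seconds())
    def fire():
        # In the removed Redis architecture this would presumably be
        # r.rpush('to_core', ...) with a computer message; print stands in here.
        print({"role": "computer", "type": "message", "content": message})
    threading.Timer(delay, fire).start()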

@@ -1,239 +0,0 @@
from dotenv import load_dotenv
load_dotenv() # take environment variables from .env.
import asyncio
import threading
import os
import pyaudio
from starlette.websockets import WebSocket
from queue import Queue
from pynput import keyboard
import json
import traceback
import websockets
import queue
import pydub
import ast
from pydub import AudioSegment
from pydub.playback import play
import io
import time
import wave
import tempfile
from datetime import datetime
from interpreter import interpreter # Just for code execution. Maybe we should let people do from interpreter.computer import run?
from utils.kernel import put_kernel_messages_into_queue
from utils.get_system_info import get_system_info
from stt import stt_wav
from utils.logs import setup_logging
from utils.logs import logger
setup_logging()
# Configuration for Audio Recording
CHUNK = 1024 # Record in chunks of 1024 samples
FORMAT = pyaudio.paInt16 # 16 bits per sample
CHANNELS = 1 # Mono
RATE = 44100 # Sample rate
RECORDING = False # Flag to control recording state
SPACEBAR_PRESSED = False # Flag to track spacebar press state
# Specify OS
current_platform = get_system_info()
# Initialize PyAudio
p = pyaudio.PyAudio()
def record_audio():
if os.getenv('STT_RUNNER') == "server":
# STT will happen on the server. we're sending audio.
send_queue.put({"role": "user", "type": "audio", "format": "audio/wav", "start": True})
elif os.getenv('STT_RUNNER') == "device":
# STT will happen here, on the device. we're sending text.
send_queue.put({"role": "user", "type": "message", "start": True})
else:
raise Exception("STT_RUNNER must be set to either 'device' or 'server'.")
"""Record audio from the microphone and add it to the queue."""
stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)
logger.info("Recording started...")
global RECORDING
# Create a temporary WAV file to store the audio data
temp_dir = tempfile.gettempdir()
wav_path = os.path.join(temp_dir, f"audio_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav")
wav_file = wave.open(wav_path, 'wb')
wav_file.setnchannels(CHANNELS)
wav_file.setsampwidth(p.get_sample_size(FORMAT))
wav_file.setframerate(RATE)
while RECORDING:
data = stream.read(CHUNK, exception_on_overflow=False)
wav_file.writeframes(data)
wav_file.close()
stream.stop_stream()
stream.close()
logger.info("Recording stopped.")
duration = wav_file.getnframes() / RATE
if duration < 0.3:
# Just pressed it. Send stop message
if os.getenv('STT_RUNNER') == "device":
send_queue.put({"role": "user", "type": "message", "content": "stop"})
send_queue.put({"role": "user", "type": "message", "end": True})
else:
send_queue.put({"role": "user", "type": "audio", "format": "audio/wav", "content": ""})
send_queue.put({"role": "user", "type": "audio", "format": "audio/wav", "end": True})
else:
if os.getenv('STT_RUNNER') == "device":
# Run stt then send text
text = stt_wav(wav_path)
send_queue.put({"role": "user", "type": "message", "content": text})
send_queue.put({"role": "user", "type": "message", "end": True})
else:
# Stream audio
with open(wav_path, 'rb') as audio_file:
byte_data = audio_file.read(CHUNK)
while byte_data:
send_queue.put({"role": "user", "type": "audio", "format": "audio/wav", "content": str(byte_data)})
byte_data = audio_file.read(CHUNK)
send_queue.put({"role": "user", "type": "audio", "format": "audio/wav", "end": True})
if os.path.exists(wav_path):
os.remove(wav_path)
def toggle_recording(state):
"""Toggle the recording state."""
global RECORDING, SPACEBAR_PRESSED
if state and not SPACEBAR_PRESSED:
SPACEBAR_PRESSED = True
if not RECORDING:
RECORDING = True
threading.Thread(target=record_audio).start()
elif not state and SPACEBAR_PRESSED:
SPACEBAR_PRESSED = False
RECORDING = False
def on_press(key):
"""Detect spacebar press."""
if key == keyboard.Key.space:
toggle_recording(True)
def on_release(key):
"""Detect spacebar release and CTRL-C key press."""
if key == keyboard.Key.space:
toggle_recording(False)
elif key == keyboard.Key.esc:
logger.info("Exiting...")
os._exit(0)
import asyncio
send_queue = queue.Queue()
async def message_sender(websocket):
while True:
message = await asyncio.get_event_loop().run_in_executor(None, send_queue.get)
await websocket.send(json.dumps(message))
send_queue.task_done()
async def websocket_communication(WS_URL):
while True:
try:
async with websockets.connect(WS_URL) as websocket:
logger.info("Press the spacebar to start/stop recording. Press ESC to exit.")
asyncio.create_task(message_sender(websocket))
initial_message = {"role": None, "type": None, "format": None, "content": None}
message_so_far = initial_message
while True:
message = await websocket.recv()
logger.debug(f"Got this message from the server: {type(message)} {message}")
if type(message) == str:
message = json.loads(message)
if message.get("end"):
logger.debug(f"Complete message from the server: {message_so_far}")
logger.info("\n")
message_so_far = initial_message
if "content" in message:
print(message['content'], end="", flush=True)
if any(message_so_far[key] != message[key] for key in message_so_far if key != "content"):
message_so_far = message
else:
message_so_far["content"] += message["content"]
if message["type"] == "audio" and "content" in message:
audio_bytes = bytes(ast.literal_eval(message["content"]))
# Convert bytes to audio file
audio_file = io.BytesIO(audio_bytes)
audio = AudioSegment.from_mp3(audio_file)
# Play the audio
play(audio)
await asyncio.sleep(1)
# Run the code if that's the device's job
if os.getenv('CODE_RUNNER') == "device":
if message["type"] == "code" and "end" in message:
language = message_so_far["format"]
code = message_so_far["content"]
result = interpreter.computer.run(language, code)
send_queue.put(result)
except:
# traceback.print_exc()
logger.info(f"Connecting to `{WS_URL}`...")
await asyncio.sleep(2)
if __name__ == "__main__":
async def main():
# Configuration for WebSocket
WS_URL = os.getenv('SERVER_CONNECTION_URL')
if not WS_URL:
raise ValueError("The environment variable SERVER_CONNECTION_URL is not set. Please set it to proceed.")
# Start the WebSocket communication
asyncio.create_task(websocket_communication(WS_URL))
# Start watching the kernel if it's your job to do that
if os.getenv('CODE_RUNNER') == "device":
asyncio.create_task(put_kernel_messages_into_queue(send_queue))
# If Raspberry Pi, add the button listener; otherwise use the spacebar
if current_platform.startswith("raspberry-pi"):
logger.info("Raspberry Pi detected, using button on GPIO pin 15")
# Use GPIO pin 15
pindef = ["gpiochip4", "15"] # gpiofind PIN15
print("PINDEF", pindef)
# HACK: needs passwordless sudo
process = await asyncio.create_subprocess_exec("sudo", "gpiomon", "-brf", *pindef, stdout=asyncio.subprocess.PIPE)
while True:
line = await process.stdout.readline()
if line:
line = line.decode().strip()
if "FALLING" in line:
toggle_recording(False)
elif "RISING" in line:
toggle_recording(True)
else:
break
else:
# Keyboard listener for spacebar press/release
listener = keyboard.Listener(on_press=on_press, on_release=on_release)
listener.start()
asyncio.run(main())
p.terminate()

@@ -1,103 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Rotating Glowing Circle</title>
<style>
body,
html {
height: 100%;
margin: 0;
display: flex;
justify-content: center;
align-items: center;
background-color: black;
}
.circles {
margin: 0;
display: flex;
justify-content: center;
align-items: center;
width: 200px;
height: 200px;
border-radius: 50%;
animation: rotator 48s linear infinite;
}
.center-circle {
position: absolute;
width: 200px;
height: 200px;
border: 1px solid white;
border-radius: 50%;
background-color: transparent;
}
.center-circle-2 {
position: absolute;
width: 190px;
height: 190px;
opacity: 0.2;
border: 1px solid white;
border-radius: 50%;
background-color: transparent;
}
.glow-circle {
position: absolute;
width: 250px;
height: 250px;
border-radius: 50%;
background-color: transparent;
box-shadow: 0 0 60px 30px black;
/* Initial position of the glow circle, offset from the center */
top: 50%;
left: 50%;
margin-top: -125px;
/* Half the height of the circle */
margin-left: -125px;
/* Half the width of the circle */
/* Animation properties */
animation: rotateAround 6s linear infinite;
}
@keyframes rotateAround {
0% {
transform: translateX(240px) translateY(240px);
}
50% {
transform: translateX(0px) translateY(0px);
}
100% {
transform: translateX(-240px) translateY(-240px);
}
}
@keyframes rotator {
0% {
transform: rotate(0deg);
}
100% {
transform: rotate(360deg);
}
}
</style>
</head>
<body>
<div class="circles">
<div class="center-circle"></div>
<div class="glow-circle"></div>
<div class="center-circle-2"></div>
</div>
</body>
</html>

@@ -1,16 +0,0 @@
<div class="centered-circle"></div>
<script>
ws = new WebSocket("ws://localhost/server")
ws.onmessage = event => {
if (event.data == "user_start_message") {
document.body.style.backgroundColor = "white"
document.querySelector('.centered-circle')
.style.backgroundColor = "black"
} else if (event.data == "user_end_message") {
document.body.style.backgroundColor = "black"
document.querySelector('.centered-circle')
.style.backgroundColor = "white"
}
}
</script>

@@ -1,103 +0,0 @@
import redis
import RPi.GPIO as GPIO
import asyncio
import websockets
import sounddevice as sd
import numpy as np
import time
import re
def transcribe(audio_chunks):
pass # (todo)
def say(text):
# This should immediately stop if button is pressed (if GPIO.input(18))
pass # (todo)
# Connect to button
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Set the duration and sample rate for the mic
chunk_duration = 0.5 # seconds
sample_rate = 44100 # Hz
# Set up Redis connection
r = redis.Redis(host='localhost', port=6379, db=0)
# Set up websocket connection
websocket = websockets.connect('ws://localhost:8765')
# This is so we only say() full sentences
accumulated_text = ""
def is_full_sentence(text):
return text.endswith(('.', '!', '?'))
def split_into_sentences(text):
return re.split(r'(?<=[.!?])\s+', text)
async def send_to_websocket(message):
async with websocket as ws:
await ws.send(message)
async def check_websocket():
async with websocket as ws:
message = await ws.recv()
return message
def main():
while True:
# If the button is pushed down
if not GPIO.input(18):
# Tell websocket and core that the user is speaking
send_to_websocket({"role": "user", "type": "message", "start": True}) # Standard start flag, required per streaming LMC protocol (https://docs.openinterpreter.com/guides/streaming-response)
r.rpush('to_core', {"role": "user", "type": "message", "content": "stop"}) # Custom stop message. Core is not streaming LMC (it's static LMC) so doesn't require that ^ flag
# Record audio from the microphone in chunks
audio_chunks = []
# Continue recording until the button is released
while not GPIO.input(18):
chunk = sd.rec(int(chunk_duration * sample_rate), samplerate=sample_rate, channels=2)
sd.wait() # Wait until recording is finished
audio_chunks.append(chunk)
# Transcribe
text = transcribe(audio_chunks)
message = {"role": "user", "type": "message", "content": text, "time": time.time()}
# Send message to core and websocket
r.rpush('to_core', message)
send_to_websocket(message)
# Send user message end flag to websocket, required per streaming LMC protocol
send_to_websocket({"role": "user", "type": "message", "end": True})
# Send out anything in the to_interface queue
chunk = r.lpop('to_interface')
if chunk:
send_to_websocket(chunk)
accumulated_text += chunk["content"]
# Speak full sentences out loud
sentences = split_into_sentences(accumulated_text)
if is_full_sentence(sentences[-1]):
for sentence in sentences:
say(sentence)
accumulated_text = ""
else:
for sentence in sentences[:-1]:
say(sentence)
accumulated_text = sentences[-1]
else:
say(accumulated_text)
accumulated_text = ""
message = check_websocket()
if message:
r.rpush('to_core', message)
if __name__ == "__main__":
main()
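The comments above distinguish streaming LMC (a start flag, content chunks, then an end flag, per https://docs.openinterpreter.com/guides/streaming-response) from the static LMC dicts pushed onto the Redis queues. Roughly, with message shapes taken from the code in these files, one user turn looks like:
# Streaming LMC over the websocket:
streamed_turn = [
    {"role": "user", "type": "message", "start": True},
    {"role": "user", "type": "message", "content": "What's on my "},
    {"role": "user", "type": "message", "content": "calendar today?"},
    {"role": "user", "type": "message", "end": True},
]
# Static LMC on the `to_core` queue: one complete dict, no flags.
static_message = {"role": "user", "type": "message", "content": "What's on my calendar today?"}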

@@ -1,57 +0,0 @@
"""
Listens to chunks of audio recorded by the user.
Run `python listen.py` to start the server, then `cd user` and run `python record.py` to record audio.
"""
from fastapi import FastAPI, WebSocket
import uvicorn
import json
from stt import stt
import tempfile
app = FastAPI()
@app.websocket("/user")
async def user(ws: WebSocket):
await ws.accept()
audio_file = bytearray()
mime_type = None
try:
while True:
message = await ws.receive()
if message['type'] == 'websocket.disconnect':
break
if message['type'] == 'websocket.receive':
if 'text' in message:
control_message = json.loads(message['text'])
if control_message.get('action') == 'command' and control_message.get('state') == 'start' and 'mimeType' in control_message:
# This indicates the start of a new audio file
mime_type = control_message.get('mimeType')
elif control_message.get('action') == 'command' and control_message.get('state') == 'end':
# This indicates the end of the audio file
# Process the complete audio file here
transcription = stt(audio_file, mime_type)
await ws.send_json({"transcript": transcription})
print("SENT TRANSCRIPTION!")
# Reset the bytearray for the next audio file
audio_file = bytearray()
mime_type = None
elif 'bytes' in message:
# If it's not a control message, it's part of the audio file
audio_file.extend(message['bytes'])
except Exception as e:
print(f"WebSocket connection closed with exception: {e}")
finally:
await ws.close()
print("WebSocket connection closed")
if __name__ == "__main__":
with tempfile.TemporaryDirectory():
uvicorn.run(app, host="0.0.0.0", port=8000)

@@ -1,146 +0,0 @@
"""
Handles everything the user interacts through.
Connects to a websocket at /user. Sends shit to it, and displays/plays the shit it sends back.
For now, it just handles the spacebar: while it's held down, it records audio.
"""
import os
import pyaudio
import threading
import asyncio
import websocket
import time
import json
from pynput import keyboard
import wave
import tempfile
from datetime import datetime
# Configuration
chunk = 1024 # Record in chunks of 1024 samples
sample_format = pyaudio.paInt16 # 16 bits per sample
channels = 1 # Mono
fs = 48000 # Sample rate
p = pyaudio.PyAudio() # Create an interface to PortAudio
frames = [] # Initialize array to store frames
recording = False # Flag to control recording state
ws_chunk_size = 4096 # Websocket stream chunk size
port = os.getenv('ASSISTANT_PORT', 8000)
ws_url = f"ws://localhost:{port}/user"
while True:
try:
ws = websocket.create_connection(ws_url)
break
except ConnectionRefusedError:
time.sleep(1)
async def start_recording():
global recording
if recording:
return # Avoid multiple starts
recording = True
frames.clear() # Clear existing frames
stream = p.open(format=sample_format,
channels=channels,
rate=fs,
frames_per_buffer=chunk,
input=True)
print("Recording started...")
async with websockets.connect("ws://localhost:8000/user") as websocket:
# Send the start command with mime type
await websocket.send(json.dumps({"role": "user", "type": "audio", "format": "audio/wav", "start": True}))
while recording:
data = stream.read(chunk)
frames.append(data)
stream.stop_stream()
stream.close()
try:
file_path = save_recording(frames)
with open(file_path, 'rb') as audio_file:
byte_chunk = audio_file.read(ws_chunk_size)
while byte_chunk:
await websocket.send(json.dumps({"role": "user", "type": "audio", "format": "audio/wav", "content": str(byte_chunk)}))
byte_chunk = audio_file.read(ws_chunk_size)
finally:
os.remove(file_path)
# Send the end command
await websocket.send(json.dumps({"role": "user", "type": "audio", "format": "audio/wav", "end": True}))
# Receive a json message and then close the connection
message = await websocket.recv()
print("Received message:", json.loads(message))
print("Recording stopped.")
def save_recording(frames) -> str:
# Save the recorded data as a WAV file
temp_dir = tempfile.gettempdir()
# Create a temporary file with the appropriate extension
output_path = os.path.join(temp_dir, f"input_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav")
with wave.open(output_path, 'wb') as wf:
wf.setnchannels(channels)
wf.setsampwidth(p.get_sample_size(sample_format))
wf.setframerate(fs)
wf.writeframes(b''.join(frames))
return output_path
def start_recording_sync():
# Create a new event loop for the thread
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
# Run the asyncio event loop
loop.run_until_complete(start_recording())
loop.close()
def stop_recording():
global recording
recording = False
print("Stopped recording")
def toggle_recording():
global recording
if recording:
stop_recording()
else:
# Start recording in a new thread to avoid blocking
print("Starting recording")
threading.Thread(target=start_recording_sync).start()
is_space_pressed = False # Flag to track the state of the spacebar
def on_press(key):
global is_space_pressed
if key == keyboard.Key.space and not is_space_pressed:
is_space_pressed = True
toggle_recording()
def on_release(key):
global is_space_pressed
if key == keyboard.Key.space and is_space_pressed:
is_space_pressed = False
stop_recording()
if key == keyboard.Key.esc:
# Stop listener
return False
# Collect events until released
with keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
with tempfile.TemporaryDirectory():
print("Press the spacebar to start/stop recording. Press ESC to exit.")
listener.join()
p.terminate()

@@ -1,32 +0,0 @@
"""
Exposes an SSE streaming server endpoint at /run, which receives language and code,
and streams the output.
"""
from dotenv import load_dotenv
load_dotenv() # take environment variables from .env.
import os
import json
from interpreter import interpreter
import uvicorn
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
class Code(BaseModel):
language: str
code: str
app = FastAPI()
@app.post("/run")
async def run_code(code: Code):
def generator():
for chunk in interpreter.computer.run(code.language, code.code):
yield json.dumps(chunk)
return StreamingResponse(generator())
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=int(os.getenv('COMPUTER_PORT', 9000)))
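For reference, a hypothetical client for this removed /run endpoint: POST the language and code, then read the streamed body, which is a concatenation of the JSON chunks yielded by the generator above (localhost and the COMPUTER_PORT default of 9000 assumed):
import requests
with requests.post(
    "http://localhost:9000/run",
    json={"language": "python", "code": "print('hello')"},
    stream=True,
) as response:
    for chunk in response.iter_content(chunk_size=None, decode_unicode=True):
        # Each chunk is part of the JSON-encoded output stream from interpreter.computer.run.
        print(chunk, end="", flush=True)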

01OS/poetry.lock (generated)
File diff suppressed because it is too large.

@@ -23,9 +23,9 @@ ffmpeg-python = "^0.2.0"
textual = "^0.50.1"
pydub = "^0.25.1"
ngrok = "^1.0.0"
open-interpreter = "^0.2.0"
simpleaudio = "^1.0.4"
opencv-python = "^4.9.0.80"
open-interpreter = {git = "https://github.com/KillianLucas/open-interpreter.git"}
[build-system]
requires = ["poetry-core"]
