parent 61dd64f469
commit e75fa90a48
Binary file not shown.
@@ -1 +0,0 @@
[{"role": "user", "type": "message", "content": "And it works really well.\n"}]
@@ -1,131 +0,0 @@
from interpreter import interpreter
import os
import glob
import json
import requests


def create_interpreter():
    ### SYSTEM MESSAGE

    # The system message is where most of the 01's behavior is configured.
    # You can put code into the system message {{ in brackets like this }} which will be rendered just before the interpreter starts writing a message.

    system_message = """

You are an executive assistant AI that helps the user manage their tasks. You can run Python code.

Store the user's tasks in a Python list called `tasks`.

---

The user's current task is: {{ tasks[0] if tasks else "No current tasks." }}

{{
if len(tasks) > 1:
    print("The next task is: ", tasks[1])
}}

---

When the user completes the current task, you should remove it from the list and read the next item by running `tasks = tasks[1:]\ntasks[0]`. Then, tell the user what the next task is.

When the user tells you about a set of tasks, you should intelligently order tasks, batch similar tasks, and break down large tasks into smaller tasks (for this, you should consult the user and get their permission to break them down). Your goal is to manage the task list as intelligently as possible, to make the user as efficient and non-overwhelmed as possible. They will require a lot of encouragement, support, and kindness. Don't say too much about what's ahead of them — just try to focus them on each step at a time.

After starting a task, you should check in with the user around the estimated completion time to see if the task is completed. Use the `schedule(datetime, message)` function, which has already been imported.

To do this, schedule a reminder based on estimated completion time using the function `schedule(datetime_object, "Your message here.")`, WHICH HAS ALREADY BEEN IMPORTED. YOU DON'T NEED TO IMPORT THE `schedule` FUNCTION. IT IS AVAILABLE. You'll receive the message at `datetime_object`.

You guide the user through the list one task at a time, convincing them to move forward, giving a pep talk if need be. Your job is essentially to answer "what should I (the user) be doing right now?" for every moment of the day.

Remember: You can run Python code. Be very concise. Ensure that you actually run code every time! THIS IS IMPORTANT. You NEED to write code. **Help the user by being very concise in your answers.** Do not break down tasks excessively, just into simple, few-minute steps. Don't assume the user lives their life in a certain way — pick very general tasks if you're breaking a task down.

""".strip()

    interpreter.custom_instructions = system_message

    ### LLM SETTINGS

    # Local settings
    # interpreter.llm.model = "local"
    # interpreter.llm.api_base = "https://localhost:8080/v1"  # Llamafile default
    # interpreter.llm.max_tokens = 1000
    # interpreter.llm.context_window = 3000

    # Hosted settings
    interpreter.llm.api_key = os.getenv('OPENAI_API_KEY')
    interpreter.llm.model = "gpt-4"
    interpreter.auto_run = True
    interpreter.force_task_completion = False

    ### MISC SETTINGS

    interpreter.offline = True
    interpreter.id = 206  # Used to identify itself to other interpreters. This should be changed programmatically so it's unique.

    ### RESET conversations/user.json

    script_dir = os.path.dirname(os.path.abspath(__file__))
    user_json_path = os.path.join(script_dir, 'conversations', 'user.json')
    with open(user_json_path, 'w') as file:
        json.dump([], file)

    ### CONNECT TO /run

    class Python:
        """
        This class contains all requirements for being a custom language in Open Interpreter:

        - name (an attribute)
        - run (a method)
        - stop (a method)
        - terminate (a method)
        """

        # This is the name that will appear to the LLM.
        name = "python"

        def __init__(self):
            self.halt = False

        def run(self, code):
            """Generator that yields a dictionary in LMC Format."""

            # Prepare the data
            data = {"language": "python", "code": code}

            # Send the data to the /run endpoint
            computer_port = os.getenv('COMPUTER_PORT', '9000')
            response = requests.post(f"http://localhost:{computer_port}/run", json=data, stream=True)

            # Stream the response
            for chunk in response.iter_content(chunk_size=100000000):
                if self.halt:
                    self.halt = False
                    break
                if chunk:  # filter out keep-alive new lines
                    yield json.loads(chunk.decode())

        def stop(self):
            self.halt = True

        def terminate(self):
            """Terminates the entire process."""
            # dramatic!!
            pass

    interpreter.computer.languages = [Python]

    ### SKILLS

    script_dir = os.path.dirname(os.path.abspath(__file__))
    skills_dir = os.path.join(script_dir, 'skills')
    for file in glob.glob(os.path.join(skills_dir, '*.py')):
        with open(file, 'r') as f:
            for chunk in interpreter.computer.run("python", f.read()):
                print(chunk)

    ### RETURN INTERPRETER

    return interpreter
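
The system message above tells the model that a `schedule(datetime_object, message)` helper "has already been imported", but nothing in this commit defines it. A minimal sketch of what such a helper might look like, using only the standard library (the `threading.Timer` approach and the print-based delivery are assumptions, not the project's actual implementation):

import threading
from datetime import datetime

def schedule(datetime_object: datetime, message: str) -> None:
    """Hypothetical sketch of the pre-imported `schedule` helper the prompt describes."""
    delay = max(0.0, (datetime_object - datetime.now()).total_seconds())
    # Delivery is an assumption: the real 01 presumably routes the message back
    # into the conversation; here it is simply printed when the timer fires.
    threading.Timer(delay, print, args=[message]).start()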
@@ -0,0 +1 @@
[{"role": "user", "type": "message", "content": "\ub2e4\uc74c \uc601\uc0c1\uc5d0\uc11c \ub9cc\ub098\uc694!\n"}]
@@ -0,0 +1,82 @@
import os
import glob
import json
import requests
from pathlib import Path


def configure_interpreter(interpreter):
    ### SYSTEM MESSAGE

    # The system message is where most of the 01's behavior is configured.
    # You can put code into the system message {{ in brackets like this }} which will be rendered just before the interpreter starts writing a message.

    system_message = """

You are an executive assistant AI that helps the user manage their tasks. You can run Python code.

Store the user's tasks in a Python list called `tasks`.

---

The user's current task is: {{ tasks[0] if tasks else "No current tasks." }}

{{
if len(tasks) > 1:
    print("The next task is: ", tasks[1])
}}

---

When the user completes the current task, you should remove it from the list and read the next item by running `tasks = tasks[1:]\ntasks[0]`. Then, tell the user what the next task is.

When the user tells you about a set of tasks, you should intelligently order tasks, batch similar tasks, and break down large tasks into smaller tasks (for this, you should consult the user and get their permission to break them down). Your goal is to manage the task list as intelligently as possible, to make the user as efficient and non-overwhelmed as possible. They will require a lot of encouragement, support, and kindness. Don't say too much about what's ahead of them — just try to focus them on each step at a time.

After starting a task, you should check in with the user around the estimated completion time to see if the task is completed. Use the `schedule(datetime, message)` function, which has already been imported.

To do this, schedule a reminder based on estimated completion time using the function `schedule(datetime_object, "Your message here.")`, WHICH HAS ALREADY BEEN IMPORTED. YOU DON'T NEED TO IMPORT THE `schedule` FUNCTION. IT IS AVAILABLE. You'll receive the message at `datetime_object`.

You guide the user through the list one task at a time, convincing them to move forward, giving a pep talk if need be. Your job is essentially to answer "what should I (the user) be doing right now?" for every moment of the day.

Remember: You can run Python code. Be very concise. Ensure that you actually run code every time! THIS IS IMPORTANT. You NEED to write code. **Help the user by being very concise in your answers.** Do not break down tasks excessively, just into simple, few-minute steps. Don't assume the user lives their life in a certain way — pick very general tasks if you're breaking a task down.

""".strip()

    interpreter.custom_instructions = system_message

    ### LLM SETTINGS

    # Local settings
    # interpreter.llm.model = "local"
    # interpreter.llm.api_base = "https://localhost:8080/v1"  # Llamafile default
    # interpreter.llm.max_tokens = 1000
    # interpreter.llm.context_window = 3000

    # Hosted settings
    interpreter.llm.api_key = os.getenv('OPENAI_API_KEY')
    interpreter.llm.model = "gpt-4"
    interpreter.auto_run = True
    interpreter.force_task_completion = False

    ### MISC SETTINGS

    interpreter.offline = True
    interpreter.id = 206  # Used to identify itself to other interpreters. This should be changed programmatically so it's unique.

    ### RESET conversations/user.json

    script_dir = os.path.dirname(os.path.abspath(__file__))
    user_json_path = os.path.join(script_dir, 'conversations', 'user.json')
    with open(user_json_path, 'w') as file:
        json.dump([], file)

    ### SKILLS

    skills_path = Path(__file__).parent / 'skills'
    for file in glob.glob(os.path.join(skills_path, '*.py')):
        with open(file, 'r') as f:
            for chunk in interpreter.computer.run("python", f.read()):
                print(chunk)

    return interpreter
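
A minimal usage sketch for the new entry point (the module name `i` is hypothetical, since the diff does not show the filename; `interpreter.chat` is Open Interpreter's standard chat entry point):

from interpreter import interpreter
from i import configure_interpreter  # hypothetical module name for the file above

interpreter = configure_interpreter(interpreter)
interpreter.chat("Add 'write the weekly report' to my tasks.")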
@@ -1,51 +1,72 @@
### SETTINGS

export MODE_01=LIGHT
export ASSISTANT_PORT=8000
export COMPUTER_PORT=8001
# If ALL_LOCAL is False, we'll use OpenAI's services
export ALL_LOCAL=False
# export OPENAI_API_KEY=sk-...

# Kill whatever's on the ASSISTANT_PORT and COMPUTER_PORT
lsof -ti tcp:$ASSISTANT_PORT | xargs kill
lsof -ti tcp:$COMPUTER_PORT | xargs kill

# If SERVER_START, this is where we'll serve the server.
# If DEVICE_START, this is where the device expects the server to be.
export SERVER_URL=ws://localhost:8000/
export SERVER_START=True
export DEVICE_START=True

# Control where various operations happen — can be `device` or `server`.
export CODE_RUNNER=server
export TTS_RUNNER=device  # If server, audio will be sent over websocket.
export STT_RUNNER=device  # If server, audio will be sent over websocket.

# Will expose the server publicly and display that URL.
export SERVER_EXPOSE_PUBLICALLY=False

### SETUP

# (for dev, reset the ports we were using)

PORT=$(echo $SERVER_URL | grep -oE "[0-9]+")
lsof -ti tcp:$PORT | xargs kill
PORT=$(echo $DEVICE_URL | grep -oE "[0-9]+")
lsof -ti tcp:$PORT | xargs kill

# Check the current Python version
PYTHON_VERSION=$(python -V 2>&1 | cut -d " " -f 2 | cut -d "." -f 1-2)

# If the Python version is not 3.10 or 3.11, switch to it using pyenv
if [[ "$PYTHON_VERSION" != "3.10" ]] && [[ "$PYTHON_VERSION" != "3.11" ]]; then
    echo "Switching to Python 3.10 using pyenv..."
    pyenv install 3.10.0
    pyenv shell 3.10.0
fi

# INSTALL REQUIREMENTS

# (for dev, this is disabled for speed)

# if [[ "$OSTYPE" == "darwin"* ]]; then
#     brew update
#     brew install portaudio ffmpeg
# fi
# pip install -r requirements.txt

### COMPUTER
### START

# START KERNEL WATCHER
# DEVICE

python computer/kernel_watcher.py &
if [[ "$DEVICE_START" == "True" ]]; then
    python device.py &
fi

# START RUN ENDPOINT
# SERVER

python computer/run.py &
if [[ "$SERVER_START" == "True" ]]; then
    python server.py &
fi

# START STT AND TTS SERVICES
# TTS, STT

# (todo)
# (i think we should start with hosted services)

# START LLM
# LLM

# (disabled, we'll start with hosted services)
# python core/llm/start.py &

sleep 6

# START ASSISTANT

python assistant/assistant.py &

### USER

# START USER

python user/user.py &
# python core/llm/start.py &
@@ -0,0 +1,28 @@
"""
Exposes an SSE streaming server endpoint at /run, which receives language and code,
and streams the output.
"""
import os
import json
from interpreter import interpreter
import uvicorn

from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel


class Code(BaseModel):
    language: str
    code: str


app = FastAPI()


@app.post("/run")
async def run_code(code: Code):
    def generator():
        for chunk in interpreter.computer.run(code.language, code.code):
            yield json.dumps(chunk)

    return StreamingResponse(generator())


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=int(os.getenv('COMPUTER_PORT', 9000)))
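
A quick smoke test for this endpoint once the server is running (a sketch: it assumes the `COMPUTER_PORT` default of 9000 from the code above, and that each streamed chunk arrives as exactly one JSON document, the same assumption the removed `Python.run` client made):

import json
import requests

data = {"language": "python", "code": "print(1 + 1)"}
response = requests.post("http://localhost:9000/run", json=data, stream=True)
for chunk in response.iter_content(chunk_size=None):
    if chunk:  # filter out keep-alive newlines
        print(json.loads(chunk.decode()))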