From e744c4f1b2728e9319c09e5d5515d2b3cd00dae6 Mon Sep 17 00:00:00 2001 From: killian <63927363+KillianLucas@users.noreply.github.com> Date: Sun, 10 Mar 2024 05:58:14 -0700 Subject: [PATCH] Simplified --- 01OS/poetry.lock | 15 +--- 01OS/pyproject.toml | 3 +- 01OS/start.py | 51 +++++++++++- README.md | 138 +++++++++++++++++++-------------- {01OS => archive}/.env.example | 0 5 files changed, 131 insertions(+), 76 deletions(-) rename {01OS => archive}/.env.example (100%) diff --git a/01OS/poetry.lock b/01OS/poetry.lock index 01f2df6..e29c468 100644 --- a/01OS/poetry.lock +++ b/01OS/poetry.lock @@ -237,19 +237,6 @@ files = [ {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, ] -[[package]] -name = "asyncio" -version = "3.4.3" -description = "reference implementation of PEP 3156" -optional = false -python-versions = "*" -files = [ - {file = "asyncio-3.4.3-cp33-none-win32.whl", hash = "sha256:b62c9157d36187eca799c378e572c969f0da87cd5fc42ca372d92cdb06e7e1de"}, - {file = "asyncio-3.4.3-cp33-none-win_amd64.whl", hash = "sha256:c46a87b48213d7464f22d9a497b9eef8c1928b68320a2fa94240f969f6fec08c"}, - {file = "asyncio-3.4.3-py3-none-any.whl", hash = "sha256:c4d18b22701821de07bd6aea8b53d21449ec0ec5680645e5317062ea21817d2d"}, - {file = "asyncio-3.4.3.tar.gz", hash = "sha256:83360ff8bc97980e4ff25c964c7bd3923d333d177aa4f7fb736b019f26c7cb41"}, -] - [[package]] name = "attrs" version = "23.2.0" @@ -8430,4 +8417,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "5c979241e896a5415fd85bef9c1115a229df308c695dd3cabbee519227b9df2c" +content-hash = "6aa948d6a556517941cd0f399e509be40e273665886b5e2cd6479884a91076ad" diff --git a/01OS/pyproject.toml b/01OS/pyproject.toml index 1d94b85..593f1b9 100644 --- a/01OS/pyproject.toml +++ b/01OS/pyproject.toml @@ -3,7 +3,7 @@ name = "01OS" packages = [ {include = "01OS"}, ] -include = [".env.example", "start.py", "start.sh"] +include = ["start.py"] version = "0.0.13" description = "The open-source language model computer" authors = ["Killian "] @@ -12,7 +12,6 @@ readme = "README.md" [tool.poetry.dependencies] python = ">=3.9,<3.12" -asyncio = "^3.4.3" pyaudio = "^0.2.14" pynput = "^1.7.6" fastapi = "^0.110.0" diff --git a/01OS/start.py b/01OS/start.py index 3b257c3..d506501 100644 --- a/01OS/start.py +++ b/01OS/start.py @@ -38,6 +38,55 @@ def run( local: bool = typer.Option(False, "--local", help="Use recommended local services for LLM, STT, and TTS"), ): + _run( + server=server, + server_host=server_host, + server_port=server_port, + tunnel_service=tunnel_service, + expose=expose, + client=client, + server_url=server_url, + client_type=client_type, + llm_service=llm_service, + model=model, + llm_supports_vision=llm_supports_vision, + llm_supports_functions=llm_supports_functions, + context_window=context_window, + max_tokens=max_tokens, + temperature=temperature, + tts_service=tts_service, + stt_service=stt_service, + local=local + ) + +def _run( + server: bool = False, + server_host: str = "0.0.0.0", + server_port: int = 8000, + + tunnel_service: str = "bore", + expose: bool = False, + + client: bool = False, + server_url: str = None, + client_type: str = "auto", + + llm_service: str = "litellm", + + model: str = "gpt-4", + llm_supports_vision: bool = False, + llm_supports_functions: bool = False, + context_window: int = 2048, + max_tokens: int = 4096, + temperature: float = 0.8, + + 
+    tts_service: str = "openai",
+
+    stt_service: str = "openai",
+
+    local: bool = False
+    ):
+
     if local:
         tts_service = "piper"
         # llm_service = "llamafile"
@@ -93,4 +142,4 @@ def run(
         if client:
             client_thread.join()
     except KeyboardInterrupt:
-        os.kill(os.getpid(), signal.SIGINT)
+        os.kill(os.getpid(), signal.SIGINT)
\ No newline at end of file
diff --git a/README.md b/README.md
index b06ef60..d07db5e 100644
--- a/README.md
+++ b/README.md
@@ -1,16 +1,52 @@
-# ○
-
 Official pre-release repository for [The 01 Project](https://twitter.com/hellokillian/status/1745875973583896950).
 
-> **11** days remaining until launch
+> **4** days remaining until launch
+
+

+
+The open-source language model computer.
+
+![poster](https://pbs.twimg.com/media/GDqTVYzbgAIfLJf?format=png&name=4096x4096)
+
+ + + +```shell +pip install 01OS +``` + +> Not working? Read our [setup guide](https://docs.openinterpreter.com/getting-started/setup). + +```shell +01 # Run this to start the 01. +```
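+
+If you'd rather start the 01 from a script than from a terminal, here is a minimal sketch; the only assumption is the `01` command that `pip install 01OS` puts on your PATH:
+
+```python
+# Sketch: start the 01 from Python by invoking the installed CLI.
+import subprocess
+
+subprocess.run(["01"], check=True)  # same as typing `01` in a terminal
+```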
 
-### [View task list ↗](https://github.com/KillianLucas/01/blob/main/TASKS.md)
+**The 01 project** is an open-source ecosystem for artificially intelligent devices.
+
+By combining code-interpreting language models with speech recognition and voice synthesis, the 01’s flagship operating system (the 01OS) can power conversational, computer-operating AI devices similar to the Rabbit R1 or the Humane Pin.
+
+We intend to become the “Linux” of this space: open, modular, and free for personal or commercial use.
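+
+To make the "code-interpreting language model" idea concrete, here is a toy, illustrative loop: the model writes code, and a small `exec()`-style tool runs whatever it writes. All names are hypothetical, and the "How does it work?" section below describes the real mechanism; this is not the 01OS implementation.
+
+```python
+# Toy sketch of a code-interpreting assistant loop (illustrative only).
+import contextlib
+import io
+
+
+def exec_tool(code: str) -> str:
+    """Run model-written Python and return whatever it prints."""
+    buffer = io.StringIO()
+    with contextlib.redirect_stdout(buffer):
+        exec(code, {})  # the model's code runs on this computer
+    return buffer.getvalue()
+
+
+def ask_model(request: str) -> str:
+    # Stand-in for a real language model call: returns Python source to run.
+    return "print('Pretending to open your notes app...')"
+
+
+print(exec_tool(ask_model("Open my notes app")))
+```
+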
 
-## Install dependencies:
+## Setup
+
+### Install dependencies
 
 ```bash
 # MacOS
 brew install portaudio ffmpeg cmake
 
 # Ubuntu
 sudo apt-get install portaudio19-dev ffmpeg cmake
@@ -22,92 +58,76 @@ sudo apt-get install portaudio19-dev ffmpeg cmake
 ```
 
 If you want to run local speech-to-text using Whisper, install Rust. Follow the instructions given [here](https://www.rust-lang.org/tools/install).
 
-## Setup for usage (experimental):
+### Install and run the 01 CLI
 
-```bash
+```shell
 pip install 01OS
 ```
 
-**Run the 01:**
+```shell
+01 # This will start a server and a client.
 
-```bash
-01 # This will run the server and attempt to determine and run a client.
+01 --server # Start only a server.
+01 --server --expose # Start a server and expose it via a tunnel service (`bore` by default). This will print a `server_url` for clients to connect to.
+
+01 --client --server_url your-server.com # Start only a client.
 ```
 
-**Expose an 01 Server Publicly**
+### Swap out service providers
 
-We currently support exposing the 01 server publicly via a couple of different tunnel services:
+The 01 is agnostic about its speech-to-text, text-to-speech, and language model providers.
 
-- **bore.pub** ([GitHub](https://github.com/ekzhang/bore))
+Select your providers by running:
 
-  - **Requirements:** Ensure that Rust is installed ([Rust Installation](https://www.rust-lang.org/tools/install)), then run:
-    ```
-    cargo install bore-cli
-    ```
-  - **To Expose:**
-    ```bash
-    01 --server --expose-with-bore
-    ```
+```shell
+01 --tts-service openai
+01 --llm-service openai
+01 --stt-service openai
+```
 
-- **localtunnel** ([GitHub](https://github.com/localtunnel/localtunnel))
+[View all providers ↗](https://docs.litellm.ai/docs/providers/), or [join the 01 team by adding a service provider ↗]().
 
-  - **Requirements:** Ensure that Node.js is installed ([Node.js Download](https://nodejs.org/en/download)), then run:
-    ```
-    npm install -g localtunnel
-    ```
-  - **To Expose:**
-    ```bash
-    01 --server --expose-with-localtunnel
-    ```
+### Run the 01 locally
 
-- **ngrok** ([Website](https://ngrok.com/))
-  - **Requirements:** Install ngrok ([Getting Started with ngrok](https://ngrok.com/docs/getting-started/)), and set up an ngrok account. Get your auth key from [ngrok dashboard](https://dashboard.ngrok.com/get-started/your-authtoken), then set it in your local configuration by running:
-    ```
-    ngrok config add-authtoken your_auth_token_here
-    ```
-  - **To Expose:**
-    ```bash
-    01 --server --expose-with-ngrok
-    ```
+Some service providers don't require an internet connection.
 
-**Run a specific client:**
+The following command will attempt to download and use the best providers for your hardware:
 
-```bash
-01 --client macos # Options: macos, rpi
+```shell
+01 --local
 ```
 
-**Run locally:**
-
-The current default uses OpenAI's services.
+## How does it work?
 
-The `--local` flag will install and run the [whisper.cpp](https://github.com/ggerganov/whisper.cpp) STT and [Piper](https://github.com/rhasspy/piper) TTS models.
+The 01 equips a language model (wrapped in a voice interface) with an `exec()` function, which allows it to write and run code to control computers.
 
-```bash
-01 --local # Local client and server
-01 --local --server --expose-with-bore # Expose the local server with a public tunnel
-```
+We only stream speech to/from the end user's device.
 
-**Teach Mode (experimental)**
+# Contributing
 
-Running `01 --teach` runs 01 in teach mode, where you can add your own skills for Open Interpreter to use, through an easy-to-follow GUI.
-
+Please see our [contributing guidelines](docs/CONTRIBUTING.md) for more details on how to get involved. -## Setup for development: +### Setup for development ```bash -# Clone the repo, cd into the 01OS directory +# Clone the repo git clone https://github.com/KillianLucas/01.git + +# Go to the 01OS directory cd 01OS -# Install dependencies, run the commands above +# Install python dependencies poetry install + +# Run it poetry run 01 ``` -**Configuration:** +
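+
+The CLI's logic now lives in a plain `_run()` helper in `start.py` (the Typer `run()` command simply forwards its options to it), so during development you can also drive it from Python. A rough sketch, assuming you run it from the `01OS` directory of your clone so that `start.py` is importable; adjust the arguments to taste:
+
+```python
+# Sketch: call the CLI's underlying helper directly while developing.
+from start import _run  # start.py lives in the 01OS directory of the repo
+
+# Start only the server on the default port; skip the client.
+_run(server=True, client=False, server_port=8000)
+```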
+ +# Roadmap -Copy the `01OS/.env.example` file to `01OS/.env` then configure the environment variables within the file. +Visit [our roadmap](https://github.com/KillianLucas/open-interpreter/blob/main/docs/ROADMAP.md) to see the future of the 01.
diff --git a/01OS/.env.example b/archive/.env.example
similarity index 100%
rename from 01OS/.env.example
rename to archive/.env.example