parent
12df8bbfac
commit
23123dc549
@ -1 +1 @@
|
||||
[{"role": "user", "type": "message", "content": "This is a microphone. I also have an extra microphone.\n"}]
|
||||
[{"role": "user", "type": "message", "content": " Hello, how you doing?\n"}]
|
@ -0,0 +1,7 @@
|
||||
# Setup
|
||||
|
||||
To rebuild the `whisper-rust` executable, do the following:
|
||||
|
||||
1. Install [Rust](https://www.rust-lang.org/tools/install), cmake, and Python dependencies `pip install -r requirements.txt`.
|
||||
2. Go to **core/stt** and run `cargo build --release`.
|
||||
3. Move the `whisper-rust` executable from target/release to this directory.
|
Binary file not shown.
@ -1 +0,0 @@
|
||||
WHISPER_MODEL_PATH=/path/to/ggml-tiny.en.bin
|
@ -1,9 +0,0 @@
|
||||
|
||||
# Setup
|
||||
|
||||
1. Install [Rust](https://www.rust-lang.org/tools/install) and Python dependencies `pip install -r requirements.txt`.
|
||||
2. Go to **core/stt** and run `cargo build --release`.
|
||||
3. Download GGML Whisper model from [Huggingface](https://huggingface.co/ggerganov/whisper.cpp).
|
||||
4. In core, copy `.env.example` to `.env` and put the path to the model.
|
||||
5. Run `python core/i_endpoint.py` to start the server.
|
||||
6. Run `python core/test_cli.py PATH_TO_FILE` to test sending audio to service and getting transcription back over websocket.
|
@ -1,55 +0,0 @@
|
||||
from datetime import datetime
|
||||
import os
|
||||
import contextlib
|
||||
import tempfile
|
||||
import ffmpeg
|
||||
import subprocess
|
||||
|
||||
def convert_mime_type_to_format(mime_type: str) -> str:
    """Map an audio MIME type to the file extension/format name ffmpeg expects.

    Known types are mapped explicitly. For any other ``audio/<subtype>``
    value the subtype (minus any non-standard ``x-`` prefix) is returned,
    because using the full MIME type as a filename extension would embed a
    slash in the temp-file path built by the caller.

    Args:
        mime_type: MIME type string such as ``"audio/wav"``.

    Returns:
        Short format string such as ``"wav"`` or ``"webm"``. Non-audio
        MIME types are returned unchanged (original fallback behavior).
    """
    if mime_type == "audio/x-wav" or mime_type == "audio/wav":
        return "wav"
    if mime_type == "audio/webm":
        return "webm"

    # Generalize: "audio/mpeg" -> "mpeg", "audio/x-flac" -> "flac".
    if mime_type.startswith("audio/"):
        subtype = mime_type[len("audio/"):]
        return subtype[2:] if subtype.startswith("x-") else subtype

    return mime_type
|
||||
|
||||
@contextlib.contextmanager
def export_audio_to_wav_ffmpeg(audio: bytearray, mime_type: str) -> str:
    """Write raw audio bytes to a temp file and convert them to WAV.

    Context manager that yields the path of the converted file; both the
    intermediate input file and the WAV output are deleted on exit.

    Args:
        audio: Raw audio bytes in the container described by ``mime_type``.
        mime_type: MIME type of ``audio`` (used to pick the input extension
            so ffmpeg can infer the container from the filename).

    Yields:
        Path to a 16-bit PCM (``pcm_s16le``), mono, 16 kHz WAV file.
    """
    temp_dir = tempfile.gettempdir()

    # Create a temporary file with the appropriate extension
    input_ext = convert_mime_type_to_format(mime_type)
    input_path = os.path.join(temp_dir, f"input_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.{input_ext}")
    with open(input_path, 'wb') as f:
        f.write(audio)

    output_path = os.path.join(temp_dir, f"output_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav")
    try:
        # Export to wav. Previously a conversion failure leaked input_path,
        # since cleanup only ran after a successful yield; the try/finally
        # now guarantees both temp files are removed on any exit path.
        ffmpeg.input(input_path).output(output_path, acodec='pcm_s16le', ac=1, ar='16k').run()

        print(f"Temporary file path: {output_path}")

        yield output_path
    finally:
        os.remove(input_path)
        # Output may not exist if ffmpeg failed before writing it.
        if os.path.exists(output_path):
            os.remove(output_path)
|
||||
|
||||
def run_command(command):
    """Run *command* and capture its textual output.

    Args:
        command: Argument list handed directly to ``subprocess.run``.

    Returns:
        Tuple ``(stdout, stderr)`` of the completed process, as strings.
    """
    completed = subprocess.run(command, capture_output=True, text=True)
    return completed.stdout, completed.stderr
|
||||
|
||||
def get_transcription(audio_bytes: bytearray, mime_type):
    """Transcribe audio by invoking the bundled ``whisper-rust`` binary.

    Converts the incoming audio to WAV via ``export_audio_to_wav_ffmpeg``,
    then shells out to the Rust transcriber with the model path taken from
    the ``WHISPER_MODEL_PATH`` environment variable.

    Args:
        audio_bytes: Raw audio bytes in the container given by ``mime_type``.
        mime_type: MIME type of ``audio_bytes``.

    Returns:
        The transcriber's stdout (the transcription text).

    Raises:
        EnvironmentError: If ``WHISPER_MODEL_PATH`` is not set.
    """
    with export_audio_to_wav_ffmpeg(audio_bytes, mime_type) as wav_file_path:
        model_path = os.getenv("WHISPER_MODEL_PATH")
        if not model_path:
            raise EnvironmentError("WHISPER_MODEL_PATH environment variable is not set.")

        # The binary is expected next to this module at
        # whisper-rust/target/release/whisper-rust (see README build steps).
        whisper_binary = os.path.join(
            os.path.dirname(__file__), 'whisper-rust', 'target', 'release', 'whisper-rust'
        )
        output, error = run_command(
            [whisper_binary, '--model-path', model_path, '--file-path', wav_file_path]
        )

        print("Exciting transcription result:", output)
        return output
|
@ -1,40 +0,0 @@
|
||||
import argparse
|
||||
import asyncio
|
||||
import websockets
|
||||
import os
|
||||
import json
|
||||
|
||||
# Stream an audio file to the local /a websocket endpoint in fixed-size chunks.
async def send_audio_in_chunks(file_path, chunk_size=4096):
    """Send an audio file over the /a websocket and print the server's reply.

    Sends a start command announcing the mime type, streams the file as
    binary messages of ``chunk_size`` bytes, sends an end command, then
    prints the JSON response received from the server.

    Args:
        file_path: Path of the (webm) audio file to send.
        chunk_size: Bytes per websocket message.
    """
    async with websockets.connect("ws://localhost:8000/a") as websocket:
        # Announce a new audio stream and its format.
        await websocket.send(json.dumps({"action": "command", "state": "start", "mimeType": "audio/webm"}))

        # Stream the file contents; iter() with a b'' sentinel reads until EOF.
        with open(file_path, 'rb') as audio_file:
            for chunk in iter(lambda: audio_file.read(chunk_size), b''):
                await websocket.send(chunk)

        # Tell the server the stream is complete.
        await websocket.send(json.dumps({"action": "command", "state": "end"}))

        # Receive the JSON reply, then close the connection.
        message = await websocket.recv()
        print("Received message:", json.loads(message))
        await websocket.close()
|
||||
|
||||
# Command-line entry point: parse arguments, validate the file, run the upload.
def main():
    """Parse CLI arguments and stream the given audio file to the server."""
    parser = argparse.ArgumentParser(description="Send a webm audio file to the /a websocket endpoint and print the responses.")
    parser.add_argument("file_path", help="The path to the webm audio file to send.")
    args = parser.parse_args()

    # Fail fast with a readable error if the path is wrong.
    if not os.path.isfile(args.file_path):
        print(args.file_path)
        print("Error: The file does not exist.")
        exit(1)

    # asyncio.run() replaces the deprecated
    # get_event_loop().run_until_complete() pattern (deprecated since 3.10).
    asyncio.run(send_audio_in_chunks(args.file_path))


# Guard so importing this module doesn't parse argv or open a connection.
if __name__ == "__main__":
    main()
|
Loading…
Reference in new issue