Merge pull request #36 from shivenmian/u/shivenmian/fix_local

fix: add back whisper-rust local scripts, compile binary in start script
killian committed 11 months ago via GitHub
commit 03f212109c

.DS_Store (vendored, binary file not shown)

.gitignore (vendored)

@@ -168,3 +168,4 @@ cython_debug/
 # ignore the aifs index files
 *.aifs
 01OS/output_audio.wav
+.DS_Store

@@ -5,6 +5,7 @@
 # else we use whisper.cpp and piper local models
 ALL_LOCAL=False
 WHISPER_MODEL_NAME="ggml-tiny.en.bin"
+WHISPER_MODEL_URL="https://huggingface.co/ggerganov/whisper.cpp/resolve/main/"

 # Uncomment to set your OpenAI API key
 # OPENAI_API_KEY=sk-...
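
Note: the start script below simply appends WHISPER_MODEL_NAME to the new WHISPER_MODEL_URL when fetching the model. A rough Python sketch of that download step (illustrative only; the script itself uses curl, and the local_service directory name is taken from the diff further down):

```python
import os
import urllib.request

# Values from the .env above (assumed to be loaded into the environment).
model_name = os.getenv("WHISPER_MODEL_NAME", "ggml-tiny.en.bin")
model_url = os.getenv(
    "WHISPER_MODEL_URL",
    "https://huggingface.co/ggerganov/whisper.cpp/resolve/main/",
)

# Cache directory; start.sh uses 01OS/server/stt/local_service.
model_dir = "local_service"
model_path = os.path.join(model_dir, model_name)

if not os.path.isfile(model_path):
    os.makedirs(model_dir, exist_ok=True)
    # The download URL is just the base URL with the model file name appended.
    urllib.request.urlretrieve(model_url + model_name, model_path)
```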

Binary file not shown.

Binary file not shown.

@@ -57,7 +57,7 @@ def run_command(command):

 def get_transcription_file(wav_file_path: str):
     local_path = os.path.join(os.path.dirname(__file__), 'local_service')
-    whisper_rust_path = os.path.join(local_path, 'whisper-rust')
+    whisper_rust_path = os.path.join(os.path.dirname(__file__), 'whisper-rust', 'target', 'release')
     model_name = os.getenv('WHISPER_MODEL_NAME')
     if not model_name:
         raise EnvironmentError("WHISPER_MODEL_NAME environment variable is not set.")
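
With the path change above, get_transcription_file resolves the compiled release binary rather than the crate root. A minimal sketch of how that binary could be invoked from Python (the --model-path/--file-path flags match the clap definitions in main.rs further down; the helper name transcribe_wav and the subprocess plumbing are illustrative, not the exact code in stt.py):

```python
import os
import subprocess

def transcribe_wav(wav_file_path: str) -> str:
    """Illustrative only: run the compiled whisper-rust binary on a WAV file."""
    base_dir = os.path.dirname(__file__)
    whisper_rust_bin = os.path.join(base_dir, "whisper-rust", "target", "release", "whisper-rust")
    model_name = os.getenv("WHISPER_MODEL_NAME")
    if not model_name:
        raise EnvironmentError("WHISPER_MODEL_NAME environment variable is not set.")
    model_path = os.path.join(base_dir, "local_service", model_name)

    # Flags correspond to the clap arguments declared in main.rs (model_path, file_path).
    result = subprocess.run(
        [whisper_rust_bin, "--model-path", model_path, "--file-path", wav_file_path],
        capture_output=True,
        text=True,
        check=True,
    )
    # main.rs prints the transcription to stdout.
    return result.stdout
```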

@@ -0,0 +1,10 @@
# Generated by Cargo
# will have compiled files and executables
debug/
target/

# These are backup files generated by rustfmt
**/*.rs.bk

# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb

File diff suppressed because it is too large.

@@ -0,0 +1,14 @@
[package]
name = "whisper-rust"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
anyhow = "1.0.79"
clap = { version = "4.4.18", features = ["derive"] }
cpal = "0.15.2"
hound = "3.5.1"
whisper-rs = "0.10.0"
whisper-rs-sys = "0.8.0"

@@ -0,0 +1,34 @@
mod transcribe;

use clap::Parser;
use std::path::PathBuf;
use transcribe::transcribe;

#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// This is the model for Whisper STT
    #[arg(short, long, value_parser, required = true)]
    model_path: PathBuf,

    /// This is the wav audio file that will be converted from speech to text
    #[arg(short, long, value_parser, required = true)]
    file_path: Option<PathBuf>,
}

fn main() {
    let args = Args::parse();

    let file_path = match args.file_path {
        Some(fp) => fp,
        None => panic!("No file path provided")
    };

    let result = transcribe(&args.model_path, &file_path);

    match result {
        Ok(transcription) => print!("{}", transcription),
        Err(e) => panic!("Error: {}", e),
    }
}

@@ -0,0 +1,64 @@
use whisper_rs::{FullParams, SamplingStrategy, WhisperContext, WhisperContextParameters};
use std::path::PathBuf;

/// Transcribes the given audio file using the whisper-rs library.
///
/// # Arguments
/// * `model_path` - Path to Whisper model file
/// * `file_path` - A string slice that holds the path to the audio file to be transcribed.
///
/// # Returns
///
/// A Result containing a String with the transcription if successful, or an error message if not.
pub fn transcribe(model_path: &PathBuf, file_path: &PathBuf) -> Result<String, String> {
    let model_path_str = model_path.to_str().expect("Not valid model path");
    // Load a context and model
    let ctx = WhisperContext::new_with_params(
        model_path_str, // Replace with the actual path to the model
        WhisperContextParameters::default(),
    )
    .map_err(|_| "failed to load model")?;

    // Create a state
    let mut state = ctx.create_state().map_err(|_| "failed to create state")?;

    // Create a params object
    // Note that currently the only implemented strategy is Greedy, BeamSearch is a WIP
    let mut params = FullParams::new(SamplingStrategy::Greedy { best_of: 1 });

    // Edit parameters as needed
    params.set_n_threads(1); // Set the number of threads to use
    params.set_translate(true); // Enable translation
    params.set_language(Some("en")); // Set the language to translate to English

    // Disable printing to stdout
    params.set_print_special(false);
    params.set_print_progress(false);
    params.set_print_realtime(false);
    params.set_print_timestamps(false);

    // Load the audio file
    let audio_data = std::fs::read(file_path)
        .map_err(|e| format!("failed to read audio file: {}", e))?
        .chunks_exact(2)
        .map(|chunk| i16::from_ne_bytes([chunk[0], chunk[1]]))
        .collect::<Vec<i16>>();

    // Convert the audio data to the required format (16KHz mono i16 samples)
    let audio_data = whisper_rs::convert_integer_to_float_audio(&audio_data);

    // Run the model
    state.full(params, &audio_data[..]).map_err(|_| "failed to run model")?;

    // Fetch the results
    let num_segments = state.full_n_segments().map_err(|_| "failed to get number of segments")?;
    let mut transcription = String::new();
    for i in 0..num_segments {
        let segment = state.full_get_segment_text(i).map_err(|_| "failed to get segment")?;
        transcription.push_str(&segment);
        transcription.push('\n');
    }

    Ok(transcription)
}
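
The transcriber expects 16 kHz mono 16-bit PCM input, per the conversion comment above. A purely illustrative Python sketch that writes a WAV file in that format for a quick smoke test (a synthetic tone, so the resulting transcription is not meaningful speech):

```python
import math
import struct
import wave

# One second of a 440 Hz tone as 16 kHz mono 16-bit PCM,
# matching the sample format the Rust transcriber expects.
sample_rate = 16000
samples = [
    int(0.3 * 32767 * math.sin(2 * math.pi * 440 * n / sample_rate))
    for n in range(sample_rate)
]

with wave.open("test.wav", "wb") as wav:
    wav.setnchannels(1)          # mono
    wav.setsampwidth(2)          # 16-bit samples
    wav.setframerate(sample_rate)
    wav.writeframes(struct.pack("<" + "h" * len(samples), *samples))
```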

@@ -71,19 +71,39 @@ if [[ "$ALL_LOCAL" == "True" ]]; then
     ## WHISPER
-    WHISPER_MODEL_URL="https://huggingface.co/ggerganov/whisper.cpp/resolve/main/"
-    WHISPER_PATH="$SCRIPT_DIR/01OS/server/stt/local_service"
-    if [[ ! -f "${WHISPER_PATH}/${WHISPER_MODEL_NAME}" ]]; then
-        mkdir -p "${WHISPER_PATH}"
-        curl -L "${WHISPER_MODEL_URL}${WHISPER_MODEL_NAME}" -o "${WHISPER_PATH}/${WHISPER_MODEL_NAME}"
+    CWD=$(pwd)
+    STT_PATH="$SCRIPT_DIR/01OS/server/stt"
+    WHISPER_RUST_PATH="${STT_PATH}/whisper-rust"
+    cd ${WHISPER_RUST_PATH}
+
+    # Check if whisper-rust executable exists
+    if [[ ! -f "${WHISPER_RUST_PATH}/target/release/whisper-rust" ]]; then
+        # Check if Rust is installed. Needed to build whisper executable
+        if ! command -v rustc &> /dev/null; then
+            echo "Rust is not installed or is not in system PATH. Please install Rust before proceeding."
+            exit 1
+        fi
+
+        # Build the Whisper Rust executable
+        cargo build --release
+    fi
+
+    WHISPER_MODEL_PATH="${STT_PATH}/local_service"
+    if [[ ! -f "${WHISPER_MODEL_PATH}/${WHISPER_MODEL_NAME}" ]]; then
+        mkdir -p "${WHISPER_MODEL_PATH}"
+        curl -L "${WHISPER_MODEL_URL}${WHISPER_MODEL_NAME}" -o "${WHISPER_MODEL_PATH}/${WHISPER_MODEL_NAME}"
     fi
+
+    cd $CWD

     ## PIPER
-    PIPER_FILE_PATH="$SCRIPT_DIR/01OS/server/tts/local_service${PIPER_URL}${PIPER_ASSETNAME}"
-    if [[ ! -f "$PIPER_FILE_PATH" ]]; then
-        mkdir -p "${PIPER_FILE_PATH}"
+    PIPER_FOLDER_PATH="$SCRIPT_DIR/01OS/server/tts/local_service"
+    if [[ ! -f "$PIPER_FOLDER_PATH" ]]; then
+        mkdir -p "${PIPER_FOLDER_PATH}"

     OS=$(uname -s)
     ARCH=$(uname -m)
@@ -104,7 +124,7 @@ if [[ "$ALL_LOCAL" == "True" ]]; then
     CWD=$(pwd)
     # Navigate to SCRIPT_DIR/01OS/server/tts/local_service
-    cd $SCRIPT_DIR/01OS/server/tts/local_service
+    cd ${PIPER_FOLDER_PATH}
     curl -L "${PIPER_URL}${PIPER_ASSETNAME}" -o "${PIPER_ASSETNAME}"
     tar -xvzf $PIPER_ASSETNAME

@ -20,6 +20,8 @@ brew install portaudio ffmpeg
sudo apt-get install portaudio19-dev ffmpeg sudo apt-get install portaudio19-dev ffmpeg
``` ```
If you want to run local speech-to-text using Whisper, install Rust. Follow the instructions given [here](https://www.rust-lang.org/tools/install).
## Setup for usage (experimental): ## Setup for usage (experimental):
```bash ```bash
