remove logging

pull/256/head
Ben Xu 8 months ago
parent f732ca185d
commit 6b7b6ab65d

@@ -107,13 +107,13 @@ const Main: React.FC<MainProps> = ({ route }) => {
   }

   const playNextAudio = useCallback(async () => {
-    console.log(
-      `in playNextAudio audioQueue is ${audioQueue.length} and sound is ${sound}`
-    );
+    // console.log(
+    //   `in playNextAudio audioQueue is ${audioQueue.length} and sound is ${sound}`
+    // );
     if (audioQueue.length > 0 && sound == null) {
       const uri = audioQueue.shift() as string;
-      console.log("load audio from", uri);
+      // console.log("load audio from", uri);
       try {
         const { sound: newSound } = await Audio.Sound.createAsync({ uri });
@@ -126,7 +126,7 @@ const Main: React.FC<MainProps> = ({ route }) => {
         playNextAudio();
       }
     } else {
-      console.log("audioQueue is empty or sound is not null");
+      // console.log("audioQueue is empty or sound is not null");
       return;
     }
   }, [audioQueue, sound, soundUriMap]);
@@ -144,26 +144,13 @@ const Main: React.FC<MainProps> = ({ route }) => {
     [sound, soundUriMap, playNextAudio]
   );

-  const isAVPlaybackStatusSuccess = (
-    status: AVPlaybackStatus
-  ): status is AVPlaybackStatusSuccess => {
-    return (status as AVPlaybackStatusSuccess).isLoaded !== undefined;
-  };
-
-  // useEffect(() => {
-  //   console.log("audioQueue has been updated:", audioQueue.length);
-  //   if (audioQueue.length == 1) {
-  //     playNextAudio();
-  //   }
-  // }, [audioQueue]);
-
   useEffect(() => {
     if (audioQueue.length > 0 && !sound) {
       playNextAudio();
     }
   }, [audioQueue, sound, playNextAudio]);

-  useEffect(() => {
-    console.log("sound has been updated:", sound);
-  }, [sound]);
+  useEffect(() => {}, [sound]);

   useEffect(() => {
     let websocket: WebSocket;
@@ -182,27 +169,22 @@ const Main: React.FC<MainProps> = ({ route }) => {
       try {
         const message = JSON.parse(e.data);

-        if (message.content && message.type === "audio") {
+        if (message.content && message.type == "audio") {
           console.log("✅✅✅✅✅✅✅✅✅✅✅✅✅✅✅✅✅✅✅ Audio message");
           const buffer = message.content;
-          console.log(buffer.length);
+          // console.log(buffer.length);
           if (buffer && buffer.length > 0) {
             const filePath = await constructTempFilePath(buffer);
             if (filePath !== null) {
               addToQueue(filePath);
-              console.log("audio file written to", filePath);
+              // console.log("audio file written to", filePath);
             } else {
               console.error("Failed to create file path");
             }
           } else {
             console.error("Received message is empty or undefined");
           }
-        } else {
-          // console.log(typeof message);
-          // console.log(typeof message.content);
-          console.log("Received message content is not a string.");
-          console.log(message);
         }
       } catch (error) {
         console.error("Error handling WebSocket message:", error);
@@ -286,19 +268,9 @@ const Main: React.FC<MainProps> = ({ route }) => {
       allowsRecordingIOS: false,
     });
     const uri = recording.getURI();
-    console.log("recording uri at ", uri);
+    // console.log("recording uri at ", uri);
     setRecording(null);

-    // sanity check play the audio recording locally
-    // recording is working fine; is the server caching the audio file somewhere?
-    /**
-    if (uri) {
-      const { sound } = await Audio.Sound.createAsync({ uri });
-      sound.playAsync();
-      console.log("playing audio recording from", uri);
-    }
-    */
     if (ws && uri) {
       const response = await fetch(uri);
       // console.log("fetched audio file", response);
@@ -312,10 +284,10 @@ const Main: React.FC<MainProps> = ({ route }) => {
         ws.send(audioBytes);
         const audioArray = new Uint8Array(audioBytes as ArrayBuffer);
         const decoder = new TextDecoder("utf-8");
-        console.log(
-          "sent audio bytes to WebSocket",
-          decoder.decode(audioArray).slice(0, 50)
-        );
+        // console.log(
+        //   "sent audio bytes to WebSocket",
+        //   decoder.decode(audioArray).slice(0, 50)
+        // );
       }
     };
   }
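
Note on the client-side changes: commenting each call out silences the noise but leaves dead text at every site. A minimal alternative sketch, assuming a module-level DEBUG_AUDIO flag and a debugLog helper (both hypothetical, not part of this commit):

const DEBUG_AUDIO = false; // hypothetical flag; set to true to restore the playback logs

const debugLog = (...args: unknown[]): void => {
  // Forwards to console.log only when the flag is on, so call sites stay intact.
  if (DEBUG_AUDIO) {
    console.log(...args);
  }
};

// The sites touched above would then read, e.g.:
//   debugLog("load audio from", uri);
//   debugLog("audioQueue is empty or sound is not null");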

@@ -198,10 +198,10 @@ async def send_messages(websocket: WebSocket):
     try:
         if isinstance(message, dict):
-            print(f"Sending to the device: {type(message)} {str(message)[:100]}")
+            # print(f"Sending to the device: {type(message)} {str(message)[:100]}")
             await websocket.send_json(message)
         elif isinstance(message, bytes):
-            print(f"Sending to the device: {type(message)} {str(message)[:100]}")
+            # print(f"Sending to the device: {type(message)} {str(message)[:100]}")
             await websocket.send_bytes(message)
         else:
             raise TypeError("Message must be a dict or bytes")
@@ -235,7 +235,7 @@ async def listener(mobile: bool):
             # Will be None until we have a full message ready
             continue

-        print(str(message)[:1000])
+        # print(str(message)[:1000])

         # At this point, we have our message
@@ -250,9 +250,9 @@ async def listener(mobile: bool):
                 # Convert bytes to audio file

                 # Format will be bytes.wav or bytes.opus
                 mime_type = "audio/" + message["format"].split(".")[1]
-                print("input audio file content", message["content"][:100])
+                # print("input audio file content", message["content"][:100])
                 audio_file_path = bytes_to_wav(message["content"], mime_type)
-                print("Audio file path:", audio_file_path)
+                # print("Audio file path:", audio_file_path)

                 # For microphone debugging:
                 if False:
@@ -299,7 +299,7 @@ async def listener(mobile: bool):
                     # Send it to the user
                     await to_device.put(chunk)
-                    # Yield to the event loop, so you actxually send it out
+                    # Yield to the event loop, so you actually send it out
                     await asyncio.sleep(0.01)

         if os.getenv("TTS_RUNNER") == "server":

@@ -36,7 +36,7 @@ def export_audio_to_wav_ffmpeg(audio: bytearray, mime_type: str) -> str:
         output_path = os.path.join(
             temp_dir, f"output_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav"
         )
-        print(mime_type, input_path, output_path)
+        # print(mime_type, input_path, output_path)

         if mime_type == "audio/raw":
             ffmpeg.input(
                 input_path,
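
The server-side prints are likewise commented out rather than made switchable. A minimal sketch of the same idea with Python's standard logging module, assuming a logger named "01-server" (the name is hypothetical, not part of this commit):

import logging

logger = logging.getLogger("01-server")  # hypothetical name; any module-level logger works
logging.basicConfig(level=logging.INFO)  # use level=logging.DEBUG to restore the old output

# The prints commented out above would become, e.g.:
#   logger.debug("Sending to the device: %s %s", type(message), str(message)[:100])
#   logger.debug("Audio file path: %s", audio_file_path)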
