From 6b7b6ab65d791d965aa117f3d251ed4666db067c Mon Sep 17 00:00:00 2001
From: Ben Xu
Date: Thu, 2 May 2024 16:55:05 -0400
Subject: [PATCH] remove logging

---
 .../ios/react-native/src/screens/Main.tsx    | 58 +++++--------------
 software/source/server/server.py             | 12 ++--
 software/source/server/utils/bytes_to_wav.py |  2 +-
 3 files changed, 22 insertions(+), 50 deletions(-)

diff --git a/software/source/clients/ios/react-native/src/screens/Main.tsx b/software/source/clients/ios/react-native/src/screens/Main.tsx
index 0bd7c8f..3f37ac2 100644
--- a/software/source/clients/ios/react-native/src/screens/Main.tsx
+++ b/software/source/clients/ios/react-native/src/screens/Main.tsx
@@ -107,13 +107,13 @@ const Main: React.FC = ({ route }) => {
   }
 
   const playNextAudio = useCallback(async () => {
-    console.log(
-      `in playNextAudio audioQueue is ${audioQueue.length} and sound is ${sound}`
-    );
+    // console.log(
+    //   `in playNextAudio audioQueue is ${audioQueue.length} and sound is ${sound}`
+    //);
 
     if (audioQueue.length > 0 && sound == null) {
       const uri = audioQueue.shift() as string;
-      console.log("load audio from", uri);
+      // console.log("load audio from", uri);
 
       try {
         const { sound: newSound } = await Audio.Sound.createAsync({ uri });
@@ -126,7 +126,7 @@ const Main: React.FC = ({ route }) => {
         playNextAudio();
       }
     } else {
-      console.log("audioQueue is empty or sound is not null");
+      // console.log("audioQueue is empty or sound is not null");
       return;
     }
   }, [audioQueue, sound, soundUriMap]);
@@ -144,26 +144,13 @@ const Main: React.FC = ({ route }) => {
     [sound, soundUriMap, playNextAudio]
   );
 
-  const isAVPlaybackStatusSuccess = (
-    status: AVPlaybackStatus
-  ): status is AVPlaybackStatusSuccess => {
-    return (status as AVPlaybackStatusSuccess).isLoaded !== undefined;
-  };
-
-  // useEffect(() => {
-  //   console.log("audioQueue has been updated:", audioQueue.length);
-  //   if (audioQueue.length == 1) {
-  //     playNextAudio();
-  //   }
-  // }, [audioQueue]);
   useEffect(() => {
     if (audioQueue.length > 0 && !sound) {
       playNextAudio();
     }
   }, [audioQueue, sound, playNextAudio]);
-  useEffect(() => {
-    console.log("sound has been updated:", sound);
-  }, [sound]);
+
+  useEffect(() => {}, [sound]);
 
   useEffect(() => {
     let websocket: WebSocket;
@@ -182,27 +169,22 @@ const Main: React.FC = ({ route }) => {
       try {
         const message = JSON.parse(e.data);
 
-        if (message.content && message.type === "audio") {
+        if (message.content && message.type == "audio") {
           console.log("✅✅✅✅✅✅✅✅✅✅✅✅✅✅✅✅✅✅✅ Audio message");
           const buffer = message.content;
-          console.log(buffer.length);
+          // console.log(buffer.length);
           if (buffer && buffer.length > 0) {
             const filePath = await constructTempFilePath(buffer);
             if (filePath !== null) {
               addToQueue(filePath);
-              console.log("audio file written to", filePath);
+              // console.log("audio file written to", filePath);
             } else {
               console.error("Failed to create file path");
             }
           } else {
             console.error("Received message is empty or undefined");
           }
-        } else {
-          // console.log(typeof message);
-          // console.log(typeof message.content);
-          console.log("Received message content is not a string.");
-          console.log(message);
         }
       } catch (error) {
         console.error("Error handling WebSocket message:", error);
       }
@@ -286,19 +268,9 @@ const Main: React.FC = ({ route }) => {
         allowsRecordingIOS: false,
       });
       const uri = recording.getURI();
-      console.log("recording uri at ", uri);
+      // console.log("recording uri at ", uri);
       setRecording(null);
 
-      // sanity check play the audio recording locally
-      // recording is working fine; is the server caching the audio file somewhere?
-      /**
-      if (uri) {
-        const { sound } = await Audio.Sound.createAsync({ uri });
-        sound.playAsync();
-        console.log("playing audio recording from", uri);
-      }
-      */
-
       if (ws && uri) {
         const response = await fetch(uri);
         // console.log("fetched audio file", response);
@@ -312,10 +284,10 @@ const Main: React.FC = ({ route }) => {
           ws.send(audioBytes);
           const audioArray = new Uint8Array(audioBytes as ArrayBuffer);
           const decoder = new TextDecoder("utf-8");
-          console.log(
-            "sent audio bytes to WebSocket",
-            decoder.decode(audioArray).slice(0, 50)
-          );
+          // console.log(
+          //   "sent audio bytes to WebSocket",
+          //   decoder.decode(audioArray).slice(0, 50)
+          // );
         }
       };
     }
diff --git a/software/source/server/server.py b/software/source/server/server.py
index 6483160..b40dc71 100644
--- a/software/source/server/server.py
+++ b/software/source/server/server.py
@@ -198,10 +198,10 @@ async def send_messages(websocket: WebSocket):
 
         try:
             if isinstance(message, dict):
-                print(f"Sending to the device: {type(message)} {str(message)[:100]}")
+                # print(f"Sending to the device: {type(message)} {str(message)[:100]}")
                 await websocket.send_json(message)
             elif isinstance(message, bytes):
-                print(f"Sending to the device: {type(message)} {str(message)[:100]}")
+                # print(f"Sending to the device: {type(message)} {str(message)[:100]}")
                 await websocket.send_bytes(message)
             else:
                 raise TypeError("Message must be a dict or bytes")
@@ -235,7 +235,7 @@ async def listener(mobile: bool):
             # Will be None until we have a full message ready
             continue
 
-        print(str(message)[:1000])
+        # print(str(message)[:1000])
 
         # At this point, we have our message
 
@@ -250,9 +250,9 @@ async def listener(mobile: bool):
             # Convert bytes to audio file
             # Format will be bytes.wav or bytes.opus
             mime_type = "audio/" + message["format"].split(".")[1]
-            print("input audio file content", message["content"][:100])
+            # print("input audio file content", message["content"][:100])
             audio_file_path = bytes_to_wav(message["content"], mime_type)
-            print("Audio file path:", audio_file_path)
+            # print("Audio file path:", audio_file_path)
 
             # For microphone debugging:
             if False:
@@ -299,7 +299,7 @@ async def listener(mobile: bool):
                 # Send it to the user
                 await to_device.put(chunk)
 
-                # Yield to the event loop, so you actxually send it out
+                # Yield to the event loop, so you actually send it out
                 await asyncio.sleep(0.01)
 
         if os.getenv("TTS_RUNNER") == "server":
diff --git a/software/source/server/utils/bytes_to_wav.py b/software/source/server/utils/bytes_to_wav.py
index a789792..286ae4d 100644
--- a/software/source/server/utils/bytes_to_wav.py
+++ b/software/source/server/utils/bytes_to_wav.py
@@ -36,7 +36,7 @@ def export_audio_to_wav_ffmpeg(audio: bytearray, mime_type: str) -> str:
     output_path = os.path.join(
         temp_dir, f"output_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav"
     )
-    print(mime_type, input_path, output_path)
+    # print(mime_type, input_path, output_path)
     if mime_type == "audio/raw":
         ffmpeg.input(
             input_path,
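
Review note, not part of the patch: commenting each log call out by hand is easy to drift out of sync when debugging resumes. If these logs are still useful, one gated helper would let the client toggle them from a single place. A minimal sketch in TypeScript; the DEBUG flag, debugLog name, and file name are hypothetical, not identifiers from this repo:

// debugLog.ts -- hypothetical helper, not part of this patch.
// One switch for the noisy client-side logs instead of
// commenting every console.log out individually.
const DEBUG = false; // flip to true while debugging audio playback

export function debugLog(...args: unknown[]): void {
  if (DEBUG) {
    console.log(...args);
  }
}

// Example call sites, mirroring logs this patch comments out:
// debugLog("load audio from", uri);
// debugLog("recording uri at ", uri);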