feat(UI): Adding linter and prettier for UI (#3156)

Francisco Arceo authored on 2025-08-14 15:58:43 -06:00; committed by GitHub
parent 61582f327c
commit e69acbafbf
73 changed files with 1452 additions and 1226 deletions
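
The hunk below is from one of those files, a useAudioRecording React hook, and shows the two most visible style rules at work: every statement gains a semicolon, and single-parameter arrow functions lose the parentheses around their argument. A Prettier configuration that would produce exactly these rewrites is sketched here; the file name and option values are assumptions inferred from the diff, not taken from the commit itself.

// prettier.config.mjs -- hypothetical sketch; the commit's actual config is not shown in this hunk
export default {
  semi: true, // terminate statements with semicolons, as added throughout the diff
  arrowParens: "avoid", // (track) => track.stop() becomes track => track.stop()
};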


@@ -1,85 +1,85 @@
-import { useEffect, useRef, useState } from "react"
-import { recordAudio } from "@/lib/audio-utils"
+import { useEffect, useRef, useState } from "react";
+import { recordAudio } from "@/lib/audio-utils";
 
 interface UseAudioRecordingOptions {
-  transcribeAudio?: (blob: Blob) => Promise<string>
-  onTranscriptionComplete?: (text: string) => void
+  transcribeAudio?: (blob: Blob) => Promise<string>;
+  onTranscriptionComplete?: (text: string) => void;
 }
 
 export function useAudioRecording({
   transcribeAudio,
   onTranscriptionComplete,
 }: UseAudioRecordingOptions) {
-  const [isListening, setIsListening] = useState(false)
-  const [isSpeechSupported, setIsSpeechSupported] = useState(!!transcribeAudio)
-  const [isRecording, setIsRecording] = useState(false)
-  const [isTranscribing, setIsTranscribing] = useState(false)
-  const [audioStream, setAudioStream] = useState<MediaStream | null>(null)
-  const activeRecordingRef = useRef<any>(null)
+  const [isListening, setIsListening] = useState(false);
+  const [isSpeechSupported, setIsSpeechSupported] = useState(!!transcribeAudio);
+  const [isRecording, setIsRecording] = useState(false);
+  const [isTranscribing, setIsTranscribing] = useState(false);
+  const [audioStream, setAudioStream] = useState<MediaStream | null>(null);
+  const activeRecordingRef = useRef<any>(null);
 
   useEffect(() => {
     const checkSpeechSupport = async () => {
       const hasMediaDevices = !!(
         navigator.mediaDevices && navigator.mediaDevices.getUserMedia
-      )
-      setIsSpeechSupported(hasMediaDevices && !!transcribeAudio)
-    }
+      );
+      setIsSpeechSupported(hasMediaDevices && !!transcribeAudio);
+    };
 
-    checkSpeechSupport()
-  }, [transcribeAudio])
+    checkSpeechSupport();
+  }, [transcribeAudio]);
 
   const stopRecording = async () => {
-    setIsRecording(false)
-    setIsTranscribing(true)
+    setIsRecording(false);
+    setIsTranscribing(true);
     try {
       // First stop the recording to get the final blob
-      recordAudio.stop()
+      recordAudio.stop();
       // Wait for the recording promise to resolve with the final blob
-      const recording = await activeRecordingRef.current
+      const recording = await activeRecordingRef.current;
       if (transcribeAudio) {
-        const text = await transcribeAudio(recording)
-        onTranscriptionComplete?.(text)
+        const text = await transcribeAudio(recording);
+        onTranscriptionComplete?.(text);
       }
     } catch (error) {
-      console.error("Error transcribing audio:", error)
+      console.error("Error transcribing audio:", error);
     } finally {
-      setIsTranscribing(false)
-      setIsListening(false)
+      setIsTranscribing(false);
+      setIsListening(false);
 
       if (audioStream) {
-        audioStream.getTracks().forEach((track) => track.stop())
-        setAudioStream(null)
+        audioStream.getTracks().forEach(track => track.stop());
+        setAudioStream(null);
       }
-      activeRecordingRef.current = null
+      activeRecordingRef.current = null;
     }
-  }
+  };
 
   const toggleListening = async () => {
     if (!isListening) {
       try {
-        setIsListening(true)
-        setIsRecording(true)
+        setIsListening(true);
+        setIsRecording(true);
 
         // Get audio stream first
         const stream = await navigator.mediaDevices.getUserMedia({
           audio: true,
-        })
-        setAudioStream(stream)
+        });
+        setAudioStream(stream);
         // Start recording with the stream
-        activeRecordingRef.current = recordAudio(stream)
+        activeRecordingRef.current = recordAudio(stream);
       } catch (error) {
-        console.error("Error recording audio:", error)
-        setIsListening(false)
-        setIsRecording(false)
+        console.error("Error recording audio:", error);
+        setIsListening(false);
+        setIsRecording(false);
 
         if (audioStream) {
-          audioStream.getTracks().forEach((track) => track.stop())
-          setAudioStream(null)
+          audioStream.getTracks().forEach(track => track.stop());
+          setAudioStream(null);
         }
       }
     } else {
-      await stopRecording()
+      await stopRecording();
     }
-  }
+  };
   return {
     isListening,
@@ -89,5 +89,5 @@ export function useAudioRecording({
     audioStream,
     toggleListening,
     stopRecording,
-  }
+  };
 }
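
Once a config like the sketch above is in place, the rewrite shown in this diff is mechanical. Assuming Prettier is installed as a dev dependency of the UI package (the repository's actual npm scripts are not shown here), the usual invocations would be:

npx prettier --check .   # report files that deviate from the configured style
npx prettier --write .   # rewrite files in place, producing edits like the hunk above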