Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-07-27 06:28:50 +00:00

Commit f7c9651ca7 (parent fc3286be7e)

have messages working but streaming still not fixed yet

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

23 changed files with 4749 additions and 184 deletions
llama_stack/ui/hooks/use-audio-recording.ts (new file, 93 lines)
@@ -0,0 +1,93 @@
import { useEffect, useRef, useState } from "react"

import { recordAudio } from "@/lib/audio-utils"

interface UseAudioRecordingOptions {
  transcribeAudio?: (blob: Blob) => Promise<string>
  onTranscriptionComplete?: (text: string) => void
}

export function useAudioRecording({
  transcribeAudio,
  onTranscriptionComplete,
}: UseAudioRecordingOptions) {
  const [isListening, setIsListening] = useState(false)
  const [isSpeechSupported, setIsSpeechSupported] = useState(!!transcribeAudio)
  const [isRecording, setIsRecording] = useState(false)
  const [isTranscribing, setIsTranscribing] = useState(false)
  const [audioStream, setAudioStream] = useState<MediaStream | null>(null)
  const activeRecordingRef = useRef<any>(null)

  useEffect(() => {
    const checkSpeechSupport = async () => {
      const hasMediaDevices = !!(
        navigator.mediaDevices && navigator.mediaDevices.getUserMedia
      )
      setIsSpeechSupported(hasMediaDevices && !!transcribeAudio)
    }

    checkSpeechSupport()
  }, [transcribeAudio])

  const stopRecording = async () => {
    setIsRecording(false)
    setIsTranscribing(true)
    try {
      // First stop the recording to get the final blob
      recordAudio.stop()
      // Wait for the recording promise to resolve with the final blob
      const recording = await activeRecordingRef.current
      if (transcribeAudio) {
        const text = await transcribeAudio(recording)
        onTranscriptionComplete?.(text)
      }
    } catch (error) {
      console.error("Error transcribing audio:", error)
    } finally {
      setIsTranscribing(false)
      setIsListening(false)
      if (audioStream) {
        audioStream.getTracks().forEach((track) => track.stop())
        setAudioStream(null)
      }
      activeRecordingRef.current = null
    }
  }

  const toggleListening = async () => {
    if (!isListening) {
      try {
        setIsListening(true)
        setIsRecording(true)
        // Get audio stream first
        const stream = await navigator.mediaDevices.getUserMedia({
          audio: true,
        })
        setAudioStream(stream)

        // Start recording with the stream
        activeRecordingRef.current = recordAudio(stream)
      } catch (error) {
        console.error("Error recording audio:", error)
        setIsListening(false)
        setIsRecording(false)
        if (audioStream) {
          audioStream.getTracks().forEach((track) => track.stop())
          setAudioStream(null)
        }
      }
    } else {
      await stopRecording()
    }
  }

  return {
    isListening,
    isSpeechSupported,
    isRecording,
    isTranscribing,
    audioStream,
    toggleListening,
    stopRecording,
  }
}
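For context, a minimal sketch of how a component might consume this hook. This is not part of the commit: the `VoiceInput` component and the `transcribeWithApi` callback are hypothetical placeholders, and the import path assumes the `@/` alias resolves to the UI root the same way it does for `@/lib/audio-utils` above.

// Hypothetical usage sketch (not in this commit): a chat input that toggles
// recording and appends the transcript to the message box.
import { useState } from "react"
import { useAudioRecording } from "@/hooks/use-audio-recording"

// Placeholder transcription callback; a real app would POST the blob to a
// speech-to-text endpoint and return the resulting text.
async function transcribeWithApi(blob: Blob): Promise<string> {
  return "transcribed text"
}

export function VoiceInput() {
  const [message, setMessage] = useState("")
  const { isListening, isSpeechSupported, isTranscribing, toggleListening } =
    useAudioRecording({
      transcribeAudio: transcribeWithApi,
      onTranscriptionComplete: (text) => setMessage((prev) => prev + text),
    })

  // Hide the control entirely when mic access or transcription is unavailable.
  if (!isSpeechSupported) return null

  return (
    <div>
      <textarea value={message} onChange={(e) => setMessage(e.target.value)} />
      <button onClick={toggleListening} disabled={isTranscribing}>
        {isListening ? "Stop" : "Record"}
      </button>
    </div>
  )
}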