refactor(voice): extract chat dictation into reusable component
This commit is contained in:
@@ -0,0 +1,168 @@
|
||||
<script setup lang="ts">
|
||||
import { onBeforeUnmount, ref, watch } from "vue";
|
||||
import { isVoiceCaptureSupported, transcribeAudioBlob } from "~~/app/composables/useVoiceTranscription";
|
||||
|
||||
// Props: availability flag, a key identifying the current chat session,
// and optional tooltip overrides for each capture state.
const props = defineProps<{
  disabled?: boolean;
  sessionKey?: string;
  idleTitle?: string;
  recordingTitle?: string;
  transcribingTitle?: string;
}>();

// Events: "update:recording"/"update:transcribing" mirror internal state to
// the parent, "transcript" delivers recognized text, and "error" carries a
// message ("" is emitted to clear a previously reported error).
const emit = defineEmits<{
  (e: "update:recording", value: boolean): void;
  (e: "update:transcribing", value: boolean): void;
  (e: "transcript", value: string): void;
  (e: "error", value: string): void;
}>();

// Reactive capture state (also mirrored to the parent via the emits above).
const recording = ref(false);
const transcribing = ref(false);
// Non-reactive recorder plumbing; reset by clearRecorderResources().
let mediaRecorder: MediaRecorder | null = null;
let recorderStream: MediaStream | null = null;
// Fallback container type; replaced with the recorder's actual mimeType once started.
let recorderMimeType = "audio/webm";
let recordingChunks: Blob[] = [];
// When true, the recorder's onstop handler drops the audio instead of transcribing it.
let discardOnStop = false;
function setRecording(value: boolean) {
|
||||
recording.value = value;
|
||||
emit("update:recording", value);
|
||||
}
|
||||
|
||||
function setTranscribing(value: boolean) {
|
||||
transcribing.value = value;
|
||||
emit("update:transcribing", value);
|
||||
}
|
||||
|
||||
function clearRecorderResources() {
|
||||
if (recorderStream) {
|
||||
recorderStream.getTracks().forEach((track) => track.stop());
|
||||
recorderStream = null;
|
||||
}
|
||||
mediaRecorder = null;
|
||||
recordingChunks = [];
|
||||
discardOnStop = false;
|
||||
}
|
||||
|
||||
async function startRecording() {
|
||||
if (recording.value || transcribing.value) return;
|
||||
emit("error", "");
|
||||
|
||||
if (!isVoiceCaptureSupported()) {
|
||||
emit("error", "Recording is not supported in this browser");
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
|
||||
const preferredMime = "audio/webm;codecs=opus";
|
||||
const recorder = MediaRecorder.isTypeSupported(preferredMime)
|
||||
? new MediaRecorder(stream, { mimeType: preferredMime })
|
||||
: new MediaRecorder(stream);
|
||||
|
||||
recorderStream = stream;
|
||||
recorderMimeType = recorder.mimeType || "audio/webm";
|
||||
mediaRecorder = recorder;
|
||||
recordingChunks = [];
|
||||
discardOnStop = false;
|
||||
setRecording(true);
|
||||
|
||||
recorder.ondataavailable = (event: BlobEvent) => {
|
||||
if (event.data?.size) recordingChunks.push(event.data);
|
||||
};
|
||||
|
||||
recorder.onstop = async () => {
|
||||
const discard = discardOnStop;
|
||||
const audioBlob = new Blob(recordingChunks, { type: recorderMimeType });
|
||||
|
||||
setRecording(false);
|
||||
clearRecorderResources();
|
||||
if (discard || audioBlob.size === 0) return;
|
||||
|
||||
setTranscribing(true);
|
||||
try {
|
||||
const text = await transcribeAudioBlob(audioBlob);
|
||||
if (!text) {
|
||||
emit("error", "Could not recognize speech");
|
||||
return;
|
||||
}
|
||||
emit("error", "");
|
||||
emit("transcript", text);
|
||||
} catch (error: any) {
|
||||
emit("error", String(error?.data?.message ?? error?.message ?? "Voice transcription failed"));
|
||||
} finally {
|
||||
setTranscribing(false);
|
||||
}
|
||||
};
|
||||
|
||||
recorder.start();
|
||||
} catch {
|
||||
setRecording(false);
|
||||
clearRecorderResources();
|
||||
emit("error", "No microphone access");
|
||||
}
|
||||
}
|
||||
|
||||
function stopRecording(discard = false) {
|
||||
if (!mediaRecorder || mediaRecorder.state === "inactive") {
|
||||
setRecording(false);
|
||||
clearRecorderResources();
|
||||
return;
|
||||
}
|
||||
discardOnStop = discard;
|
||||
mediaRecorder.stop();
|
||||
}
|
||||
|
||||
function toggleRecording() {
|
||||
if (props.disabled || transcribing.value) return;
|
||||
if (recording.value) {
|
||||
stopRecording();
|
||||
return;
|
||||
}
|
||||
void startRecording();
|
||||
}
|
||||
|
||||
watch(
|
||||
() => props.sessionKey,
|
||||
() => {
|
||||
if (recording.value) stopRecording(true);
|
||||
},
|
||||
);
|
||||
|
||||
watch(
|
||||
() => props.disabled,
|
||||
(disabled) => {
|
||||
if (disabled && recording.value) stopRecording(true);
|
||||
},
|
||||
);
|
||||
|
||||
onBeforeUnmount(() => {
|
||||
if (recording.value) {
|
||||
stopRecording(true);
|
||||
return;
|
||||
}
|
||||
clearRecorderResources();
|
||||
});
|
||||
</script>
|
||||
|
||||
<template>
  <!-- Single toggle button: idle -> start recording, recording -> stop and
       transcribe. Disabled while transcribing or when the parent disables it.
       Tooltip text follows the current state, with prop overrides. -->
  <button
    type="button"
    :disabled="Boolean(props.disabled) || transcribing"
    :title="
      recording
        ? (props.recordingTitle || 'Stop and insert transcript')
        : transcribing
          ? (props.transcribingTitle || 'Transcribing...')
          : (props.idleTitle || 'Voice input')
    "
    @click="toggleRecording"
  >
    <!-- Callers may override the icon via the default slot; current state is
         exposed as slot props. The fallback is a microphone glyph. -->
    <slot :recording="recording" :transcribing="transcribing">
      <svg viewBox="0 0 24 24" class="h-3.5 w-3.5 fill-current">
        <path d="M12 15a3 3 0 0 0 3-3V7a3 3 0 1 0-6 0v5a3 3 0 0 0 3 3m5-3a1 1 0 1 1 2 0 7 7 0 0 1-6 6.92V21h3a1 1 0 1 1 0 2H8a1 1 0 1 1 0-2h3v-2.08A7 7 0 0 1 5 12a1 1 0 1 1 2 0 5 5 0 0 0 10 0" />
      </svg>
    </slot>
  </button>
</template>
Reference in New Issue
Block a user