Implementing a Speech Recognition Component in Nuxt 3.13

Implementation
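The component below relies on VueUse's useSpeechRecognition composable. A minimal nuxt.config.ts sketch, assuming the project wires up VueUse through the @vueuse/nuxt module so the composable is auto-imported:

// nuxt.config.ts (sketch; assumes the @vueuse/nuxt module is installed)
export default defineNuxtConfig({
  modules: [
    '@vueuse/nuxt',
  ],
});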

Create an AppSpeechRecognition.vue file.

<script setup>
const props = defineProps({
  // Callback invoked with the accumulated transcript segments on every update.
  onRecord: {
    type: Function,
    default: () => {},
  },
});

// useSpeechRecognition comes from VueUse and wraps the browser's Web Speech API.
const { isFinal, isListening, isSupported, result, start, stop } = useSpeechRecognition({
  lang: navigator.language || 'zh-TW',
});

const state = reactive({
  index: 0, // position of the segment currently being transcribed
  contents: [], // one entry per utterance
});

// Write the interim transcript into the current segment and notify the parent.
watch(result, (after) => {
  state.contents[state.index] = after;
  props.onRecord(state.contents);
});

// Once the current utterance is finalized, advance to the next segment.
watch(isFinal, (after) => {
  if (after) {
    nextTick(() => {
      state.index += 1;
    });
  }
});
</script>

<template>
  <slot
    :is-listening="isListening"
    :is-supported="isSupported"
    :result="result"
    :start="start"
    :stop="() => {
      state.index = 0;
      state.contents = [];
      stop();
    }"
  />
</template>
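The component keeps one entry in state.contents per utterance: result holds the interim transcript of the segment currently being spoken, and once isFinal turns true the index advances so the next utterance lands in a fresh slot. Under the hood, useSpeechRecognition wraps the browser's Web Speech API; a bare sketch of that underlying API, assuming a browser that exposes SpeechRecognition or the webkit-prefixed variant (e.g. Chrome):

// Browser-only sketch of the underlying Web Speech API.
const Recognition = window.SpeechRecognition || window.webkitSpeechRecognition;
const recognition = new Recognition();
recognition.lang = navigator.language || 'zh-TW';
recognition.interimResults = true; // emit partial transcripts while speaking
recognition.continuous = true; // keep listening across utterances

recognition.onresult = (event) => {
  // Join the transcripts received so far into a single string.
  const transcript = Array.from(event.results)
    .map((item) => item[0].transcript)
    .join(' ');
  console.log(transcript);
};

recognition.start();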

Use the component.

<script setup>
// Two-way bound string that holds the recognized text.
const model = defineModel({
  type: String,
  default: '',
});
</script>

<template>
  <AppSpeechRecognition
    v-slot="{ isListening, isSupported, start, stop }"
    :on-record="(contents) => {
      model = contents.join(' ');
    }"
  >
    <v-icon
      v-show="isSupported"
      :icon="isListening ? 'mdi-microphone-off' : 'mdi-microphone'"
      @click.stop="isListening ? stop() : start()"
    />
  </AppSpeechRecognition>
  <p>
    Result: {{ model }}
  </p>
</template>
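The <v-icon> element here assumes Vuetify with Material Design Icons is available; any clickable element works as the trigger. Note also that AppSpeechRecognition reads navigator.language during setup, so it can only run in the browser and navigator is undefined during server-side rendering. One way to avoid SSR errors, as a sketch, is to wrap the usage in Nuxt's built-in <ClientOnly> component:

<template>
  <ClientOnly>
    <AppSpeechRecognition
      v-slot="{ isListening, isSupported, start, stop }"
      :on-record="(contents) => { model = contents.join(' '); }"
    >
      <!-- trigger element as above -->
    </AppSpeechRecognition>
  </ClientOnly>
</template>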

References