В приведенном ниже коде я могу запросить разрешение на распознавание речи. Но мне не удалось преобразовать голос в текст при нажатии кнопки. Я использовал библиотеку react-native-voice для распознавания речи.
import React, { useState, useEffect, useCallback } from 'react';
import {
  Alert,
  Image,
  Platform,
  StyleSheet,
  Text,
  TouchableNativeFeedback,
  TouchableOpacity,
  View,
} from 'react-native';
import Voice from 'react-native-voice';
import { Permissions } from 'react-native-unimodules';
const VoiceTest = (props) => {
console.log('voice props initialize');
const [hasSpeechPermission, setHasSpeechPermission] = useState();
const [voiceText, setVoiceText] = useState('');
const [recognize, setRecognize] = useState(false);
const [started, setStarted] = useState(false);
const [startRecognize, setStartRecognize] = useState();
const requestForSpeechPermission = useCallback(async () => {
try {
const request = await Permissions.askAsync(Permissions.AUDIO_RECORDING);
console.log(request);
if (request.status !== 'granted') {
setHasSpeechPermission(false);
Alert.alert(
'Insufficient permissions!',
'You need to grant camera permissions to use this app.',
[{ text: 'Okay' }]
);
} else {
setHasSpeechPermission(true);
}
} catch (err) {
console.log('speech error:=', err.message);
}
}, [setHasSpeechPermission]);
let TouchableCmp = TouchableOpacity;
if (Platform.OS === 'android' && Platform.Version >= 21) {
TouchableCmp = TouchableNativeFeedback;
}
Voice._onSpeechStart = (e) => {
console.log('nice to log:=',e);
}
useEffect(() => {
Voice._onSpeechStart = onSpeechStar;
Voice._onSpeechRecognized = onSpeechRecognized;
Voice._onSpeechResults = onSpeechResults;
return () => {
console.log('voice destroy');
Voice.destroy().then(Voice.removeAllListeners);
}
},[]);
useEffect(() => {
if (startRecognize == true) {
startRecording();
} else if (startRecognize !== undefined && !startRecognize) {
stopRecording();
}
}, [startRecognize, startRecording, stopRecording]);
const onSpeechButtonHandler = () => {
console.log('pressed');
if (hasSpeechPermission === undefined) {
requestForSpeechPermission();
}
}
const onSpeechStar = (e) => {
console.log('onSpeechStar:=', e);
}
const onSpeechRecognized = (e) => {
console.log('onSpeechRecognized:=', e);
}
const onSpeechResults = (e) => {
console.log('onSpeechResults:=', e);
}
const startRecording = async () => {
console.log('startRecognize');
try {
const result = await Voice.start('en-US');
Voice.getSpeechRecognitionServices
console.log('result:=', result);
} catch (err) {
console.log('start recognize error:=', err);
}
};
const stopRecording = async () => {
console.log('stopRecognize');
try {
const result = await Voice.stop();
console.log('result:=', result);
} catch (err) {
console.log('stop recognize error:=', err);
}
}
return (
<View style={{...props.style, ...styles.textContainer}}>
<TouchableCmp
style={styles.imageContainer}
onPress={onSpeechButtonHandler}
onPressIn={() => {
console.log('start pressing');
if (startRecognize == false || startRecognize === undefined) {
setStartRecognize(true);
}
}}
onPressOut={() => {
console.log('stop pressing');
if (startRecognize == true) {
setStartRecognize(false);
}
}} >
<Image source={require('../assets/Voice/Voice.png')}></Image>
</TouchableCmp>
</View>
);
};
// Local styles: center the voice button inside whatever space the parent
// assigns via the `style` prop.
const styles = StyleSheet.create({
  textContainer: {
    alignItems: 'center',
    justifyContent: 'center',
  },
});
export default VoiceTest;