I am getting an error when implementing SFSpeechAudioBufferRecognitionRequest in Objective-C. Here is my code; it was working until recently. The error is: Domain=kAFAssistantErrorDomain Code=216 "(null)"
- (void)startListening {
    // Initialize the AVAudioEngine.
    audioEngine = [[AVAudioEngine alloc] init];

    // Make sure there is not a recognition task already running.
    if (recognitionTask) {
        [recognitionTask cancel];
        recognitionTask = nil;
    }

    // Start an AVAudioSession.
    NSError *error;
    AVAudioSession *audioSession = [AVAudioSession sharedInstance];
    [audioSession setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
    [audioSession setActive:YES withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation error:&error];

    // Start a recognition process; the result handler below logs the input
    // or stops the audio process if there is an error.
    recognitionRequest = [[SFSpeechAudioBufferRecognitionRequest alloc] init];
    inputNode = audioEngine.inputNode;
    recognitionRequest.shouldReportPartialResults = NO;
    recognitionRequest.taskHint = SFSpeechRecognitionTaskHintDictation;

    [self startWaveAudio];

    // Set the recording format and install a tap that feeds captured buffers
    // to the recognition request.
    AVAudioFormat *recordingFormat = [inputNode outputFormatForBus:0];
    [inputNode installTapOnBus:0 bufferSize:4096 format:recordingFormat block:^(AVAudioPCMBuffer * _Nonnull buffer, AVAudioTime * _Nonnull when) {
        [recognitionRequest appendAudioPCMBuffer:buffer];
    }];

    // Start the audio engine, i.e. start listening.
    [audioEngine prepare];
    [audioEngine startAndReturnError:&error];

    __block BOOL isFinal = NO;
    recognitionTask = [speechRecognizer recognitionTaskWithRequest:recognitionRequest resultHandler:^(SFSpeechRecognitionResult * _Nullable result, NSError * _Nullable error) {
        [self stopWaveAudio];
        if (result) {
            // Whatever is said into the microphone after pressing the button
            // should be logged in the console.
            NSLog(@"RESULT:%@", result.bestTranscription.formattedString);
            for (SFTranscription *tra in result.transcriptions) {
                NSLog(@"Multiple Results : %@", tra.formattedString);
            }
            if (isFinal == NO) {
                [self calculateResultOfSpeechWithResultString:result.bestTranscription.formattedString];
            }
            isFinal = !result.isFinal;
        }
        if (error || isFinal) {
            NSLog(@"Error Description : %@", error);
            [self stopRecording];
        }
    }];
}
- (IBAction)tap2TlkBtnPrsd:(UIButton *)sender {
    // Toggle between listening and stopped states.
    if (audioEngine.isRunning) {
        [self stopRecording];
    } else {
        [self startListening];
    }
    isMicOn = !isMicOn;
    micPrompt = NO;
}
- (void)stopRecording {
    // dispatch_async(dispatch_get_main_queue(), ^{
    if (audioEngine.isRunning) {
        [inputNode removeTapOnBus:0];
        [inputNode reset];
        [audioEngine stop];
        [recognitionRequest endAudio];
        [recognitionTask cancel];
        recognitionTask = nil;
        recognitionRequest = nil;
    }
    // });
}
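For comparison, below is a teardown variant that ends the audio and lets the task deliver its final result instead of cancelling it outright. This is only a sketch (stopRecordingAndFinish is a name I made up), and I am not sure whether cancelling versus finishing is actually related to the Code=216 error:

- (void)stopRecordingAndFinish {
    if (audioEngine.isRunning) {
        [inputNode removeTapOnBus:0];
        [audioEngine stop];
        // Signal that no more audio is coming, then let the task finish and
        // deliver its final result rather than cancelling it.
        [recognitionRequest endAudio];
        [recognitionTask finish];
        recognitionTask = nil;
        recognitionRequest = nil;
    }
}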
I have also tried various other arrangements, for example appending the audio buffer only after the speech recognition request has been started, as sketched below..
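Concretely, that attempt looked roughly like this: the recognition task is started first, and the tap that appends buffers is installed afterwards (the result handler is the same as in startListening above):

// Attempted ordering: start the recognition task first, then install the tap,
// so buffers are appended only once the request is already being serviced.
recognitionRequest = [[SFSpeechAudioBufferRecognitionRequest alloc] init];
recognitionRequest.shouldReportPartialResults = NO;

recognitionTask = [speechRecognizer recognitionTaskWithRequest:recognitionRequest resultHandler:^(SFSpeechRecognitionResult * _Nullable result, NSError * _Nullable error) {
    // Same result handling as in startListening above.
}];

AVAudioFormat *recordingFormat = [inputNode outputFormatForBus:0];
[inputNode installTapOnBus:0 bufferSize:4096 format:recordingFormat block:^(AVAudioPCMBuffer * _Nonnull buffer, AVAudioTime * _Nonnull when) {
    [recognitionRequest appendAudioPCMBuffer:buffer];
}];

[audioEngine prepare];
[audioEngine startAndReturnError:&error];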
And if possible, could someone tell me how I can achieve the following scenario: the user spells a word letter by letter, and the result is just that word?
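What I have in mind is something like the sketch below: once the final transcription arrives, collapse the spelled-out letters into a single word. wordFromSpelledTranscription: is a hypothetical helper name, and it assumes each spoken letter comes back as its own SFTranscriptionSegment, which may not always hold:

// Hypothetical helper: collapse a spelled-out transcription such as "C A T"
// into the single word "CAT". Assumes one transcription segment per letter.
- (NSString *)wordFromSpelledTranscription:(SFTranscription *)transcription {
    NSMutableString *word = [NSMutableString string];
    for (SFTranscriptionSegment *segment in transcription.segments) {
        NSString *letters = [segment.substring stringByTrimmingCharactersInSet:[NSCharacterSet punctuationCharacterSet]];
        if (letters.length > 0) {
            // Keep only the first character of each segment, e.g. "Bee" -> "B".
            [word appendString:[[letters substringToIndex:1] uppercaseString]];
        }
    }
    return [word copy];
}

I also wondered whether setting recognitionRequest.contextualStrings to the expected letters would help bias the recognizer toward single-letter results, but I have not verified that.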