У меня проблема с угловым приложением Webspeech API.Голосовой вывод API снова принимает в качестве ввода для API.Так как наушники / гарнитура имеют, а динамик и микрофон находятся рядом, выходной звук API снова рассматривается как вход.Я действительно ценю, если кто-то может помочь в этом.
Ниже приведен пример кода, который я использовал в своем приложении.Есть ли способ остановить
import { SpeechNotification } from './model/speech-notification';
import {ChatconfigrationService} from './service/chatconfigration.service'
import { SpeechError } from './model/speech-error';
import { from } from 'zen-observable';
const { webkitSpeechRecognition, SpeechRecognition, webkitSpeechGrammarList, webkitSpeechRecognitionEvent, SpeechGrammarList, SpeechRecognitionEvent} = (window as any)
const outerthis = this;
@Component({
selector: 'app-chatbot',
templateUrl: './chatbot.component.html',
styleUrls: ['./chatbot.component.scss']
})
export class ChatbotComponent implements OnInit {
recognizing:boolean = false;
notification: string;
currentLanguage: string;
recognition: any;
speechRecz:any;
speechReconizationObject:any
speechText:string
chatSettings:any;
repeatedTimes:any
constructor(private speechRecognizer: SpeechRecognizerService,
private changeDetector: ChangeDetectorRef,
private userchatconfiguration: ChatconfigrationService)
{ }
ngOnInit() {
this.speechRecognizer.initialize();
this.speechText='Hello!, We are listening you?'
this.getUserchatSetting();
}
getUserchatSetting() {
this.userchatconfiguration.getUserChatbotSetting()
.subscribe(data => {
this.chatSettings = data;
this.repeatedTimes = data.chatbotVoiceOneRepeateTimes
setTimeout(()=> {
this.welcomeMessage()
}, 3000);
});
}
welcomeMessage() {
this.speechReconizationObject = new SpeechSynthesisUtterance(this.speechText)
this.speechReconizationObject.rate = this.chatSettings.rateOfSpeech;
window.speechSynthesis.speak(this.speechReconizationObject);
this.speechText = this.chatSettings.welcomeMessage
this.speechRecognizer.start();
this.initRecognition();
}
private initRecognition() {
this.speechRecognizer.onStart().subscribe(data => {
this.recognizing = true;
this.notification = 'I\'m listening...';
console.info('This is speech recognize stated point');
this.detectChanges();
});
this.speechRecognizer.onEnd().subscribe(data => {
this.recognizing = false;
this.notification = 'speech reconization is ended';
console.info('This is speech recognize end point');
});
this.speechRecognizer.onnomatch().subscribe(data => {
})
this.speechRecognizer.onResult().subscribe((data: SpeechNotification) => {
let message = data.content.trim();
if (data.info === 'final_transcript' && message.length > 0) {
console.info('entered into the final result')
console.log(data)
this.recognizing = false;
}
});
}
detectChanges = () => {
const outerthis = this;
(function myLoop(i) {
setTimeout(function () {
var msg = new SpeechSynthesisUtterance(outerthis.speechText);
this.speechReconizationObject = new SpeechSynthesisUtterance(this.speechText)
msg.rate = outerthis.chatSettings.rateOfSpeech;
console.info(i);
if (i == 1) {
outerthis.speechRecognizer.stop();
return false;
} else {
window.speechSynthesis.speak(msg);
}
if (--i)
myLoop(i); // decrement i and call myLoop again if i > 0
}, 4000);
})(this.repeatedTimes);
}
}