I am working on custom speech for a bot. I have added plain Bing Speech and it works on my site: https://envolvebot.azurewebsites.net/
Now I want to know how we can add custom speech (LUIS Custom Speech) to our Web Chat. I built a custom speech model using https://westus.cris.ai/Home/CustomSpeech and now I want to wire it in so that it works the same way Bing Speech does.
I am using the following code:
/*! modernizr 3.6.0 (Custom Build) | MIT *
* https://modernizr.com/download/?-getusermedia-speechsynthesis !*/
!function (e, n, s) { function i(e, n) { return typeof e === n } function o() { var e, n, s, o, a, f, d; for (var c in t) if (t.hasOwnProperty(c)) { if (e = [], n = t[c], n.name && (e.push(n.name.toLowerCase()), n.options && n.options.aliases && n.options.aliases.length)) for (s = 0; s < n.options.aliases.length; s++)e.push(n.options.aliases[s].toLowerCase()); for (o = i(n.fn, "function") ? n.fn() : n.fn, a = 0; a < e.length; a++)f = e[a], d = f.split("."), 1 === d.length ? Modernizr[d[0]] = o : (!Modernizr[d[0]] || Modernizr[d[0]] instanceof Boolean || (Modernizr[d[0]] = new Boolean(Modernizr[d[0]])), Modernizr[d[0]][d[1]] = o), r.push((o ? "" : "no-") + d.join("-")) } } var t = [], a = { _version: "3.6.0", _config: { classPrefix: "", enableClasses: !0, enableJSClass: !0, usePrefixes: !0 }, _q: [], on: function (e, n) { var s = this; setTimeout(function () { n(s[e]) }, 0) }, addTest: function (e, n, s) { t.push({ name: e, fn: n, options: s }) }, addAsyncTest: function (e) { t.push({ name: null, fn: e }) } }, Modernizr = function () { }; Modernizr.prototype = a, Modernizr = new Modernizr, Modernizr.addTest("speechsynthesis", "SpeechSynthesisUtterance" in e), Modernizr.addTest("getUserMedia", "mediaDevices" in navigator && "getUserMedia" in navigator.mediaDevices); var r = []; o(), delete a.addTest, delete a.addAsyncTest; for (var f = 0; f < Modernizr._q.length; f++)Modernizr._q[f](); e.Modernizr = Modernizr }(window, document);
// Necessary for safari
// Safari will only speak after speaking from a button click
var isSafari = /^((?!chrome|android).)*safari/i.test(navigator.userAgent);
const params = BotChat.queryParams(location.search);
function SpeakText() {
    // Speak an empty utterance from a user gesture; this "unlocks"
    // speechSynthesis so later programmatic speech is allowed in Safari.
    var msg = new SpeechSynthesisUtterance();
    window.speechSynthesis.speak(msg);
    document.getElementsByClassName("wc-mic")[0].removeEventListener("click", SpeakText);
}
if (isSafari) {
    window.addEventListener("load", function () {
        document.getElementsByClassName("wc-mic")[0].addEventListener("click", SpeakText);
    });
}
var femaleVoice = null;
if (Modernizr.speechsynthesis) {
    // Wrap the native constructor so every utterance gets the preferred voice.
    var baseSpeechPrototype = SpeechSynthesisUtterance.prototype;
    SpeechSynthesisUtterance = function (msg) {
        var utterance = new baseSpeechPrototype.constructor(msg);
        if (femaleVoice != null)
            utterance.voice = femaleVoice;
        return utterance;
    };
    // Voices load asynchronously; pick one once the list is available.
    window.speechSynthesis.onvoiceschanged = function () {
        var voice = "Microsoft Zira";   // Windows
        var safariVoice = "Samantha";   // macOS/iOS
        var voices = window.speechSynthesis.getVoices();
        voices.forEach(function (v) {
            if (v.name.startsWith(voice))
                femaleVoice = v;
            else if (v.name == safariVoice && v.lang == "en-US")
                femaleVoice = v;
        });
    };
}
// Needed to switch between the two audio context implementations
var AudioContext = window.AudioContext || window.webkitAudioContext;
// Map the legacy getUserMedia onto the new promise-based API that more
// browsers support, even though the framework uses the new style.
if (window.navigator.mediaDevices && window.navigator.mediaDevices.getUserMedia && !window.navigator.getUserMedia) {
    window.navigator.getUserMedia = function (constraints, successCallback, errorCallback) {
        window.navigator.mediaDevices.getUserMedia(constraints)
            .then(function (stream) {
                successCallback(stream);
            })
            .catch(function (err) {
                errorCallback(err);
            });
    };
}
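With that shim in place, a legacy-style call works in browsers that only expose the promise-based API. A quick sanity check (just for illustration, not part of the bot wiring):
// Request the microphone through the legacy callback signature; the shim
// above forwards this to navigator.mediaDevices.getUserMedia.
navigator.getUserMedia(
    { audio: true },
    function (stream) { console.log("Microphone stream acquired: " + stream.id); },
    function (err) { console.warn("getUserMedia failed:", err); }
);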
const bot = {
    id: params['botid'] || 'botid',
    name: params['botname'] || 'botname'
};
window.botchatDebug = params['debug'] && params['debug'] === 'true';
var speechOptions;
if (Modernizr.speechsynthesis) {
    // Full speech support: Bing Speech recognizer plus browser synthesizer.
    speechOptions = {
        speechRecognizer: new CognitiveServices.SpeechRecognizer({
            fetchCallback: function (authFetchEventId) { return getBingToken(); },
            fetchOnExpiryCallback: function (authFetchEventId) { return getBingToken(); }
        }),
        speechSynthesizer: new BotChat.Speech.BrowserSpeechSynthesizer()
    };
}
else if (Modernizr.getusermedia) {
    // Recognition only: a microphone is available but synthesis is not.
    speechOptions = {
        speechRecognizer: new CognitiveServices.SpeechRecognizer({
            fetchCallback: function (authFetchEventId) { return getBingToken(); },
            fetchOnExpiryCallback: function (authFetchEventId) { return getBingToken(); }
        }),
        speechSynthesizer: null
    };
}
else {
    speechOptions = null;
}
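What I am hoping to end up with is something along these lines. This is only a sketch of my intent: the customSpeechEndpointId property is my own guess, not an option I have found documented on CognitiveServices.SpeechRecognizer, and getCustomSpeechToken is sketched further below:
var customSpeechOptions = {
    speechRecognizer: new CognitiveServices.SpeechRecognizer({
        // Hypothetical property: how I imagine pointing the recognizer at my
        // CRIS (Custom Speech) deployment instead of the default Bing endpoint.
        customSpeechEndpointId: 'MY-CRIS-DEPLOYMENT-ID',
        fetchCallback: function (authFetchEventId) { return getCustomSpeechToken(); },
        fetchOnExpiryCallback: function (authFetchEventId) { return getCustomSpeechToken(); }
    }),
    speechSynthesizer: new BotChat.Speech.BrowserSpeechSynthesizer()
};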
// Direct Line connection for the bot
var botConnection = new BotChat.DirectLine({
    domain: params['domain'],
    //token: document.getElementById("directLineToken").value,
    secret: 'Secret',
    webSocket: params['webSocket'] && params['webSocket'] === 'true' // defaults to true
});
BotChat.App({
    sendTyping: true,
    locale: params['locale'],
    resize: 'detect',
    speechOptions: speechOptions,
    user: { id: 'ID', name: "You" },
    bot: { id: 'Vera', name: "Vera" },
    botConnection: botConnection
}, document.getElementById('BotChatElement'));
function getBingToken() {
    // Normally this token fetch is done from your secured backend to avoid
    // exposing the API key; this call would go to your backend, or retrieve
    // a token that was served as part of the original page.
    return fetch(
        'https://api.cognitive.microsoft.com/sts/v1.0/issueToken',
        {
            headers: {
                'Ocp-Apim-Subscription-Key': 'Key'
            },
            method: 'POST'
        }
    ).then(function (res) { return res.text(); });
}
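For the token, my assumption is that I would fetch it from the regional STS endpoint with my Custom Speech subscription key instead of the Bing Speech key (the region and key below are placeholders):
function getCustomSpeechToken() {
    // Assumption: the regional STS endpoint ('westus' here, matching the CRIS
    // portal I used) issues tokens for a Custom Speech subscription key the
    // same way the global endpoint does for Bing Speech.
    return fetch(
        'https://westus.api.cognitive.microsoft.com/sts/v1.0/issueToken',
        {
            headers: {
                'Ocp-Apim-Subscription-Key': 'CustomSpeechKey' // placeholder
            },
            method: 'POST'
        }
    ).then(function (res) { return res.text(); });
}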
Does anybody know how to do this?
Thanks in advance.