Проблема при использовании AVSpeechSynthensizer, завершающегося в устройствах реального времени - PullRequest
0 голосов
/ 12 декабря 2018

Я интегрирую диалоговый поток в мой файл swift, который включает вопросы, отображаемые в одном поле с именем chipresponse, и он должен выводить голос, а затем поле сообщения получит ввод через голос, и вот мой код, и он завершается.

    @IBOutlet weak var messageField: UITextField!
    @IBOutlet weak var chipResponse: UILabel!
    var a_count = 0
    var b_count = 0
    var docRef : DocumentReference!
    let uid = Auth.auth().currentUser?.uid
    var db: Firestore!
    var a = ""
    private let speechRecognizer = SFSpeechRecognizer(locale: Locale.init(identifier: "en-US"))
    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?
    private let audioEngine = AVAudioEngine()

    @IBOutlet weak var microphonebutton: UIButton!

    @IBAction func sendMessage(_ sender: Any) {
        if audioEngine.isRunning
        {
        audioEngine.stop()
        recognitionRequest?.endAudio()
            let audioSession = AVAudioSession.sharedInstance()
            do {

                try audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord)
                try audioSession.setMode(AVAudioSessionModeDefault)

            } catch {
                print("audioSession properties weren't set because of an error.")
            }

        microphonebutton.isEnabled = false
        microphonebutton.setTitle("Start Recording", for: .normal)
        messageField.isUserInteractionEnabled = true
        let request = ApiAI.shared().textRequest()
        if let text = self.messageField.text, text != "" {


                if self.a_count == 1
                {

                    print("count",self.a_count)

                }
                let new = uid! + text
                request?.query = new
                print("new",new)


            print("text",text)
        }
        else
        {
            return
        }


        request?.setMappedCompletionBlockSuccess({ (request1, response1) in
            let response1 = response1 as! AIResponse
            if let textResponse1 = response1.result.fulfillment.messages{
                let textRespoArray = textResponse1 [ 0 ] as NSDictionary
                print(textRespoArray.value(forKey: "speech") as! String)
                self.a = textRespoArray.value(forKey: "speech") as! String
                print("a",self.a)
                print("else count",self.a_count)
                if self.messageField.text != "Early years"
                {

                }
                if self.a == "Level 1 completed. Unlocked the next level. Visit the View Your Story section to see your answers."
                {
                    print("ramya")

                    self.messageField.isHidden = true

                }

                if self.a == "Are there any childhood experiences that stand out that you’d like to share"
                {
                    print("In loop")
                    self.a_count = 1
                    print("a count",self.a_count)
                }
                self.speechAndText(text:  textRespoArray.value(forKey: "speech") as! String)
            }
        }, failure: { (request1, error) in
            print(error!)
        })
        ApiAI.shared().enqueue(request)

        messageField.text = ""
        }
        else
        {
            startRecording()
            microphonebutton.setTitle("Stop Recording", for: .normal)
        }
    }



    let speechSynthesizer = AVSpeechSynthesizer()

    func speechAndText(text: String) {
        let speechUtterance = AVSpeechUtterance(string: text)
        speechSynthesizer.speak(speechUtterance)
        UIView.animate(withDuration: 1.0, delay: 0.0, options: .curveEaseInOut, animations: {
            self.chipResponse.text = text
        }, completion: nil)
    }
   override func viewDidLoad() {
        super.viewDidLoad()
       messageField.isUserInteractionEnabled = false
       microphonebutton.isEnabled = false  //2
       speechRecognizer!.delegate = self  //3
      SFSpeechRecognizer.requestAuthorization { (authStatus) in  //4
        var isButtonEnabled = false
        switch authStatus {  //5
        case .authorized:
            isButtonEnabled = true
        case .denied:
            isButtonEnabled = false
            print("User denied access to speech recognition")
        case .restricted:
            isButtonEnabled = false
            print("Speech recognition restricted on this device")
        case .notDetermined:
            isButtonEnabled = false
            print("Speech recognition not yet authorized")
        }
        OperationQueue.main.addOperation() {
            self.microphonebutton.isEnabled = isButtonEnabled
        }
    }
        // Do any additional setup after loading the view.
    }
    func startRecording() {
        messageField.text = ""
        if recognitionTask != nil {
            recognitionTask?.cancel()
            recognitionTask = nil
        }
        let audioSession = AVAudioSession.sharedInstance()
        do {
            try AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayAndRecord, mode: AVAudioSessionModeDefault, options: .defaultToSpeaker)
            try AVAudioSession.sharedInstance().setActive(true)
            try audioSession.setMode(AVAudioSessionModeMeasurement)
            try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
        } catch {
            print("audioSession properties weren't set because of an error.")
        }
        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
        let inputNode = audioEngine.inputNode
        guard let recognitionRequest = recognitionRequest else {
            fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")
        }
        recognitionRequest.shouldReportPartialResults = true
        recognitionTask = speechRecognizer!.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in
            var isFinal = false
            if result != nil {

                self.messageField.text = result?.bestTranscription.formattedString
                isFinal = (result?.isFinal)!
            }
            if error != nil || isFinal {
                self.audioEngine.stop()
                inputNode.removeTap(onBus: 0)
                self.recognitionRequest = nil
                self.recognitionTask = nil
                self.microphonebutton.isEnabled = true
            }
        })
        let recordingFormat = inputNode.outputFormat(forBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
            self.recognitionRequest?.append(buffer)
        }
        audioEngine.prepare()
        do {
            try audioEngine.start()
        } catch {
            print("audioEngine couldn't start because of an error.")
        }
        messageField.text = "Say something, I'm listening!"

    }
}

Когда я использую этот код, голосовой текст чип-ответа становится слишком низким, а затем завершается и произносит nullptr () - иногда нажмите на ошибку, а иногда она показывает ошибку кода IFFormat

Пожалуйста, помогите мне,Спасибо

...