didOutput sample buffer only receives the video buffer if the delay is removed
0 votes
/ 14 October 2019

I am using the following code to record audio and video with an asset writer.

public func start() {
    print("start recording")
    guard !isRecording else { return }

    // A fresh session is created here, but it is immediately replaced
    // below by the session that the WebRTC capturer already owns.
    self.captureSession = AVCaptureSession()

    guard let capturer = self.videoCapturer as? RTCCameraVideoCapturer else {
        print("video capturer is nil")
        return
    }

    self.captureSession = capturer.captureSession
    self.isRecording = true

    sessionAtSourceTime = nil
    print("setup writer")
    setUpWriter()

    print(isRecording)
    print(videoWriter)

    switch videoWriter.status {
    case .writing:   print("status writing")
    case .failed:    print("status failed")
    case .cancelled: print("status cancelled")
    case .unknown:   print("status unknown")
    default:         print("status completed")
    }
}




func setUpWriter() {
    do {
        outputFileLocation = self.tempURL()

        // beginConfiguration()/commitConfiguration() must be paired around
        // session changes; the original called commit without begin.
        self.captureSession.beginConfiguration()

        // Add an audio data output to the (reused) WebRTC capture session.
        audioDataOutput = AVCaptureAudioDataOutput()
        if self.captureSession.canAddOutput(audioDataOutput) {
            audioDataOutput.setSampleBufferDelegate(self, queue: self.captureSessionQueue)
            self.captureSession.addOutput(audioDataOutput)
            print("Added AVCaptureDataOutput: audio")
        }

        // Redirect the session's existing video data output to this delegate.
        for output in captureSession.outputs {
            if let videoOutput = output as? AVCaptureVideoDataOutput {
                videoOutput.setSampleBufferDelegate(self, queue: self.captureSessionQueue)
            }
        }

        guard let audioDevice = AVCaptureDevice.default(.builtInMicrophone, for: .audio, position: .unspecified) else {
            print("No microphone available")
            return
        }
        print("Created AVCaptureDeviceInput: audio")

        let audioInput = try AVCaptureDeviceInput(device: audioDevice)
        if self.captureSession.canAddInput(audioInput) {
            self.captureSession.addInput(audioInput)
            print("Added AVCaptureDeviceInput: audio")
        } else {
            print("Could not add MIC!!!")
        }

        videoWriter = try AVAssetWriter(outputURL: outputFileLocation!, fileType: .mov)

        videoWriterInput = AVAssetWriterInput(mediaType: .video, outputSettings: [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: 1920,
            AVVideoHeightKey: 1080,
            AVVideoCompressionPropertiesKey: [
                AVVideoAverageBitRateKey: 2300000,
            ],
        ])

        videoWriter.shouldOptimizeForNetworkUse = true
        videoWriterInput.expectsMediaDataInRealTime = true

        if videoWriter.canAdd(videoWriterInput) {
            videoWriter.add(videoWriterInput)
            print("video input added")
        } else {
            print("no input added")
        }

        self.captureSession.usesApplicationAudioSession = true
        self.captureSession.commitConfiguration()

        let micAudioSettings: [String: Any] = [
            AVFormatIDKey: NSNumber(value: kAudioFormatMPEG4AAC),
            AVNumberOfChannelsKey: NSNumber(value: 2),
            AVSampleRateKey: NSNumber(value: 44100.0),
            AVEncoderBitRateKey: NSNumber(value: 128000)
        ]

        self.audioWriterInput = AVAssetWriterInput(mediaType: .audio, outputSettings: micAudioSettings)
        // Note: micAudioWriteInput is created below but never added to the writer.
        self.micAudioWriteInput = AVAssetWriterInput(mediaType: .audio, outputSettings: micAudioSettings)

        self.audioWriterInput.expectsMediaDataInRealTime = true
        self.micAudioWriteInput.expectsMediaDataInRealTime = true

        if videoWriter.canAdd(audioWriterInput!) {
            videoWriter.add(audioWriterInput!)
            print("audio input added")
        }

        self.videoWriter.startWriting()
    } catch {
        debugPrint(error.localizedDescription)
    }
}
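
For reference, tempURL() is called above but not shown in the question. A minimal sketch of what such a helper typically does (the body below is an assumption, not the author's actual code):

func tempURL() -> URL? {
    // Unique .mov file path in the app's temporary directory.
    let directory = NSTemporaryDirectory()
    guard !directory.isEmpty else { return nil }
    return URL(fileURLWithPath: directory)
        .appendingPathComponent("\(UUID().uuidString).mov")
}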



public func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {

    if self.isAnswered == false {
        let presentationTimeStamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
        if assetWriter == nil {
            print("createwriterinput2")
            createWriterInput(for: presentationTimeStamp)
        } else {
            let chunkDuration = CMTimeGetSeconds(CMTimeSubtract(presentationTimeStamp, chunkStartTime))
            print("chunk duration", chunkDuration)
            print("chunk", chunk)

            if chunkDuration > chunk {
                // Close out the current chunk and queue it for playback.
                print("video created")
                chunk = 2
                assetWriter.endSession(atSourceTime: presentationTimeStamp)

                let newChunkURL = chunkOutputURL!
                let chunkAssetWriter = assetWriter!

                chunkAssetWriter.finishWriting {
                    print("finishWriting says", chunkAssetWriter.status.rawValue, chunkAssetWriter.error)
                    print("queuing \(newChunkURL)")
                    self.player.insert(AVPlayerItem(url: newChunkURL), after: nil)

                    if self.isFirstChunk == false {
                        //self.uplodVideo(chunkURL: newChunkURL)
                    }
                    self.isFirstChunk = false
                }
                print("createwriterinput3")
                createWriterInput(for: presentationTimeStamp)
            }
        }

        let writable = canWrite()
        // Note: this creates a new serial queue on every callback, and sync
        // still blocks the delegate queue, so it adds no real serialization.
        let queue = DispatchQueue(label: "recording")
        queue.sync {
            if writable, self.sessionAtSourceTime == nil {
                // Start the writer session at the first buffer's timestamp.
                self.sessionAtSourceTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
                self.videoWriter.startSession(atSourceTime: self.sessionAtSourceTime!)
            }

            if output is AVCaptureVideoDataOutput {
                if connection.isVideoMirroringSupported {
                    print("video mirrored false")
                    connection.isVideoMirrored = false
                }
            }

            if writable,
               output is AVCaptureVideoDataOutput,
               self.videoWriterInput.isReadyForMoreMediaData {
                // Write the video buffer.
                self.videoWriterInput.append(sampleBuffer)
                // Note: orientation is set after the append, so it only takes
                // effect for subsequent buffers.
                if connection.isVideoOrientationSupported {
                    print("#portrait1")
                    connection.videoOrientation = .portrait
                }
            } else if writable,
                      output is AVCaptureAudioDataOutput,
                      self.audioWriterInput.isReadyForMoreMediaData {
                print("audio buffer")
                // Write the audio buffer.
                self.audioWriterInput?.append(sampleBuffer)
            }
        }
    } else {
        if connection.isVideoOrientationSupported {
            print("#portrait2")
            // connection.videoOrientation = .portrait
        }

        if isLastChunkWritten == false {
            let presentationTimeStamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)

            if assetWriter == nil {
                print("createwriterinput1")
                createWriterInput(for: presentationTimeStamp)
            } else {
                // Finish and queue the final chunk exactly once.
                print("chunk url last chunk", chunkOutputURL)
                isLastChunkWritten = true
                assetWriter.endSession(atSourceTime: presentationTimeStamp)

                let newChunkURL = chunkOutputURL!
                let chunkAssetWriter = assetWriter!

                chunkAssetWriter.finishWriting {
                    print("finishWriting says", chunkAssetWriter.status.rawValue, chunkAssetWriter.error)
                    print("queuing \(newChunkURL)")
                    self.player.insert(AVPlayerItem(url: newChunkURL), after: nil)
                }
            }
        }
    }
}
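
The delegate above also relies on canWrite() and createWriterInput(for:), which the question does not include. A rough sketch inferred purely from the call sites (the property names match the question, but the bodies are assumptions):

func canWrite() -> Bool {
    // Buffers can only be appended while the writer is actively writing.
    return isRecording && videoWriter != nil && videoWriter.status == .writing
}

func createWriterInput(for presentationTimeStamp: CMTime) {
    // Begin a new chunk file whose writer session starts at this timestamp.
    chunkOutputURL = tempURL()
    assetWriter = try? AVAssetWriter(outputURL: chunkOutputURL!, fileType: .mov)

    let settings: [String: Any] = [
        AVVideoCodecKey: AVVideoCodecType.h264,
        AVVideoWidthKey: 1920,
        AVVideoHeightKey: 1080,
    ]
    let input = AVAssetWriterInput(mediaType: .video, outputSettings: settings)
    input.expectsMediaDataInRealTime = true
    if assetWriter.canAdd(input) {
        assetWriter.add(input)
    }

    assetWriter.startWriting()
    assetWriter.startSession(atSourceTime: presentationTimeStamp)
    chunkStartTime = presentationTimeStamp
}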

I use the following code to start recording.

self.captureSessionQueue.async {
    Thread.sleep(forTimeInterval: 1.5)
    self.start()
}

I changed the delay in Thread.sleep to 1.5 seconds; now recording no longer takes 7 to 10 seconds to start, and both audio and video are recorded. But it freezes the UI when the call is answered. If I remove Thread.sleep(forTimeInterval: 1.5), the UI does not freeze, but then I never receive the audio buffer in the sample buffer delegate method. I have to add the delay because I use the Google WebRTC library for video calls, and it already has an AVCaptureSession with a video data output attached. I create a new AVCaptureSession, assign the Google WebRTC session to the newly created one, and then add the audio data output. So if the delay is removed, the video buffer is received but the audio buffer is not. How do I stop the UI from freezing while still receiving both the video and audio buffers at the same time?
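
One direction to explore, sketched under the assumption that the 1.5-second sleep only exists to give the WebRTC capturer time to start its session: instead of blocking captureSessionQueue with Thread.sleep (which stalls everything else scheduled on that queue, including work the UI may wait on), re-check AVCaptureSession.isRunning asynchronously and only call start() once the session is live. The helper name startWhenSessionIsReady() and the 0.25-second retry interval below are hypothetical, not a verified fix:

func startWhenSessionIsReady() {
    captureSessionQueue.async {
        guard let capturer = self.videoCapturer as? RTCCameraVideoCapturer else { return }
        if capturer.captureSession.isRunning {
            // Session is live; safe to attach outputs and start the writer.
            self.start()
        } else {
            // Not running yet: retry shortly without ever blocking a thread.
            self.captureSessionQueue.asyncAfter(deadline: .now() + 0.25) {
                self.startWhenSessionIsReady()
            }
        }
    }
}

This keeps the queue free between checks, so the UI should not freeze the way it does with a blocking sleep, while still waiting for the WebRTC session before the audio output is added.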

...