How can I record AVDepthData video and save it in the gallery?
20 June 2019

I am developing an app to record RGB-D sequences on the iPhone using either the DualRearCamera or the TrueDepthCamera. I can capture and visualize the RGB frames and the depth frames, and I have built a version that compresses this data and stores it in the iPhone's internal files. However, my goal is to save both sequences (the RGB sequence and the depth-map sequence) to the gallery, and I am running into problems using AVAssetWriter to create the depth-map video.

I am using an iPhone X, Xcode 10.2.1, and Swift 5.

import UIKit
import AVFoundation
import Photos // AssetsLibrary is deprecated; Photos is the modern framework for gallery access

var noMoreSpace = false

class ViewController: UIViewController{

    @IBOutlet weak var previewView: UIImageView!
    @IBOutlet weak var timeLabel: UILabel!
    @IBOutlet weak var previewModeControl: UISegmentedControl!

    let session = AVCaptureSession()
    let dataOutputQueue = DispatchQueue(label: "video data queue")
    let videoOutput = AVCaptureVideoDataOutput()

    let movieOutput = AVCaptureMovieFileOutput()
    let depthOutput = AVCaptureDepthDataOutput()
    let depthCapture = DepthCapture()

    var previewLayer = AVCaptureVideoPreviewLayer()

    var inputDevice: AVCaptureDeviceInput!
    let videoDeviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera, .builtInTrueDepthCamera], mediaType: .video, position: .unspecified)

    var Timestamp: String {
        let dateFormatter = DateFormatter()
        dateFormatter.dateFormat = "ddMM_HHmmss"
        return dateFormatter.string(from: Date())
    }
    var isRecording = false
    var time = 0
    var timer = Timer()

    enum PreviewMode: Int {
        case original
        case depth
    }
    var previewMode = PreviewMode.original
    var depthMap: CIImage?
    var scale: CGFloat = 0.0


    //let sessionQueue = DispatchQueue(label: "session queue")

    override func viewDidLoad() {
        super.viewDidLoad()

        timeLabel.isHidden = true //TODO: Disable the rest of the UI
        previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
        configureCaptureSession()
        session.startRunning()
    }

    func configureCaptureSession() {

        session.beginConfiguration()
        guard let camera = AVCaptureDevice.default(.builtInTrueDepthCamera, for: .video, position: .unspecified) else {
            fatalError("This device has no TrueDepth camera")
        }

        do {
            let cameraInput = try AVCaptureDeviceInput(device: camera)

            if session.canAddInput(cameraInput){
                session.sessionPreset = .vga640x480
                session.addInput(cameraInput)
                self.inputDevice = cameraInput
            }

            if session.canAddOutput(videoOutput){
                videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
                videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
                session.addOutput(videoOutput)
                let videoConnection = videoOutput.connection(with: .video)
                videoConnection?.videoOrientation = .portrait

                //previewLayer = AVCaptureVideoPreviewLayer(session: session)
                //previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
                //previewLayer.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
                //previewView.layer.addSublayer(previewLayer)
                //previewLayer.position = CGPoint(x: self.previewView.frame.width / 2, y: self.previewView.frame.height / 2)
                //previewLayer.bounds = previewView.frame
            }
            //Add Depth output to the session
            if session.canAddOutput(depthOutput){
                session.addOutput(depthOutput)
                depthOutput.setDelegate(self, callbackQueue: dataOutputQueue)
                depthOutput.isFilteringEnabled = true
                let depthConnection = depthOutput.connection(with: .depthData)
                depthConnection?.videoOrientation = .portrait

            }

            // movieOutput must be part of a session before
            // movieOutput.startRecording(to:recordingDelegate:) is called below.
            // Caveat: on iOS an AVCaptureMovieFileOutput generally cannot run in
            // the same session as an AVCaptureVideoDataOutput (the data output
            // stops receiving frames), which is why an AVAssetWriter-based
            // recorder such as DepthCapture is the usual alternative.
            if session.canAddOutput(movieOutput){
                session.addOutput(movieOutput)
            }

        } catch {
            print("Error")
        }

        let outputRect = CGRect(x: 0, y: 0, width: 1, height: 1)
        let videoRect = videoOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
        let depthRect = depthOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
        //  Calculate the scaling factor between videoRect and depthRect
        scale = max(videoRect.width, videoRect.height) / max(depthRect.width, depthRect.height)
        // Changing the AVCaptureDevice configuration requires locking it first
        do{
            try camera.lockForConfiguration()
            //  Set the AVCaptureDevice‘s minimum frame duration (which is the inverse of the maximum frame rate) to be equal to the supported frame rate of the depth data
            if let frameDuration = camera.activeDepthDataFormat?.videoSupportedFrameRateRanges.first?.minFrameDuration{
                camera.activeVideoMinFrameDuration = frameDuration
            }
            //  Unlock the configuration you locked
            camera.unlockForConfiguration()
        }catch{
            fatalError(error.localizedDescription)
        }

        session.commitConfiguration()
    }

    @IBAction func startStopRecording(_ sender: Any) {
        if isRecording{
            stopRecording()
        } else {
            startRecording()
        }
    }

    func startRecording(){
        timeLabel.isHidden = false
        timer = Timer.scheduledTimer(timeInterval: 1, target: self, selector: #selector(ViewController.timerAction), userInfo: nil, repeats: true)
        let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
        let flagTime = Timestamp
        let auxStr = flagTime+"_output.mp4"
        let fileUrl = paths[0].appendingPathComponent(auxStr)
        depthCapture.prepareForRecording(timeFlag: flagTime)
        movieOutput.startRecording(to: fileUrl, recordingDelegate: self)
        print(fileUrl.absoluteString)
        print("Recording started")
        self.isRecording = true
    }

    func stopRecording(){
        timeLabel.isHidden = true
        timer.invalidate()
        time = 0
        timeLabel.text = "0"
        movieOutput.stopRecording()
        print("Stopped recording!")
        self.isRecording = false
        do {
            try depthCapture.finishRecording(success: { (url: URL) -> Void in
                print(url.absoluteString)
            })
        } catch {
            print("Error while finishing depth capture.")
        }
    }
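
    // To get a finished recording into the gallery, the file can be added to the
    // Photos library (this is what `import Photos` above is for). A minimal
    // sketch, assuming photo-library authorization has already been granted;
    // this helper and its name are illustrative and not part of the original code:
    func saveVideoToGallery(at fileUrl: URL) {
        PHPhotoLibrary.shared().performChanges({
            // Create a new video asset in the library from the file on disk
            PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: fileUrl)
        }) { success, error in
            if success {
                print("Video saved to gallery")
            } else {
                print("Could not save video: \(String(describing: error))")
            }
        }
    }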

    @objc func timerAction() {
        time += 1
        timeLabel.text = String(time)
    }

    @IBAction func previewModeChanged(_ sender: UISegmentedControl) {
        previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
    }

    @IBAction func switchCamera(_ sender: Any) {
        let currentDevice = self.inputDevice.device
        let currentPosition = currentDevice.position
        let preferredPosition: AVCaptureDevice.Position
        let preferredDeviceType: AVCaptureDevice.DeviceType
        let devices = self.videoDeviceDiscoverySession.devices
        var newVideoDevice: AVCaptureDevice? = nil

        switch currentPosition {
        case .unspecified, .front:
            preferredPosition = .back
            preferredDeviceType = .builtInDualCamera

        case .back:
            preferredPosition = .front
            preferredDeviceType = .builtInTrueDepthCamera
        @unknown default:
            preferredPosition = .back
            preferredDeviceType = .builtInDualCamera
        }

        // First, look for a device with both the preferred position and device type.
        // Otherwise, fall back to a device with only the preferred position.
        // TODO: understand these conditions better
        if let device = devices.first(where: { $0.position == preferredPosition && $0.deviceType == preferredDeviceType }) {
            newVideoDevice = device
        } else if let device = devices.first(where: { $0.position == preferredPosition }) {
            newVideoDevice = device
        }

        if let videoDevice = newVideoDevice {
            do {
                let cameraInput = try AVCaptureDeviceInput(device: videoDevice)
                self.session.beginConfiguration()
                self.session.removeInput(self.inputDevice)
                if self.session.canAddInput(cameraInput) {
                    session.sessionPreset = .vga640x480
                    self.session.addInput(cameraInput)
                    self.inputDevice = cameraInput
                }else {
                    self.session.addInput(self.inputDevice)
                }
                self.session.commitConfiguration()
            } catch{
                print("Error occurred while creating video device input: \(error)")
            }
        }
    }        
}

extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate{
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        let image = CIImage(cvPixelBuffer: pixelBuffer)

        let previewImage: CIImage

        switch previewMode {
        case .original:
            previewImage = image
        case .depth:
            previewImage = depthMap ?? image
        }

        let displayImage = UIImage(ciImage: previewImage)
        DispatchQueue.main.async {[weak self] in self?.previewView.image = displayImage}
    }
}

extension ViewController: AVCaptureDepthDataOutputDelegate{
    func depthDataOutput(_ output: AVCaptureDepthDataOutput, didOutput depthData: AVDepthData, timestamp: CMTime, connection: AVCaptureConnection) {

        var convertedDepth: AVDepthData
        //  Ensure the depth data is in the format you need: 32-bit floating point
        if depthData.depthDataType != kCVPixelFormatType_DepthFloat32 {
            convertedDepth = depthData.converting(toDepthDataType: kCVPixelFormatType_DepthFloat32)
        } else {
            convertedDepth = depthData
        }
        //  You save the depth data map from the AVDepthData object as a CVPixelBuffer
        let pixelBuffer = convertedDepth.depthDataMap
        //  Using an extension on CVPixelBuffer (sketched after the listing), clamp
        //  the pixel values to keep them between 0.0 and 1.0
        pixelBuffer.clamp()
        //  Convert the pixel buffer into a CIImage
        let depthMap = CIImage(cvPixelBuffer: pixelBuffer)

        //  You store depthMap in a class variable for later use
        DispatchQueue.main.async {
            [weak self] in self?.depthMap = depthMap
        }
    }
}
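
The clamp() call in the depth delegate relies on an extension on CVPixelBuffer that is not shown in the question (it may be part of the elided code). A plausible implementation, assuming a single-plane Float32 buffer such as kCVPixelFormatType_DepthFloat32, clamps every value to [0.0, 1.0]:

import CoreVideo

extension CVPixelBuffer {
    // Clamp every Float32 value in the buffer to the range [0.0, 1.0]
    func clamp() {
        CVPixelBufferLockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0))
        let width = CVPixelBufferGetWidth(self)
        let height = CVPixelBufferGetHeight(self)
        // Rows may be padded, so derive the stride from bytes-per-row
        let rowStride = CVPixelBufferGetBytesPerRow(self) / MemoryLayout<Float>.stride
        let floatBuffer = unsafeBitCast(CVPixelBufferGetBaseAddress(self), to: UnsafeMutablePointer<Float>.self)
        for y in 0..<height {
            for x in 0..<width {
                let value = floatBuffer[y * rowStride + x]
                floatBuffer[y * rowStride + x] = min(max(value, 0.0), 1.0)
            }
        }
        CVPixelBufferUnlockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0))
    }
}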
...
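
Finally, the DepthCapture helper used above (prepareForRecording(timeFlag:) and finishRecording(success:)) is not included in the question. The standard way to turn a stream of depth frames into a video file is an AVAssetWriter fed through an AVAssetWriterInputPixelBufferAdaptor. Below is a minimal sketch of such a class: the two method names match the calls in the question, but everything inside (frame rate, output size, pixel format, the appendPixelBuffer(_:) entry point) is an assumption, not the question's actual implementation:

import AVFoundation

// A sketch of the DepthCapture helper: it wraps an AVAssetWriter and appends
// depth frames (already converted to an encodable pixel format) to a video
// file in the app's Documents directory.
class DepthCapture {
    private var assetWriter: AVAssetWriter?
    private var writerInput: AVAssetWriterInput?
    private var adaptor: AVAssetWriterInputPixelBufferAdaptor?
    private var outputURL: URL?
    private var frameCount: Int64 = 0
    private let fps: Int32 = 30   // assumed depth frame rate

    func prepareForRecording(timeFlag: String) {
        let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
        let url = paths[0].appendingPathComponent(timeFlag + "_depth.mov")
        try? FileManager.default.removeItem(at: url)
        guard let writer = try? AVAssetWriter(outputURL: url, fileType: .mov) else { return }

        // Assumed output settings; the real class may differ
        let settings: [String: Any] = [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: 640,
            AVVideoHeightKey: 480
        ]
        let input = AVAssetWriterInput(mediaType: .video, outputSettings: settings)
        input.expectsMediaDataInRealTime = true
        // The adaptor lets us append raw CVPixelBuffers instead of CMSampleBuffers
        let bufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(
            assetWriterInput: input,
            sourcePixelBufferAttributes: [
                kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA
            ])
        writer.add(input)
        writer.startWriting()
        writer.startSession(atSourceTime: .zero)

        assetWriter = writer
        writerInput = input
        adaptor = bufferAdaptor
        outputURL = url
        frameCount = 0
    }

    // Call once per depth frame, e.g. from depthDataOutput(_:didOutput:timestamp:connection:),
    // after rendering the Float32 depth map into a BGRA buffer.
    func appendPixelBuffer(_ pixelBuffer: CVPixelBuffer) {
        guard let input = writerInput, let bufferAdaptor = adaptor,
              input.isReadyForMoreMediaData else { return }
        let time = CMTime(value: frameCount, timescale: fps)
        bufferAdaptor.append(pixelBuffer, withPresentationTime: time)
        frameCount += 1
    }

    func finishRecording(success: @escaping (URL) -> Void) throws {
        guard let writer = assetWriter, let url = outputURL else { return }
        writerInput?.markAsFinished()
        writer.finishWriting {
            success(url)
        }
    }
}

Note that an H.264 encoder cannot consume kCVPixelFormatType_DepthFloat32 buffers directly, so each depth map would first have to be rendered into a BGRA (or grayscale) buffer before being appended; the URL delivered by finishRecording(success:) can then be handed to the gallery-saving helper shown earlier.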