I want to scale the input image to 64x64 for my Core ML model, but I can't get it to work
asked 28 August 2018

Here is the code:

import UIKit
import AVFoundation
import CoreML
import Vision

class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

let identifierLabel: UILabel = {
    let label = UILabel()
    label.backgroundColor = .white
    label.textAlignment = .center
    label.translatesAutoresizingMaskIntoConstraints = false
    return label
}()

override func viewDidLoad() {
    super.viewDidLoad()

    // here is where we start up the camera
    // for more details visit: https://www.letsbuildthatapp.com/course_video?id=1252
    let captureSession = AVCaptureSession()
    captureSession.sessionPreset = .photo

    guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
    guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
    captureSession.addInput(input)

    captureSession.startRunning()

    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    view.layer.addSublayer(previewLayer)
    previewLayer.frame = view.frame

    let dataOutput = AVCaptureVideoDataOutput()
    dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
    captureSession.addOutput(dataOutput)

    // VNImageRequestHandler(cgImage: <#T##CGImage#>, options: [:]).perform(<#T##requests: [VNRequest]##[VNRequest]#>)

    setupIdentifierConfidenceLabel()
}

fileprivate func setupIdentifierConfidenceLabel() {
    view.addSubview(identifierLabel)
    identifierLabel.bottomAnchor.constraint(equalTo: view.bottomAnchor, constant: -32).isActive = true
    identifierLabel.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
    identifierLabel.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
    identifierLabel.heightAnchor.constraint(equalToConstant: 50).isActive = true
}


func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {

    // print("Camera was able to capture a frame:", Date())

    guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

    // !!!Important
    // make sure to go download the models at https://developer.apple.com/machine-learning/ scroll to the bottom 
    guard let model = try? VNCoreMLModel(for: handWritten().model) else { return }
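    // Note: when going through VNCoreMLRequest, Vision scales each frame to the
    // model's declared input size (presumably 64x64 for this model) automatically;
    // request.imageCropAndScaleOption only controls how the frame is cropped first.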
    let request = VNCoreMLRequest(model: model) { (finishedReq, err) in

        //perhaps check the err

        // print(finishedReq.results)

        guard let results = finishedReq.results as? [VNClassificationObservation] else { return }

        guard let firstObservation = results.first else { return }

        print(firstObservation.identifier, firstObservation.confidence)

        DispatchQueue.main.async {
            self.identifierLabel.text = "\(firstObservation.identifier) \(firstObservation.confidence * 100)"
        }

    }
    try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}

}
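
For reference, Vision already resizes whatever pixel buffer is passed to a VNCoreMLRequest down to the model's declared input size, so if the model is defined with a 64x64 image input no manual scaling should be required; the request's imageCropAndScaleOption only controls how the frame is cropped or stretched beforehand. If the buffer does have to be resized by hand (for example to call the generated handWritten class directly, without Vision), a Core Image approach can work. The sketch below is a minimal example under those assumptions: the resizePixelBuffer helper name and the shared CIContext are my own choices, and the 64x64 target size is taken from the question.

import CoreImage
import CoreVideo

// A shared context so a new one is not created for every captured frame.
let ciContext = CIContext()

// Minimal sketch: scale a CVPixelBuffer to an arbitrary size (e.g. 64x64)
// by rendering a scaled CIImage into a freshly allocated pixel buffer.
func resizePixelBuffer(_ pixelBuffer: CVPixelBuffer, to size: CGSize) -> CVPixelBuffer? {
    let image = CIImage(cvPixelBuffer: pixelBuffer)
    let scaleX = size.width / CGFloat(CVPixelBufferGetWidth(pixelBuffer))
    let scaleY = size.height / CGFloat(CVPixelBufferGetHeight(pixelBuffer))
    let scaled = image.transformed(by: CGAffineTransform(scaleX: scaleX, y: scaleY))

    var output: CVPixelBuffer?
    let attributes: [CFString: Any] = [
        kCVPixelBufferCGImageCompatibilityKey: true,
        kCVPixelBufferCGBitmapContextCompatibilityKey: true
    ]
    let status = CVPixelBufferCreate(kCFAllocatorDefault,
                                     Int(size.width),
                                     Int(size.height),
                                     kCVPixelFormatType_32BGRA,
                                     attributes as CFDictionary,
                                     &output)
    guard status == kCVReturnSuccess, let resized = output else { return nil }
    ciContext.render(scaled, to: resized)
    return resized
}

// Usage inside captureOutput, before running the model without Vision:
// guard let small = resizePixelBuffer(pixelBuffer, to: CGSize(width: 64, height: 64)) else { return }

// When staying with Vision, setting the crop/scale behaviour on the request
// (before perform) is usually all that is needed:
// request.imageCropAndScaleOption = .scaleFill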

...