Не удается обнаружить событие моргания глаз с помощью комплекта Firebase ML - PullRequest
0 голосов
/ 11 октября 2019

Я использую Firebase ML-Kit для обнаружения лица, теперь я хочу реализовать мигание глаз, но я не могу обнаружить мигание глаз. Я всегда получаю leftEyeOpenProbability и rightEyeOpenProbability 1.0. Я использую следующий код, такой же как https://github.com/firebase/quickstart-ios/tree/master/mlvision

, пожалуйста, помогите мне реализовать функцию обнаружения моргания глаз.

/// AVCaptureVideoDataOutput delegate callback: wraps each camera frame for
/// ML Kit and forwards it to the on-device face detector.
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // The pixel buffer is only needed here to read the frame dimensions,
    // which the detector uses to scale results back to view coordinates.
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        print("Failed to get image buffer from sample buffer.")
        return
    }

    // Wrap the raw sample buffer for ML Kit and attach the frame orientation,
    // derived from the front-camera position, so detection works regardless
    // of device rotation.
    let image = VisionImage(buffer: sampleBuffer)
    let imageMetadata = VisionImageMetadata()
    imageMetadata.orientation = visionImageOrientation(
        from: imageOrientation(fromDevicePosition: .front))
    image.metadata = imageMetadata

    let frameWidth = CGFloat(CVPixelBufferGetWidth(pixelBuffer))
    let frameHeight = CGFloat(CVPixelBufferGetHeight(pixelBuffer))
    detectFacesOnDevice(in: image, width: frameWidth, height: frameHeight)
}

/// Maps a `UIImage.Orientation` to the equivalent ML Kit detector orientation.
/// - Parameter imageOrientation: Orientation of the captured frame.
/// - Returns: The `VisionDetectorImageOrientation` to set on `VisionImageMetadata`.
public func visionImageOrientation(
    from imageOrientation: UIImage.Orientation
    ) -> VisionDetectorImageOrientation {
    switch imageOrientation {
    case .up:
        return .topLeft
    case .down:
        return .bottomRight
    case .left:
        return .leftBottom
    case .right:
        return .rightTop
    case .upMirrored:
        return .topRight
    case .downMirrored:
        return .bottomLeft
    case .leftMirrored:
        return .leftTop
    case .rightMirrored:
        return .rightBottom
    @unknown default:
        // UIImage.Orientation is a non-frozen Obj-C enum: without this clause
        // the compiler warns, and a future SDK case would be unhandled.
        // Fall back to the un-rotated orientation.
        return .topLeft
    }
}

/// Derives the `UIImage.Orientation` of a camera frame from the physical
/// device orientation and which camera (front/back) captured it.
/// Front-camera frames are mirrored, hence the `*Mirrored` variants.
/// - Parameter devicePosition: Camera position; defaults to `.back`.
/// - Returns: Orientation describing how the frame should be interpreted.
public  func imageOrientation(
    fromDevicePosition devicePosition: AVCaptureDevice.Position = .back
    ) -> UIImage.Orientation {
    var deviceOrientation = UIDevice.current.orientation
    // faceUp/faceDown/unknown give no usable rotation; fall back to the
    // interface (status-bar) orientation instead.
    if deviceOrientation == .faceDown || deviceOrientation == .faceUp ||
        deviceOrientation == .unknown {
        deviceOrientation = currentUIOrientation()
    }
    switch deviceOrientation {
    case .portrait:
        return devicePosition == .front ? .leftMirrored : .right
    case .landscapeLeft:
        return devicePosition == .front ? .downMirrored : .up
    case .portraitUpsideDown:
        return devicePosition == .front ? .rightMirrored : .left
    case .landscapeRight:
        return devicePosition == .front ? .upMirrored : .down
    case .faceDown, .faceUp, .unknown:
        return .up
    @unknown default:
        // UIDeviceOrientation is a non-frozen Obj-C enum; treat any future
        // case like the "no usable rotation" cases above.
        return .up
    }
}

/// Infers a device orientation from the current interface orientation.
/// Used as a fallback when `UIDevice.current.orientation` is ambiguous
/// (faceUp/faceDown/unknown).
/// - Returns: A `UIDeviceOrientation` matching the UI's rotation.
/// - Note: Hops to the main thread if needed, since `statusBarOrientation`
///   must be read on the main thread. NOTE(review): `statusBarOrientation`
///   is deprecated as of iOS 13 — consider the window scene's
///   `interfaceOrientation` when the deployment target allows.
private func currentUIOrientation() -> UIDeviceOrientation {
    let deviceOrientation = { () -> UIDeviceOrientation in
        // Interface and device landscape orientations are inverted relative
        // to each other, hence the left/right swap.
        switch UIApplication.shared.statusBarOrientation {
        case .landscapeLeft:
            return .landscapeRight
        case .landscapeRight:
            return .landscapeLeft
        case .portraitUpsideDown:
            return .portraitUpsideDown
        case .portrait, .unknown:
            return .portrait
        @unknown default:
            // UIInterfaceOrientation is a non-frozen Obj-C enum; default to
            // portrait for any future case.
            return .portrait
        }
    }
    guard Thread.isMainThread else {
        var currentOrientation: UIDeviceOrientation = .portrait
        DispatchQueue.main.sync {
            currentOrientation = deviceOrientation()
        }
        return currentOrientation
    }
    return deviceOrientation()
}

    /// Runs ML Kit on-device face detection and reads the eye-open
    /// probabilities for each detected face.
    /// - Parameters:
    ///   - image: The frame to analyze, with orientation metadata attached.
    ///   - width: Frame width in pixels (for mapping results to the view).
    ///   - height: Frame height in pixels.
    private func detectFacesOnDevice(in image: VisionImage, width: CGFloat, height: CGFloat) {
        let options = VisionFaceDetectorOptions()
        options.performanceMode = .accurate
        options.landmarkMode = .all
        // BUG FIX: classification defaults to .none, so leftEyeOpenProbability
        // and rightEyeOpenProbability were never computed (always reported as
        // the "uncomputed" value). Enabling classification is required for
        // eye-open and smiling probabilities.
        options.classificationMode = .all
        options.isTrackingEnabled = true
        // NOTE(review): with contour detection enabled ML Kit only reports the
        // most prominent face; disable contours if multi-face blink detection
        // is needed.
        options.contourMode = .all

        let faceDetector = vision.faceDetector(options: options)

        faceDetector.process(image, completion: { features, error in
            if let error = error {
                print(error.localizedDescription)
                return
            }

            guard error == nil, let features = features, !features.isEmpty else {
                //self.removeDetectionAnnotations()
                print("On-Device face detector returned no results.")
                return
            }
            //self.removeDetectionAnnotations()
            for face in features {

                let angleY = face.headEulerAngleY
                let angleZ = face.headEulerAngleZ
                let smilingProbability = face.smilingProbability
                let leftEyeOpenProbability = face.leftEyeOpenProbability
                let rightEyeOpenProbability = face.rightEyeOpenProbability

                self.infoLabel.text = "right eye open probablity-- \(rightEyeOpenProbability)"


            }
        })

    }

1 Ответ

0 голосов
/ 19 октября 2019

Необходимо установить options.classificationMode = .all, чтобы включить вычисление leftEyeOpenProbability и rightEyeOpenProbability. По умолчанию классификация отключена, поэтому вы всегда получаете одно и то же значение.

Поиск classificationMode в следующей документации:

https://firebase.google.com/docs/ml-kit/ios/detect-faces

...