I am working with AVFoundation and the ImageDetect library, which crops faces out of an image. I run video through an AVCaptureSession and schedule a repeating timer that grabs a video frame as a UIImage. The image displays fine in the image view, but when I pass that same image to the ImageDetect library, it hits the .notFound case. I use the same library with the frame from an ARKit session and it works fine there, but it does not work in this case.

Here is my code:
import UIKit
import AVFoundation
import ImageDetect

class ViewController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {

    @IBOutlet weak var cameraView: UIView!
    @IBOutlet weak var imgvew: UIImageView!

    private var scanTimer: Timer?
    var captureSession = AVCaptureSession()
    var sessionOutput = AVCaptureStillImageOutput() // deprecated since iOS 10 in favor of AVCapturePhotoOutput
    var previewLayer = AVCaptureVideoPreviewLayer()
    var isBackCamera = true
    var clickedImage: Data?
    var captureDevice: AVCaptureDevice?
    var imagePicker: UIImagePickerController!

    override func viewDidLoad() {
        super.viewDidLoad()
    }
    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        if AVCaptureDevice.authorizationStatus(for: .video) == .authorized {
            self.camera()
        } else {
            AVCaptureDevice.requestAccess(for: .video) { granted in
                if granted {
                    // Session setup touches the view hierarchy, so hop back to the main thread.
                    DispatchQueue.main.async {
                        self.camera()
                    }
                } else {
                    // self.showPermissionAlert()
                }
            }
        }
        // Grab a still frame once per second.
        scanTimer = Timer.scheduledTimer(timeInterval: 1.0, target: self, selector: #selector(getImages), userInfo: nil, repeats: true)
    }
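    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        // Invalidate the scan timer when leaving the screen so it does not keep firing
        // (and retaining the view controller) after the view is gone.
        scanTimer?.invalidate()
        scanTimer = nil
    }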
    func camera() {
        self.tabBarController?.tabBar.isHidden = true
        do {
            // Use the front camera directly instead of iterating over every device.
            guard let camera = getDevice(position: .front) else { return }
            let input = try AVCaptureDeviceInput(device: camera)
            if captureSession.canAddInput(input) {
                captureSession.addInput(input)
                sessionOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
                if captureSession.canAddOutput(sessionOutput) {
                    // Add the output before starting the session.
                    captureSession.addOutput(sessionOutput)
                    captureSession.startRunning()
                    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
                    previewLayer.videoGravity = .resizeAspectFill
                    previewLayer.connection?.videoOrientation = .portrait
                    cameraView.layer.addSublayer(previewLayer)
                    previewLayer.position = CGPoint(x: self.cameraView.frame.width / 2, y: self.cameraView.frame.height / 2)
                    cameraView.contentMode = .scaleToFill
                    previewLayer.bounds = cameraView.frame
                }
            }
        } catch {
            print("Failed to set up camera input: \(error)")
        }
    }
    @objc func getImages() {
        guard let videoConnection = sessionOutput.connection(with: .video) else { return }
        sessionOutput.captureStillImageAsynchronously(from: videoConnection) { buffer, error in
            guard let buffer = buffer,
                  let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(buffer),
                  let captureImage = UIImage(data: imageData) else { return }
            // UI updates belong on the main thread; the completion handler is not guaranteed to run there.
            DispatchQueue.main.async {
                self.imgvew.image = captureImage
            }
            captureImage.detector.crop(type: .face) { result in
                switch result {
                case .success(let faces):
                    print("Success: \(faces.count) face(s)")
                case .notFound:
                    print("Not found")
                case .failure(let error):
                    print("Failure: \(error)")
                }
            }
        }
    }
    func getDevice(position: AVCaptureDevice.Position) -> AVCaptureDevice? {
        // Return the first video capture device at the requested position.
        return AVCaptureDevice.devices(for: .video).first { $0.position == position }
    }
}
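One difference I can think of between this path and the ARKit path: the JPEG still carries an EXIF orientation flag, while the frames I convert from ARKit end up with .up orientation. In case the detector reads raw pixels and ignores imageOrientation, here is a minimal sketch of how the capture could be re-rendered upright before detection; normalizedImage is my own hypothetical helper, not part of ImageDetect or AVFoundation:

import UIKit

// Hypothetical helper: re-render a UIImage so its pixels are physically upright
// and the EXIF orientation flag is discarded.
// Assumption: the detector may read raw pixel data and ignore imageOrientation.
func normalizedImage(_ image: UIImage) -> UIImage {
    guard image.imageOrientation != .up else { return image }
    let renderer = UIGraphicsImageRenderer(size: image.size)
    return renderer.image { _ in
        // draw(in:) applies the orientation transform, so the output is .up.
        image.draw(in: CGRect(origin: .zero, size: image.size))
    }
}

The idea would be to call normalizedImage(captureImage).detector.crop(type: .face) { ... } instead of detecting on the raw capture. Has anyone seen the still-image path fail like this while ARKit frames work?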