How to detect faces in a video session on iOS 5
1 vote
/ 31 October 2011

In my app I use a video session in which I can take photos. I would also like faces to be detected in this video session. I looked at Apple's "SquareCam" sample, which is exactly what I'm looking for, but integrating their code into my project is driving me crazy.

#import "CaptureSessionManager.h"
#import <ImageIO/ImageIO.h>

@implementation CaptureSessionManager

@synthesize captureSession;
@synthesize previewLayer;
@synthesize stillImageOutput;
@synthesize stillImage;

#pragma mark Capture Session Configuration

- (id)init {
    if ((self = [super init])) {
        [self setCaptureSession:[[AVCaptureSession alloc] init]];
    }
    return self;
}
// Note: -didReceiveMemoryWarning is a UIViewController callback; on a plain
// NSObject subclass like this one it is never invoked automatically.
- (void)didReceiveMemoryWarning
{
    NSLog(@"memorywarning");

    // Release any cached data, images, etc. that aren't in use.
}
- (void)addVideoPreviewLayer {
    [self setPreviewLayer:[[[AVCaptureVideoPreviewLayer alloc] initWithSession:[self captureSession]] autorelease]];
    [[self previewLayer] setVideoGravity:AVLayerVideoGravityResizeAspectFill];
}

- (void)addVideoInput {
    AVCaptureDevice *videoDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];

    if (videoDevice) {
        NSError *error = nil;
        if ([videoDevice lockForConfiguration:&error]) {
            // Use continuous autofocus and auto white balance when the
            // device supports them; check each mode before setting it.
            if ([videoDevice isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
                videoDevice.focusMode = AVCaptureFocusModeContinuousAutoFocus;
            }
            if ([videoDevice isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance]) {
                videoDevice.whiteBalanceMode = AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance;
            }
            [videoDevice unlockForConfiguration];
        } else {
            // Respond to the failure as appropriate.
            NSLog(@"Couldn't lock video device for configuration: %@", error);
        }
        // The preset belongs to the session, not to the device configuration.
        [captureSession setSessionPreset:AVCaptureSessionPresetPhoto];
    }

    if (videoDevice) {
        NSError *error;
        AVCaptureDeviceInput *videoIn = [AVCaptureDeviceInput deviceInputWithDevice:videoDevice error:&error];
        if (!error) {
            if ([[self captureSession] canAddInput:videoIn])
                [[self captureSession] addInput:videoIn];
            else
                NSLog(@"Couldn't add video input");     
        }
        else
            NSLog(@"Couldn't create video input");
    }
    else
        NSLog(@"Couldn't create video capture device");
}

- (void)addStillImageOutput
{
    [self setStillImageOutput:[[[AVCaptureStillImageOutput alloc] init] autorelease]];
    NSDictionary *outputSettings = [[NSDictionary alloc] initWithObjectsAndKeys:
                                    AVVideoCodecJPEG, AVVideoCodecKey, nil];
    [[self stillImageOutput] setOutputSettings:outputSettings];
    [outputSettings release];

    // The output has no connections until it has been added to the session,
    // so the video connection is looked up at capture time instead
    // (see -captureStillImage below).
    if ([[self captureSession] canAddOutput:[self stillImageOutput]]) {
        [[self captureSession] addOutput:[self stillImageOutput]];
    } else {
        NSLog(@"Couldn't add still image output");
    }
}

- (void)captureStillImage
{
    // Find the connection that carries video from the input to the output.
    AVCaptureConnection *videoConnection = nil;
    for (AVCaptureConnection *connection in [[self stillImageOutput] connections]) {
        for (AVCaptureInputPort *port in [connection inputPorts]) {
            if ([[port mediaType] isEqual:AVMediaTypeVideo]) {
                videoConnection = connection;
                break;
            }
        }
        if (videoConnection) {
            break;
        }
    }

    NSLog(@"about to request a capture from: %@", [self stillImageOutput]);
    [[self stillImageOutput] captureStillImageAsynchronouslyFromConnection:videoConnection
                                                         completionHandler:^(CMSampleBufferRef imageSampleBuffer, NSError *error) {
        CFDictionaryRef exifAttachments = CMGetAttachment(imageSampleBuffer, kCGImagePropertyExifDictionary, NULL);
        if (exifAttachments) {
            NSLog(@"attachments: %@", exifAttachments);
        } else {
            NSLog(@"no attachments");
        }
        NSData *imageData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageSampleBuffer];

        UIImage *image = [[UIImage alloc] initWithData:imageData];
        [self setStillImage:image];
        [image release];

        [[NSNotificationCenter defaultCenter] postNotificationName:kImageCapturedSuccessfully object:nil];
    }];
}


- (void)dealloc {

    [[self captureSession] stopRunning];

    [previewLayer release], previewLayer = nil;
    [captureSession release], captureSession = nil;
    [stillImageOutput release], stillImageOutput = nil;
    [stillImage release], stillImage = nil;

    [super dealloc];
}

@end
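
For reference, here is a minimal sketch of how a manager like this is typically driven from a view controller (the controller and the captureManager property are illustrative, not from my actual project):

#import "CaptureSessionManager.h"

// Illustrative only: assemble the manager, attach the preview layer,
// and start the session.
- (void)viewDidLoad {
    [super viewDidLoad];

    CaptureSessionManager *manager = [[CaptureSessionManager alloc] init];
    [manager addVideoInput];
    [manager addVideoPreviewLayer];
    [manager addStillImageOutput];

    [[manager previewLayer] setFrame:[[self view] bounds]];
    [[[self view] layer] addSublayer:[manager previewLayer]];

    [[manager captureSession] startRunning];

    [self setCaptureManager:manager];   // hypothetical retained property
    [manager release];
}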

Apart from the video session, I have managed to detect faces in a UIImage that I imported into my project. I did this following @Abhinav Jha's example (How to properly instantiate a CIDetector class object in the iOS 5 face detection API).

CIImage *ciImage = [[CIImage alloc] initWithImage:[UIImage imageNamed:@"Photo.JPG"]];
if (ciImage == nil) {
    [imageView setImage:[UIImage imageNamed:@"Photo.JPG"]];
}

// Note the argument order of initWithObjectsAndKeys: the value comes first,
// then the key; use the CIDetector constants rather than string literals.
NSDictionary *options = [[NSDictionary alloc] initWithObjectsAndKeys:
                         CIDetectorAccuracyHigh, CIDetectorAccuracy, nil];
CIDetector *ciDetector = [CIDetector detectorOfType:CIDetectorTypeFace
                                            context:nil
                                            options:options];
NSArray *features = [ciDetector featuresInImage:ciImage];
NSLog(@"number of faces detected: %lu", (unsigned long)[features count]);

I hope someone can point me in the right direction for combining the two examples!
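
From reading SquareCam, my understanding is that the bridge between the two examples is an AVCaptureVideoDataOutput whose sample buffer delegate runs a CIDetector over each frame. Below is a rough sketch of that direction, assuming the manager conforms to AVCaptureVideoDataOutputSampleBufferDelegate and that faceDetector is a hypothetical property holding a CIDetector created once (building one per frame is expensive):

// Sketch: add a video data output next to the still image output so that
// live frames reach a delegate, then run the CIDetector on each frame.
- (void)addVideoDataOutput {
    AVCaptureVideoDataOutput *videoDataOutput = [[[AVCaptureVideoDataOutput alloc] init] autorelease];
    [videoDataOutput setAlwaysDiscardsLateVideoFrames:YES];
    // CIDetector works on BGRA pixel buffers.
    [videoDataOutput setVideoSettings:
        [NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA]
                                    forKey:(id)kCVPixelBufferPixelFormatTypeKey]];

    dispatch_queue_t queue = dispatch_queue_create("videoDataOutputQueue", NULL);
    [videoDataOutput setSampleBufferDelegate:self queue:queue];
    dispatch_release(queue);

    if ([[self captureSession] canAddOutput:videoDataOutput])
        [[self captureSession] addOutput:videoDataOutput];
}

// AVCaptureVideoDataOutputSampleBufferDelegate callback, invoked per frame.
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection {
    CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:pixelBuffer];

    // faceDetector: hypothetical property holding a CIDetector created once
    // (e.g. with CIDetectorAccuracyLow for speed on live video).
    NSArray *features = [[self faceDetector] featuresInImage:ciImage];
    NSLog(@"faces in this frame: %lu", (unsigned long)[features count]);
}

If that is roughly the right shape, the part I still don't follow in SquareCam is how the detected CIFaceFeature bounds get mapped back onto the preview layer.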
