1. Face detection in static images
Face detection in a static image is fairly straightforward; the inline comments below walk through each step.
- (void)faceDetectWithImage:(UIImage *)image {
    // Remove any face boxes left over from a previous detection pass
    for (UIView *view in _imageView.subviews) {
        [view removeFromSuperview];
    }
    // Detection accuracy: choose between CIDetectorAccuracyHigh (more processing, better results)
    // and CIDetectorAccuracyLow (less processing, faster). High accuracy is used here.
    NSDictionary *opts = [NSDictionary dictionaryWithObject:CIDetectorAccuracyHigh
                                                     forKey:CIDetectorAccuracy];
    // Convert the UIImage to a CIImage
    CIImage *faceImage = [CIImage imageWithCGImage:image.CGImage];
    CIDetector *faceDetector = [CIDetector detectorOfType:CIDetectorTypeFace context:nil options:opts];
    // Run detection; returns an array of CIFaceFeature objects
    NSArray *features = [faceDetector featuresInImage:faceImage];
    // Get the image size
    CGSize inputImageSize = [faceImage extent].size;
    // Core Image uses a bottom-left origin, so mirror along the y axis...
    CGAffineTransform transform = CGAffineTransformScale(CGAffineTransformIdentity, 1, -1);
    // ...and translate the image back up into UIKit coordinates
    transform = CGAffineTransformTranslate(transform, 0, -inputImageSize.height);
    // Walk through every detected face
    for (CIFaceFeature *faceFeature in features) {
        // Get the face's frame in UIKit coordinates
        CGRect faceViewBounds = CGRectApplyAffineTransform(faceFeature.bounds, transform);
        CGSize viewSize = _imageView.bounds.size;
        CGFloat scale = MIN(viewSize.width / inputImageSize.width,
                            viewSize.height / inputImageSize.height);
        CGFloat offsetX = (viewSize.width - inputImageSize.width * scale) / 2;
        CGFloat offsetY = (viewSize.height - inputImageSize.height * scale) / 2;
        // Scale to match the image view
        CGAffineTransform scaleTransform = CGAffineTransformMakeScale(scale, scale);
        // Apply the scale, then center the rect inside the image view
        faceViewBounds = CGRectApplyAffineTransform(faceViewBounds, scaleTransform);
        faceViewBounds.origin.x += offsetX;
        faceViewBounds.origin.y += offsetY;
        // Draw a border around the face region
        UIView *faceView = [[UIView alloc] initWithFrame:faceViewBounds];
        faceView.layer.borderWidth = 2;
        faceView.layer.borderColor = [[UIColor redColor] CGColor];
        [_imageView addSubview:faceView];
        // Check whether a left eye position was detected
        if (faceFeature.hasLeftEyePosition) {}
        // Check whether a right eye position was detected
        if (faceFeature.hasRightEyePosition) {}
        // Check whether a mouth position was detected
        if (faceFeature.hasMouthPosition) {}
    }
    NSLog(@"Detected %lu face(s)", (unsigned long)features.count);
}
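As a quick usage sketch: the MIN-based scaling above assumes the image view uses aspect-fit, and the asset name here is just a placeholder.

// Hypothetical call site: load a test photo and run detection on it.
UIImage *photo = [UIImage imageNamed:@"group_photo"]; // "group_photo" is a placeholder asset name
_imageView.contentMode = UIViewContentModeScaleAspectFit; // required by the MIN() scaling above
_imageView.image = photo;
[self faceDetectWithImage:photo];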
2. Real-time face detection with the camera, broken into the following steps:
(1) Initialize the camera
#pragma mark - Camera setup
- (void)getCameraSession
{
    // Create the capture session
    _captureSession = [[AVCaptureSession alloc] init];
    if ([_captureSession canSetSessionPreset:AVCaptureSessionPreset1280x720]) { // set the resolution
        _captureSession.sessionPreset = AVCaptureSessionPreset1280x720;
    }
    // Get the input device (front camera)
    AVCaptureDevice *captureDevice = [self getCameraDeviceWithPosition:AVCaptureDevicePositionFront];
    if (!captureDevice) {
        NSLog(@"Failed to get the front camera.");
        return;
    }
    NSError *error = nil;
    // Create a device input from the capture device to feed the session
    _captureDeviceInput = [[AVCaptureDeviceInput alloc] initWithDevice:captureDevice error:&error];
    if (error) {
        NSLog(@"Failed to create the device input: %@", error.localizedDescription);
        return;
    }
    // Create the still image output
    _captureStillImageOutput = [[AVCaptureStillImageOutput alloc] init];
    NSDictionary *outputSettings = @{AVVideoCodecKey : AVVideoCodecJPEG};
    [_captureStillImageOutput setOutputSettings:outputSettings]; // output settings
    // Add the input to the session (guarded, and only once)
    if ([_captureSession canAddInput:_captureDeviceInput]) {
        [_captureSession addInput:_captureDeviceInput];
    }
    // Add the output to the session
    if ([_captureSession canAddOutput:_captureStillImageOutput]) {
        [_captureSession addOutput:_captureStillImageOutput];
    }
    // Create the video preview layer to show the live camera feed
    _captureVideoPreviewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:self.captureSession];
    CALayer *layer = self.videoMainView.layer;
    layer.masksToBounds = YES;
    _captureVideoPreviewLayer.frame = layer.bounds;
    _captureVideoPreviewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill; // fill mode
    // Insert the preview layer below the focus cursor so the cursor stays visible
    [layer insertSublayer:_captureVideoPreviewLayer below:self.focusCursor.layer];
    // Set up the video data output stream
    [self addVidelDataOutput];
}
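The getCameraDeviceWithPosition: helper called above isn't listed in this post; a minimal sketch, using the pre-iOS 10 device enumeration that matches the AVCaptureStillImageOutput-era APIs used here, could look like this:

// Assumed implementation: return the camera at the requested position, or nil if absent.
- (AVCaptureDevice *)getCameraDeviceWithPosition:(AVCaptureDevicePosition)position
{
    NSArray *cameras = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
    for (AVCaptureDevice *camera in cameras) {
        if (camera.position == position) {
            return camera;
        }
    }
    return nil;
}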
(2) Real-time detection needs the live video stream, so enable a data output here and set this class as its sample buffer delegate (the class must conform to AVCaptureVideoDataOutputSampleBufferDelegate)
/**
 *  AVCaptureVideoDataOutput: capture the video data stream
 */
- (void)addVidelDataOutput
{
    AVCaptureVideoDataOutput *captureOutput = [[AVCaptureVideoDataOutput alloc] init];
    captureOutput.alwaysDiscardsLateVideoFrames = YES; // drop frames that arrive while we're still busy
    // Deliver sample buffers on a dedicated serial queue
    dispatch_queue_t queue = dispatch_queue_create("myQueue", DISPATCH_QUEUE_SERIAL);
    [captureOutput setSampleBufferDelegate:self queue:queue];
    // Request BGRA pixel buffers so frames can be converted to images directly
    NSString *key = (NSString *)kCVPixelBufferPixelFormatTypeKey;
    NSNumber *value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA];
    NSDictionary *settings = @{key : value};
    [captureOutput setVideoSettings:settings];
    if ([self.captureSession canAddOutput:captureOutput]) {
        [self.captureSession addOutput:captureOutput];
    }
}
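Note that nothing above actually starts the session. Assuming getCameraSession runs in viewDidLoad, one reasonable wiring is to start and stop capture with view appearance:

// Assumed wiring: start capturing when the view appears, stop when it disappears.
- (void)viewWillAppear:(BOOL)animated
{
    [super viewWillAppear:animated];
    [self.captureSession startRunning];
}

- (void)viewWillDisappear:(BOOL)animated
{
    [super viewWillDisappear:animated];
    [self.captureSession stopRunning];
}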
(3) Implement the data stream delegate
#pragma mark - Sample Buffer Delegate
// Called each time the output writes a new sample buffer, i.e. for every captured frame
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    UIImage *img = [self imageFromSampleBuffer:sampleBuffer];
    UIImage *image = [[InfoTools shareInfoTools] fixOrientation:img];
    // Run face detection; returns one NSValue-wrapped rect per detected face
    NSArray *features = [[InfoTools shareInfoTools] leftEyePositionsWithImage:image];
    dispatch_async(dispatch_get_main_queue(), ^{
        // Add another label if there are more faces than labels
        // (subview 0 is assumed to be the focus cursor, so face labels start at index 1)
        if (self.videoMainView.subviews.count - 1 < features.count) {
            FaceLabel *label = [[FaceLabel alloc] init];
            label.hidden = YES;
            [self.videoMainView addSubview:label];
        }
        // Hide every label, then unhide and reposition one per detected face
        for (UIView *label in self.videoMainView.subviews) {
            if ([label isMemberOfClass:[FaceLabel class]]) {
                label.hidden = YES;
            }
        }
        for (int i = 0; i < features.count; i++) {
            NSValue *layerRect = features[i];
            FaceLabel *label = self.videoMainView.subviews[i + 1];
            CGRect originalRect = [layerRect CGRectValue];
            // Convert from image coordinates to view coordinates
            CGRect getRect = [self getUIImageViewRectFromCIImageRect:originalRect];
            label.frame = getRect;
            label.hidden = NO;
        }
    });
}
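InfoTools's leftEyePositionsWithImage: isn't shown in this article; despite its name, the delegate above treats its result as an array of NSValue-wrapped face rects, so a plausible sketch reusing the CIDetector approach from part 1 is:

// Assumed implementation: run CIDetector on the frame and return each face's
// bounds as an NSValue-wrapped CGRect (still in Core Image coordinates).
- (NSArray *)leftEyePositionsWithImage:(UIImage *)image
{
    NSDictionary *opts = @{CIDetectorAccuracy : CIDetectorAccuracyLow}; // low accuracy keeps per-frame cost down
    CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeFace context:nil options:opts];
    CIImage *ciImage = [CIImage imageWithCGImage:image.CGImage];
    NSMutableArray *rects = [NSMutableArray array];
    for (CIFaceFeature *feature in [detector featuresInImage:ciImage]) {
        [rects addObject:[NSValue valueWithCGRect:feature.bounds]];
    }
    return rects;
}

In a real app the CIDetector should be created once and cached rather than rebuilt per frame, since constructing it is expensive.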
(4) Create a UIImage object from the sample buffer data
// In the delegate method, sampleBuffer is a Core Media object whose pixel data
// can be accessed through Core Video.
// Create a UIImage object from the sample buffer data
- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer
{
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:imageBuffer];
    CIContext *temporaryContext = [CIContext contextWithOptions:nil];
    CGImageRef videoImage = [temporaryContext createCGImage:ciImage
                                                   fromRect:CGRectMake(0, 0,
                                                                       CVPixelBufferGetWidth(imageBuffer),
                                                                       CVPixelBufferGetHeight(imageBuffer))];
    // LeftMirrored compensates for the front camera's native sensor orientation
    UIImage *result = [[UIImage alloc] initWithCGImage:videoImage scale:1.0 orientation:UIImageOrientationLeftMirrored];
    CGImageRelease(videoImage);
    return result;
}
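The delegate also calls getUIImageViewRectFromCIImageRect:, another helper not shown here. A sketch that flips the Core Image rect into UIKit coordinates and scales it to videoMainView, assuming a portrait 1280x720 frame that fills the view, might be:

// Assumed implementation: convert a face rect from Core Image coordinates
// (bottom-left origin, image pixels) to videoMainView coordinates (top-left origin, points).
- (CGRect)getUIImageViewRectFromCIImageRect:(CGRect)ciRect
{
    CGSize imageSize = CGSizeMake(720, 1280); // assumed detection image size (portrait 1280x720 preset)
    CGSize viewSize = self.videoMainView.bounds.size;
    CGFloat scaleX = viewSize.width / imageSize.width;
    CGFloat scaleY = viewSize.height / imageSize.height;
    // Flip the y axis, then scale into view coordinates
    CGFloat flippedY = imageSize.height - ciRect.origin.y - ciRect.size.height;
    return CGRectMake(ciRect.origin.x * scaleX,
                      flippedY * scaleY,
                      ciRect.size.width * scaleX,
                      ciRect.size.height * scaleY);
}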