// Obtain detected faces by adding an AVCaptureMetadataOutput to the AVCaptureSession.
// Attach a metadata output to the capture session and request face objects,
// delivering delegate callbacks on the main queue.
self.metaDataOutput = [[AVCaptureMetadataOutput alloc] init];
if (![self.session canAddOutput:self.metaDataOutput]) {
    return;
}
[self.session addOutput:self.metaDataOutput];
// metadataObjectTypes may only name types listed in availableMetadataObjectTypes,
// which is populated once the output has been added to the session — so set it after addOutput:.
self.metaDataOutput.metadataObjectTypes = @[AVMetadataObjectTypeFace];
[self.metaDataOutput setMetadataObjectsDelegate:self
                                          queue:dispatch_get_main_queue()];
// Reference: https://stackoverflow.com/questions/41354698/face-detection-with-camera
// The delegate method below receives the face metadata. If you log it you will see the values are
// normalized to the 0–1 range and expressed in the capture device's ("wrt") coordinate space, which
// can look confusing at first. Apple's AVCaptureVideoPreviewLayer has a dedicated method for this
// coordinate conversion, but in many situations — for example when processing the coordinates
// yourself with OpenGL — you need to convert them manually. The code below converts them into
// familiar screen coordinates.
/// AVCaptureMetadataOutputObjectsDelegate callback: invoked on the delegate queue
/// with the metadata objects (faces, here) detected in the current frame.
///
/// Each AVMetadataFaceObject's bounds are normalized (0..1) and rotated relative
/// to this view's coordinate space, hence the x/y and width/height swaps below.
/// AVCaptureVideoPreviewLayer's -transformedMetadataObjectForMetadataObject: does
/// this conversion for the preview layer; this manual version is for consumers
/// such as OpenGL that need raw view/viewport coordinates.
/// See https://stackoverflow.com/questions/41354698/face-detection-with-camera
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputMetadataObjects:(NSArray *)metadataObjects fromConnection:(AVCaptureConnection *)connection {
    // BUGFIX: the original iterated over an undefined variable `faces`;
    // the detected objects arrive in the `metadataObjects` parameter.
    for (AVMetadataFaceObject *faceObject in metadataObjects) {
        CGRect face = faceObject.bounds;
        // Map the rotated, normalized metadata rect into this view's coordinate space.
        CGRect facePreviewBounds = CGRectMake(face.origin.y * self.bounds.size.width,
                                              face.origin.x * self.bounds.size.height,
                                              face.size.width * self.bounds.size.height,
                                              face.size.height * self.bounds.size.width);
        NSLog(@"%@", NSStringFromCGRect(facePreviewBounds)); // TODO: remove or wrap in a debug-only macro
        // OpenGL viewports use a bottom-left origin, so flip the y coordinate.
        // NOTE(review): the flip offset uses face.size.width * height while the viewport
        // height below uses face.size.height * width — kept byte-for-byte from the
        // original; confirm which term is intended before relying on the viewport.
        // Comment out the next two lines if OpenGL is not needed.
        CGFloat viewY = self.bounds.size.height - (face.origin.x * self.bounds.size.height) - face.size.width * self.bounds.size.height;
        glViewport(face.origin.y * self.bounds.size.width, viewY, face.size.width * self.bounds.size.height, face.size.height * self.bounds.size.width);
    }
}