These APIs can detect face coordinates, but they cannot generate feature values (embeddings).
1. Face detection with AVCaptureMetadataOutput (a setup sketch follows this list)
AVMetadataFaceObject
2. CIDetector
CIFaceFeature
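A minimal sketch of approach 1, assuming camera permission has already been granted. The class name FaceCaptureController and its properties are illustrative, not part of the original note:

```swift
import AVFoundation

// Illustrative wrapper: runs a capture session and reports detected faces.
final class FaceCaptureController: NSObject, AVCaptureMetadataOutputObjectsDelegate {
    let session = AVCaptureSession()
    lazy var previewLayer = AVCaptureVideoPreviewLayer(session: session)

    func configure() {
        // Camera input (assumes camera permission has already been granted).
        guard let camera = AVCaptureDevice.default(for: .video),
              let input = try? AVCaptureDeviceInput(device: camera),
              session.canAddInput(input) else { return }
        session.addInput(input)

        // Metadata output restricted to face objects.
        let output = AVCaptureMetadataOutput()
        guard session.canAddOutput(output) else { return }
        session.addOutput(output)
        output.setMetadataObjectsDelegate(self, queue: .main)
        output.metadataObjectTypes = [.face]   // set after addOutput(_:)
        session.startRunning()
    }

    // Called whenever faces are detected in the video stream.
    func metadataOutput(_ output: AVCaptureMetadataOutput,
                        didOutput metadataObjects: [AVMetadataObject],
                        from connection: AVCaptureConnection) {
        for case let face as AVMetadataFaceObject in metadataObjects {
            // bounds is in normalized metadata-output space (0...1),
            // not UIKit points; it still needs converting (see below).
            print("faceID \(face.faceID), bounds \(face.bounds)")
        }
    }
}
```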
Difficult part:
converting between coordinate systems.
1. Convert the detected face region into correct UIKit coordinates.
AVCaptureVideoPreviewLayer's layerRectConverted(fromMetadataOutputRect:) method (rectForMetadataOutputRectOfInterest: in Objective-C) can do the conversion:
it turns an AVMetadataFaceObject's bounds into a rect relative to the AVCaptureVideoPreviewLayer (see the helper sketch after the code block below).
2. Convert CIFaceFeature bounds, which are in Core Image coordinates (origin at the bottom-left of the image), into UIKit coordinates inside the image view:
```swift
// Inside a UIViewController that owns an `imageView` displayed with .scaleAspectFit.
func detect() {
    guard let uiImage = imageView.image, let image = CIImage(image: uiImage) else { return }

    // High-accuracy face detector; a nil context uses the default CIContext.
    let detector = CIDetector(ofType: CIDetectorTypeFace,
                              context: nil,
                              options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])

    // Smile and eye-blink detection must be requested explicitly, otherwise
    // hasSmile / leftEyeClosed / rightEyeClosed are never populated.
    let features = detector?.features(in: image,
                                      options: [CIDetectorSmile: true, CIDetectorEyeBlink: true])

    features?.forEach { feature in
        guard let face = feature as? CIFaceFeature else { return }

        // face.bounds is in Core Image coordinates (origin at the bottom-left);
        // convert to UIKit coordinates inside the image view before drawing.
        let box = UIView(frame: convertFrame(faceCoreImageFrame: face.bounds,
                                             imageCoreImageSize: image.extent.size,
                                             imageViewUIKitFrame: imageView.frame))
        box.layer.borderColor = UIColor.red.cgColor
        box.layer.borderWidth = 1
        box.layer.backgroundColor = UIColor.clear.cgColor
        imageView.addSubview(box)

        if face.hasSmile {
            print("smiling")
        }
        if !face.rightEyeClosed {
            print("right eye open")
        }
        if !face.leftEyeClosed {
            print("left eye open")
        }
    }
}

// Maps a face rect from Core Image coordinates to UIKit coordinates,
// assuming the image is shown with .scaleAspectFit.
func convertFrame(faceCoreImageFrame: CGRect,
                  imageCoreImageSize: CGSize,
                  imageViewUIKitFrame: CGRect) -> CGRect {
    // 1. Flip the y axis: Core Image's origin is bottom-left, UIKit's is top-left.
    var transform = CGAffineTransform(scaleX: 1, y: -1)
    transform = transform.translatedBy(x: 0, y: -imageCoreImageSize.height)
    var faceUIKitFrame = faceCoreImageFrame.applying(transform)

    // 2. Apply the aspect-fit scale factor used to fit the image into the view.
    let viewSize = imageViewUIKitFrame.size
    let scale = min(viewSize.width / imageCoreImageSize.width,
                    viewSize.height / imageCoreImageSize.height)
    faceUIKitFrame = faceUIKitFrame.applying(CGAffineTransform(scaleX: scale, y: scale))

    // 3. Add the letterboxing offsets that center the image inside the view.
    let offsetX = (viewSize.width - imageCoreImageSize.width * scale) / 2
    let offsetY = (viewSize.height - imageCoreImageSize.height * scale) / 2
    faceUIKitFrame.origin.x += offsetX
    faceUIKitFrame.origin.y += offsetY
    return faceUIKitFrame
}
```
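For point 1 above (the AVCaptureMetadataOutput path), a small helper sketch; the function name faceRectInPreviewLayer is illustrative, not from the original note:

```swift
import AVFoundation
import UIKit

// Maps an AVMetadataFaceObject's normalized bounds (metadata-output space, 0...1)
// into points in the preview layer's coordinate space.
func faceRectInPreviewLayer(_ face: AVMetadataFaceObject,
                            previewLayer: AVCaptureVideoPreviewLayer) -> CGRect {
    return previewLayer.layerRectConverted(fromMetadataOutputRect: face.bounds)
}
```

Alternatively, previewLayer.transformedMetadataObject(for: face) returns a copy of the metadata object whose bounds are already expressed in the layer's coordinates.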
