I am developing an iPhone application in which I use AVFoundation directly to capture video via the camera.
I've implemented a tap-to-focus feature for the user.
- (void)focus:(CGPoint)aPoint
{
#if HAS_AVFF
    Class captureDeviceClass = NSClassFromString(@"AVCaptureDevice");
    if (captureDeviceClass != nil) {
        AVCaptureDevice *device = [captureDeviceClass defaultDeviceWithMediaType:AVMediaTypeVideo];
        if ([device isFocusPointOfInterestSupported] &&
            [device isFocusModeSupported:AVCaptureFocusModeAutoFocus]) {
            CGRect screenRect = [[UIScreen mainScreen] bounds];
            double screenWidth = screenRect.size.width;
            double screenHeight = screenRect.size.height;
            double focus_x = aPoint.x / screenWidth;
            double focus_y = aPoint.y / screenHeight;
            if ([device lockForConfiguration:nil]) {
                [device setFocusPointOfInterest:CGPointMake(focus_x, focus_y)];
                [device setFocusMode:AVCaptureFocusModeAutoFocus];
                if ([device isExposureModeSupported:AVCaptureExposureModeAutoExpose]) {
                    [device setExposureMode:AVCaptureExposureModeAutoExpose];
                }
                [device unlockForConfiguration];
            }
        }
    }
#endif
}
So far so good, but I am missing the feedback rectangle like the one in the stock camera app. Is there any way to tell AVFoundation to show this feedback rectangle, or do I have to implement this feature myself?
Here's what I did: This is the class that creates the square that is shown when the user taps on the camera overlay.
CameraFocusSquare.h
#import <UIKit/UIKit.h>
@interface CameraFocusSquare : UIView
@end
CameraFocusSquare.m
#import "CameraFocusSquare.h"
#import <QuartzCore/QuartzCore.h>
const float squareLength = 80.0f;

@implementation CameraFocusSquare

- (id)initWithFrame:(CGRect)frame
{
    self = [super initWithFrame:frame];
    if (self) {
        // Initialization code
        [self setBackgroundColor:[UIColor clearColor]];
        [self.layer setBorderWidth:2.0];
        [self.layer setCornerRadius:4.0];
        [self.layer setBorderColor:[UIColor whiteColor].CGColor];

        CABasicAnimation *selectionAnimation = [CABasicAnimation animationWithKeyPath:@"borderColor"];
        selectionAnimation.toValue = (id)[UIColor blueColor].CGColor;
        selectionAnimation.repeatCount = 8;
        [self.layer addAnimation:selectionAnimation forKey:@"selectionAnimation"];
    }
    return self;
}

@end
And in the view where you receive your taps, do the following:
- (void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event
{
    UITouch *touch = [[event allTouches] anyObject];
    CGPoint touchPoint = [touch locationInView:touch.view];
    [self focus:touchPoint];

    if (camFocus) {
        [camFocus removeFromSuperview];
    }
    if ([[touch view] isKindOfClass:[FBKVideoRecorderView class]]) {
        camFocus = [[CameraFocusSquare alloc] initWithFrame:CGRectMake(touchPoint.x - 40, touchPoint.y - 40, 80, 80)];
        [camFocus setBackgroundColor:[UIColor clearColor]];
        [self addSubview:camFocus];
        [camFocus setNeedsDisplay];

        [UIView beginAnimations:nil context:NULL];
        [UIView setAnimationDuration:1.5];
        [camFocus setAlpha:0.0];
        [UIView commitAnimations];
    }
}
- (void)focus:(CGPoint)aPoint
{
    Class captureDeviceClass = NSClassFromString(@"AVCaptureDevice");
    if (captureDeviceClass != nil) {
        AVCaptureDevice *device = [captureDeviceClass defaultDeviceWithMediaType:AVMediaTypeVideo];
        if ([device isFocusPointOfInterestSupported] &&
            [device isFocusModeSupported:AVCaptureFocusModeAutoFocus]) {
            CGRect screenRect = [[UIScreen mainScreen] bounds];
            double screenWidth = screenRect.size.width;
            double screenHeight = screenRect.size.height;
            double focus_x = aPoint.x / screenWidth;
            double focus_y = aPoint.y / screenHeight;
            if ([device lockForConfiguration:nil]) {
                [device setFocusPointOfInterest:CGPointMake(focus_x, focus_y)];
                [device setFocusMode:AVCaptureFocusModeAutoFocus];
                if ([device isExposureModeSupported:AVCaptureExposureModeAutoExpose]) {
                    [device setExposureMode:AVCaptureExposureModeAutoExpose];
                }
                [device unlockForConfiguration];
            }
        }
    }
}
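Note that camFocus is assumed to be an instance variable of the view that receives the taps (its declaration isn't shown above); a minimal sketch, assuming the taps land on FBKVideoRecorderView, the class checked in touchesBegan: above:

// Class extension for the view that receives the taps (assumed here to be
// FBKVideoRecorderView, the class checked in touchesBegan: above).
@interface FBKVideoRecorderView () {
    CameraFocusSquare *camFocus; // kept so the previous square can be removed on the next tap
}
@end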
Adding to Anil's brilliant answer: instead of doing the calculations yourself, have a look at AVCaptureVideoPreviewLayer's captureDevicePointOfInterestForPoint:. It gives you a much more consistent focus point (available from iOS 6 onward).
- (void)focus:(CGPoint)aPoint
{
    Class captureDeviceClass = NSClassFromString(@"AVCaptureDevice");
    if (captureDeviceClass != nil) {
        AVCaptureDevice *device = [captureDeviceClass defaultDeviceWithMediaType:AVMediaTypeVideo];
        if ([device isFocusPointOfInterestSupported] &&
            [device isFocusModeSupported:AVCaptureFocusModeAutoFocus]) {
            CGPoint focusPoint = [self.captureVideoPreviewLayer captureDevicePointOfInterestForPoint:aPoint];
            if ([device lockForConfiguration:nil]) {
                [device setFocusPointOfInterest:CGPointMake(focusPoint.x, focusPoint.y)];
                [device setFocusMode:AVCaptureFocusModeAutoFocus];
                if ([device isExposureModeSupported:AVCaptureExposureModeAutoExpose]) {
                    [device setExposureMode:AVCaptureExposureModeAutoExpose];
                }
                [device unlockForConfiguration];
            }
        }
    }
}
The documentation is available here: https://developer.apple.com/library/ios/documentation/AVFoundation/Reference/AVCaptureVideoPreviewLayer_Class/index.html#//apple_ref/occ/instm/AVCaptureVideoPreviewLayer/captureDevicePointOfInterestForPoint:
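This assumes you keep a reference to the preview layer (the self.captureVideoPreviewLayer property used above). A minimal sketch of that setup, where the captureSession property name is an assumption:

// Somewhere in your capture setup code (self.captureSession is an assumed property):
self.captureVideoPreviewLayer = [AVCaptureVideoPreviewLayer layerWithSession:self.captureSession];
self.captureVideoPreviewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
self.captureVideoPreviewLayer.frame = self.view.bounds;
[self.view.layer addSublayer:self.captureVideoPreviewLayer];

The conversion also takes the layer's frame, videoGravity, and orientation into account, which a plain division by the screen size does not.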
Swift implementation:
CameraFocusSquare view:
class CameraFocusSquare: UIView, CAAnimationDelegate {

    internal let kSelectionAnimation: String = "selectionAnimation"
    fileprivate var _selectionBlink: CABasicAnimation?

    convenience init(touchPoint: CGPoint) {
        self.init()
        self.updatePoint(touchPoint)
        self.backgroundColor = UIColor.clear
        self.layer.borderWidth = 2.0
        self.layer.borderColor = UIColor.orange.cgColor
        initBlink()
    }

    override init(frame: CGRect) {
        super.init(frame: frame)
    }

    fileprivate func initBlink() {
        // create the blink animation
        self._selectionBlink = CABasicAnimation(keyPath: "borderColor")
        self._selectionBlink!.toValue = (UIColor.white.cgColor as AnyObject)
        self._selectionBlink!.repeatCount = 3   // number of blinks
        self._selectionBlink!.duration = 0.4    // duration per blink
        self._selectionBlink!.delegate = self
    }

    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    /**
     Updates the location of the view based on the incoming touchPoint.
     */
    func updatePoint(_ touchPoint: CGPoint) {
        let squareWidth: CGFloat = 100
        let frame: CGRect = CGRect(x: touchPoint.x - squareWidth / 2,
                                   y: touchPoint.y - squareWidth / 2,
                                   width: squareWidth,
                                   height: squareWidth)
        self.frame = frame
    }

    /**
     This unhides the view and initiates the animation by adding it to the layer.
     */
    func animateFocusingAction() {
        if let blink = _selectionBlink {
            // make the view visible
            self.alpha = 1.0
            self.isHidden = false
            // initiate the animation
            self.layer.add(blink, forKey: kSelectionAnimation)
        }
    }

    /**
     Hides the view after the animation stops. Since the animation is automatically removed, we don't need to do anything else here.
     */
    public func animationDidStop(_ anim: CAAnimation, finished flag: Bool) {
        if flag {
            // hide the view
            self.alpha = 0.0
            self.isHidden = true
        }
    }
}
Gesture action:
open func tapToFocus(_ gesture: UILongPressGestureRecognizer) {
    if gesture.state == UIGestureRecognizerState.began {
        let touchPoint: CGPoint = gesture.location(in: self.previewView)
        if let fsquare = self.focusSquare {
            fsquare.updatePoint(touchPoint)
        } else {
            self.focusSquare = CameraFocusSquare(touchPoint: touchPoint)
            self.previewView.addSubview(self.focusSquare!)
            self.focusSquare?.setNeedsDisplay()
        }
        self.focusSquare?.animateFocusingAction()

        let convertedPoint: CGPoint = self.previewLayer!.captureDevicePointOfInterest(for: touchPoint)
        let currentDevice: AVCaptureDevice = self.videoDeviceInput!.device
        if currentDevice.isFocusPointOfInterestSupported && currentDevice.isFocusModeSupported(AVCaptureFocusMode.autoFocus) {
            do {
                try currentDevice.lockForConfiguration()
                currentDevice.focusPointOfInterest = convertedPoint
                currentDevice.focusMode = AVCaptureFocusMode.autoFocus
                if currentDevice.isExposureModeSupported(AVCaptureExposureMode.continuousAutoExposure) {
                    currentDevice.exposureMode = AVCaptureExposureMode.continuousAutoExposure
                }
                currentDevice.isSubjectAreaChangeMonitoringEnabled = true
                currentDevice.unlockForConfiguration()
            } catch {
                print("Could not lock the capture device for configuration: \(error)")
            }
        }
    }
}
@Anil's answer is a great start, but it didn't work for me. I wanted the user to be able to keep selecting a focus point, rather than doing so only once (which is what his solution does). Thanks to @Anil for pointing me in the right direction.
There are some differences in my solution:
- Instead of initWithFrame:, I implemented my own initWithTouchPoint:.
- The size of the square is defined inside CameraFocusSquare, which means that it's easier to find and update the size as needed.
CameraFocusSquare.h
@import UIKit;
@interface CameraFocusSquare : UIView
- (instancetype)initWithTouchPoint:(CGPoint)touchPoint;
- (void)updatePoint:(CGPoint)touchPoint;
- (void)animateFocusingAction;
@end
CameraFocusSquare.m
#import "CameraFocusSquare.h"
@interface CameraFocusSquare () <CAAnimationDelegate>
@end

@implementation CameraFocusSquare {
    CABasicAnimation *_selectionBlink;
}
/**
This is the init method for the square. It sets the frame for the view and sets border parameters. It also creates the blink animation.
*/
- (instancetype)initWithTouchPoint:(CGPoint)touchPoint {
    self = [self init];
    if (self) {
        [self updatePoint:touchPoint];
        self.backgroundColor = [UIColor clearColor];
        self.layer.borderWidth = 2.0f;
        self.layer.borderColor = [UIColor orangeColor].CGColor;

        // create the blink animation
        _selectionBlink = [CABasicAnimation animationWithKeyPath:@"borderColor"];
        _selectionBlink.toValue = (id)[UIColor whiteColor].CGColor;
        _selectionBlink.repeatCount = 3;  // number of blinks
        _selectionBlink.duration = 0.4;   // this is duration per blink
        _selectionBlink.delegate = self;
    }
    return self;
}
/**
Updates the location of the view based on the incoming touchPoint.
*/
- (void)updatePoint:(CGPoint)touchPoint {
    CGFloat squareWidth = 50;
    CGRect frame = CGRectMake(touchPoint.x - squareWidth / 2, touchPoint.y - squareWidth / 2, squareWidth, squareWidth);
    self.frame = frame;
}
/**
This unhides the view and initiates the animation by adding it to the layer.
*/
- (void)animateFocusingAction {
    // make the view visible
    self.alpha = 1.0f;
    self.hidden = NO;
    // initiate the animation
    [self.layer addAnimation:_selectionBlink forKey:@"selectionAnimation"];
}
/**
Hides the view after the animation stops. Since the animation is automatically removed, we don't need to do anything else here.
*/
- (void)animationDidStop:(CAAnimation *)animation finished:(BOOL)flag {
    // hide the view
    self.alpha = 0.0f;
    self.hidden = YES;
}
@end
I initiate all of this on top of a view. This allows me greater flexibility and separates my UI code from my controller code (think MVC).
PreviewView.h
@import UIKit;
@interface PreviewView : UIView
- (IBAction)tapToFocus:(UITapGestureRecognizer *)gestureRecognizer;
@end
PreviewView.m
#import "PreviewView.h"
#import "CameraFocusSquare.h"
@implementation PreviewView {
    CameraFocusSquare *_focusSquare;
}

- (IBAction)tapToFocus:(UITapGestureRecognizer *)gestureRecognizer {
    CGPoint touchPoint = [gestureRecognizer locationOfTouch:0 inView:self];
    if (!_focusSquare) {
        _focusSquare = [[CameraFocusSquare alloc] initWithTouchPoint:touchPoint];
        [self addSubview:_focusSquare];
        [_focusSquare setNeedsDisplay];
    } else {
        [_focusSquare updatePoint:touchPoint];
    }
    [_focusSquare animateFocusingAction];
}
@end
Finally, in my UIViewController subclass, I have my UITapGestureRecognizer created and attached to the view. I also implement my tap-to-focus code here.
CameraViewController.m
- (void)viewDidLoad {
    [super viewDidLoad];

    // do other initialization stuff here

    // create the tap-to-focus gesture
    UITapGestureRecognizer *tapToFocusRecognizer = [[UITapGestureRecognizer alloc] initWithTarget:self action:@selector(tapToFocus:)];
    tapToFocusRecognizer.numberOfTapsRequired = 1;
    tapToFocusRecognizer.numberOfTouchesRequired = 1;
    [self.previewView addGestureRecognizer:tapToFocusRecognizer];
}
- (IBAction)tapToFocus:(UITapGestureRecognizer *)tapGestureRecognizer {
    if (!_captureDevice) {
        return;
    }
    if (![_captureDevice isFocusPointOfInterestSupported]) {
        return;
    }
    if (![_captureDevice isFocusModeSupported:AVCaptureFocusModeAutoFocus]) {
        return;
    }

    [self.previewView tapToFocus:tapGestureRecognizer];

    NSError *error;
    if (![_captureDevice lockForConfiguration:&error]) {
        NSLog(@"Error trying to lock configuration of camera. %@", [error localizedDescription]);
        return;
    }

    // convert the touch point to the (0,0)-(1,1) range expected by focusPointOfInterest
    CGPoint touchPoint = [tapGestureRecognizer locationOfTouch:0 inView:self.previewView];
    CGFloat touchX = touchPoint.x / self.previewView.frame.size.width;
    CGFloat touchY = touchPoint.y / self.previewView.frame.size.height;

    _captureDevice.focusMode = AVCaptureFocusModeAutoFocus;
    if ([_captureDevice isExposureModeSupported:AVCaptureExposureModeAutoExpose]) {
        _captureDevice.exposureMode = AVCaptureExposureModeAutoExpose;
    }
    _captureDevice.focusPointOfInterest = CGPointMake(touchX, touchY);
    if ([_captureDevice isExposurePointOfInterestSupported]) {
        _captureDevice.exposurePointOfInterest = CGPointMake(touchX, touchY);
    }
    [_captureDevice unlockForConfiguration];
}
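As a side note: if you keep a reference to your AVCaptureVideoPreviewLayer, you could let it do the coordinate conversion instead of dividing by the view size yourself, as suggested in the earlier answer. A rough sketch (the _previewLayer name is an assumption), which would replace the manual touchX/touchY math inside the locked block:

// Inside the lockForConfiguration block, replacing the manual division
// (_previewLayer is an assumed AVCaptureVideoPreviewLayer ivar/property).
CGPoint touchPoint = [tapGestureRecognizer locationOfTouch:0 inView:self.previewView];
CGPoint devicePoint = [_previewLayer captureDevicePointOfInterestForPoint:touchPoint];
_captureDevice.focusPointOfInterest = devicePoint;
if ([_captureDevice isExposurePointOfInterestSupported]) {
    _captureDevice.exposurePointOfInterest = devicePoint;
}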
Hope this helps people so they can move onto more important code!