diff --git a/wagtail/wagtailimages/backends/base.py b/wagtail/wagtailimages/backends/base.py
index 8aa67ebb37..baa156a81a 100644
--- a/wagtail/wagtailimages/backends/base.py
+++ b/wagtail/wagtailimages/backends/base.py
@@ -60,7 +60,8 @@ class BaseImageBackend(object):
         image_mode, image_data = self.image_data_as_rgb(image)
 
         # Use feature detection to find a focal point
-        focal_point = feature_detection.get_focal_point(image.size, image_mode, image_data)
+        feature_detector = feature_detection.FeatureDetector(image.size, image_mode, image_data)
+        focal_point = feature_detector.get_focal_point()
 
         if focal_point:
             return self.crop_to_point(image, size, focal_point)
diff --git a/wagtail/wagtailimages/utils/feature_detection.py b/wagtail/wagtailimages/utils/feature_detection.py
index daf34c24aa..6381fabf7a 100644
--- a/wagtail/wagtailimages/utils/feature_detection.py
+++ b/wagtail/wagtailimages/utils/feature_detection.py
@@ -13,69 +13,72 @@ except ImportError:
     opencv_available = False
 
 
-from wagtail.wagtailimages.utils.focal_point import FocalPoint, combine_points
+from wagtail.wagtailimages.utils.focal_point import FocalPoint, combine_focal_points
 
 
-def get_cv_gray_image(image_size, image_mode, image_data):
-    image = cv.CreateImageHeader(image_size, cv.IPL_DEPTH_8U, 3)
-    cv.SetData(image, image_data)
+class FeatureDetector(object):
+    def __init__(self, image_size, image_mode, image_data):
+        self.image_size = image_size
+        self.image_mode = image_mode
+        self.image_data = image_data
 
-    gray_image = cv.CreateImage(image_size, 8, 1)
-    convert_mode = getattr(cv, 'CV_%s2GRAY' % image_mode)
-    cv.CvtColor(image, gray_image, convert_mode)
+    def opencv_grey_image(self):
+        image = cv.CreateImageHeader(self.image_size, cv.IPL_DEPTH_8U, 3)
+        cv.SetData(image, self.image_data)
 
-    return gray_image
+        gray_image = cv.CreateImage(self.image_size, 8, 1)
+        convert_mode = getattr(cv, 'CV_%s2GRAY' % self.image_mode)
+        cv.CvtColor(image, gray_image, convert_mode)
 
+        return gray_image
 
-def detect_features(image_size, image_mode, image_data):
-    if opencv_available:
-        image = get_cv_gray_image(image_size, image_mode, image_data)
-        rows = image_size[0]
-        cols = image_size[1]
+    def detect_features(self):
+        if opencv_available:
+            image = self.opencv_grey_image()
+            rows = self.image_size[0]
+            cols = self.image_size[1]
 
-        eig_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
-        temp_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
-        points = cv.GoodFeaturesToTrack(image, eig_image, temp_image, 20, 0.04, 1.0, useHarris=False)
+            eig_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
+            temp_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
+            points = cv.GoodFeaturesToTrack(image, eig_image, temp_image, 20, 0.04, 1.0, useHarris=False)
 
-        if points:
-            return [FocalPoint(x, y, 1) for x, y in points]
+            if points:
+                return [FocalPoint(x, y, 1) for x, y in points]
 
-    return []
+        return []
 
+    def detect_faces(self):
+        if opencv_available:
+            cascade_filename = os.path.join(os.path.dirname(__file__), 'face_detection', 'haarcascade_frontalface_alt2.xml')
+            cascade = cv.Load(cascade_filename)
+            image = self.opencv_grey_image()
 
-def detect_faces(image_size, image_mode, image_data):
-    if opencv_available:
-        cascade_filename = os.path.join(os.path.dirname(__file__), 'face_detection', 'haarcascade_frontalface_alt2.xml')
-        cascade = cv.Load(cascade_filename)
-        image = get_cv_gray_image(image_size, image_mode, image_data)
+            cv.EqualizeHist(image, image)
 
-        cv.EqualizeHist(image, image)
+            min_size = (40, 40)
+            haar_scale = 1.1
+            min_neighbors = 3
+            haar_flags = 0
 
-        min_size = (40, 40)
-        haar_scale = 1.1
-        min_neighbors = 3
-        haar_flags = 0
+            faces = cv.HaarDetectObjects(
+                image, cascade, cv.CreateMemStorage(0),
+                haar_scale, min_neighbors, haar_flags, min_size
+            )
 
-        faces = cv.HaarDetectObjects(
-            image, cascade, cv.CreateMemStorage(0),
-            haar_scale, min_neighbors, haar_flags, min_size
-        )
+            if faces:
+                return [FocalPoint.from_square(face[0][0], face[0][1], face[0][2], face[0][3]) for face in faces]
+
+        return []
+
+    def get_focal_point(self):
+        # Face detection
+        faces = self.detect_faces()
 
         if faces:
-            return [FocalPoint.from_square(face[0][0], face[0][1], face[0][2], face[0][3]) for face in faces]
+            return combine_focal_points(faces)
 
-        return []
+        # Feature detection
+        features = self.detect_features()
 
-
-def get_focal_point(image_size, image_mode, image_data):
-    # Face detection
-    faces = feature_detection.detect_faces(image_size, image_mode, image_data)
-
-    if faces:
-        return combine_points(faces)
-
-    # Feature detection
-    features = feature_detection.detect_features(image_size, image_mode, image_data)
-
-    if features:
-        return focal_point.combine_points(features)
+        if features:
+            return combine_focal_points(features)