Merge branch 'image-smartcropping' of https://github.com/kaedroho/wagtail into kaedroho-image-smartcropping

pull/477/merge
Matt Westcott 2014-07-29 15:51:03 +01:00
commit f70c42e6c3
17 zmienionych plików z 24327 dodań i 55 usunięć

Wyświetl plik

@ -0,0 +1,86 @@
=================
Feature Detection
=================
Wagtail has the ability to automatically detect faces and features inside your images and crop the images to those features.
Feature detection uses OpenCV to detect faces/features in an image when the image is uploaded. The detected features are stored internally as a focal point in the ``focal_point_{x, y, width, height}`` fields on the ``Image`` model. These fields are used by the ``fill`` image filter when an image is rendered in a template to crop the image.
Setup
=====
Feature detection requires OpenCV which can be a bit tricky to install as it's not currently pip-installable.
Installing OpenCV on Debian/Ubuntu
----------------------------------
Debian and Ubuntu provide an apt-get package called ``python-opencv``:
.. code-block:: bash
sudo apt-get install python-opencv python-numpy
This will install PyOpenCV into your site packages. If you are using a virtual environment, you need to make sure site packages are enabled or Wagtail will not be able to import PyOpenCV.
Enabling site packages in the virtual environment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you are not using a virtual environment, you can skip this step.
Enabling site packages is different depending on whether you are using pyvenv (Python 3.3+ only) or virtualenv to manage your virtual environment.
pyvenv
``````
Go into your pyvenv directory and open the ``pyvenv.cfg`` file then set ``include-system-site-packages`` to ``true``.
virtualenv
``````````
Go into your virtualenv directory and delete a file called ``lib/python-x.x/no-global-site-packages.txt``.
Testing the OpenCV installation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can test that OpenCV can be seen by Wagtail by opening up a python shell (with your virtual environment active) and typing:
.. code-block:: python
import cv
If you don't see an ``ImportError``, it worked.
Switching on feature detection in Wagtail
-----------------------------------------
Once OpenCV is installed, you need to set the ``WAGTAILIMAGES_FEATURE_DETECTION_ENABLED`` setting to ``True``:
.. code-block:: python
# settings.py
WAGTAILIMAGES_FEATURE_DETECTION_ENABLED = True
Manually running feature detection
----------------------------------
Feature detection runs when new images are uploaded in to Wagtail. If you already have images in your Wagtail site and would like to run feature detection on them, you will have to run it manually.
You can manually run feature detection on all images by running the following code in the python shell:
.. code-block:: python
from wagtail.wagtailimages.models import Image
for image in Image.objects.all():
if image.focal_point is None:
image.focal_point = image.get_suggested_focal_point()
image.save()

Wyświetl plik

@ -0,0 +1,8 @@
Images
======
.. toctree::
:maxdepth: 2
feature_detection

Wyświetl plik

@ -5,6 +5,7 @@ Core components
:maxdepth: 2
pages/index
images/index
snippets
search/index
form_builder

Wyświetl plik

@ -181,6 +181,7 @@ The available resizing methods are:
Leaves the image at its original size - no resizing is performed.
.. Note::
Wagtail does not allow deforming or stretching images. Image dimension ratios will always be kept. Wagtail also *does not support upscaling*. Small images forced to appear at larger sizes will "max out" at their native dimensions.

Wyświetl plik

@ -1,5 +1,7 @@
from django.conf import settings
from wagtail.wagtailimages.utils import crop
class BaseImageBackend(object):
def __init__(self, params):
@ -27,10 +29,34 @@ class BaseImageBackend(object):
"""
raise NotImplementedError('subclasses of BaseImageBackend must provide an resize() method')
def crop_to_centre(self, image, size):
raise NotImplementedError('subclasses of BaseImageBackend must provide a crop_to_centre() method')
def image_data_as_rgb(self, image):
raise NotImplementedError('subclasses of BaseImageBackend must provide an image_data_as_rgb() method')
def resize_to_max(self, image, size):
def crop(self, image, crop_box):
raise NotImplementedError('subclasses of BaseImageBackend must provide a crop() method')
def crop_to_centre(self, image, size):
    """Crop ``image`` down to ``size`` around its centre.

    The box arithmetic is delegated to ``crop.crop_to_centre``; when the
    computed box already covers the whole image there is nothing to trim,
    so the original image object is returned unchanged.
    """
    crop_box = crop.crop_to_centre(image.size, size)
    if crop_box.size == image.size:
        # Requested size meets or exceeds the image — no crop needed.
        return image
    return self.crop(image, crop_box)
def crop_to_point(self, image, size, focal_point):
    """Crop ``image`` to ``size`` while keeping ``focal_point`` in view.

    The crop box is computed by ``crop.crop_to_point``. If the focal point
    is larger than the requested size, the box comes back bigger than
    ``size``, in which case the cropped result is resized down to fill.
    """
    crop_box = crop.crop_to_point(image.size, size, focal_point)

    # Don't crop if we don't need to
    if crop_box.size != image.size:
        image = self.crop(image, crop_box)

    # If the focal points are too large, the cropping system may not
    # crop it fully, resize the image if this has happened:
    if crop_box.size != size:
        image = self.resize_to_fill(image, size)

    return image
def resize_to_max(self, image, size, focal_point=None):
"""
Resize image down to fit within the given dimensions, preserving aspect ratio.
Will leave image unchanged if it's already within those dimensions.
@ -54,7 +80,7 @@ class BaseImageBackend(object):
return self.resize(image, final_size)
def resize_to_min(self, image, size):
def resize_to_min(self, image, size, focal_point=None):
"""
Resize image down to cover the given dimensions, preserving aspect ratio.
Will leave image unchanged if width or height is already within those limits.
@ -78,7 +104,7 @@ class BaseImageBackend(object):
return self.resize(image, final_size)
def resize_to_width(self, image, target_width):
def resize_to_width(self, image, target_width, focal_point=None):
"""
Resize image down to the given width, preserving aspect ratio.
Will leave image unchanged if it's already within that width.
@ -94,7 +120,7 @@ class BaseImageBackend(object):
return self.resize(image, final_size)
def resize_to_height(self, image, target_height):
def resize_to_height(self, image, target_height, focal_point=None):
"""
Resize image down to the given height, preserving aspect ratio.
Will leave image unchanged if it's already within that height.
@ -110,16 +136,18 @@ class BaseImageBackend(object):
return self.resize(image, final_size)
def resize_to_fill(self, image, size):
def resize_to_fill(self, image, size, focal_point=None):
"""
Resize down and crop image to fill the given dimensions. Most suitable for thumbnails.
(The final image will match the requested size, unless one or the other dimension is
already smaller than the target size)
"""
resized_image = self.resize_to_min(image, size)
return self.crop_to_centre(resized_image, size)
if focal_point is not None:
return self.crop_to_point(image, size, focal_point)
else:
resized_image = self.resize_to_min(image, size)
return self.crop_to_centre(resized_image, size)
def no_operation(self, image, param):
def no_operation(self, image, param, focal_point=None):
"""Return the image unchanged"""
return image

Wyświetl plik

@ -1,8 +1,9 @@
from __future__ import absolute_import
from .base import BaseImageBackend
import PIL.Image
from wagtail.wagtailimages.backends.base import BaseImageBackend
class PillowBackend(BaseImageBackend):
def __init__(self, params):
@ -20,19 +21,15 @@ class PillowBackend(BaseImageBackend):
image = image.convert('RGB')
return image.resize(size, PIL.Image.ANTIALIAS)
def crop_to_centre(self, image, size):
(original_width, original_height) = image.size
(target_width, target_height) = size
def crop(self, image, crop_box):
    # crop_box is accepted by PIL as a (left, top, right, bottom) box;
    # CropBox supports this via its __getitem__ implementation.
    return image.crop(crop_box)
# final dimensions should not exceed original dimensions
final_width = min(original_width, target_width)
final_height = min(original_height, target_height)
def image_data_as_rgb(self, image):
# https://github.com/thumbor/thumbor/blob/f52360dc96eedd9fc914fcf19eaf2358f7e2480c/thumbor/engines/pil.py#L206-L215
if image.mode not in ['RGB', 'RGBA']:
if 'A' in image.mode:
image = image.convert('RGBA')
else:
image = image.convert('RGB')
if final_width == original_width and final_height == original_height:
return image
left = (original_width - final_width) / 2
top = (original_height - final_height) / 2
return image.crop(
(left, top, left + final_width, top + final_height)
)
return image.mode, image.tostring()

Wyświetl plik

@ -1,9 +1,10 @@
from __future__ import absolute_import
from .base import BaseImageBackend
from wand.image import Image
from wand.api import library
from wagtail.wagtailimages.backends.base import BaseImageBackend
class WandBackend(BaseImageBackend):
def __init__(self, params):
@ -24,22 +25,17 @@ class WandBackend(BaseImageBackend):
new_image.resize(size[0], size[1])
return new_image
def crop_to_centre(self, image, size):
(original_width, original_height) = image.size
(target_width, target_height) = size
# final dimensions should not exceed original dimensions
final_width = min(original_width, target_width)
final_height = min(original_height, target_height)
if final_width == original_width and final_height == original_height:
return image
left = (original_width - final_width) / 2
top = (original_height - final_height) / 2
def crop(self, image, crop_box):
new_image = image.clone()
new_image.crop(
left=left, top=top, right=left + final_width, bottom=top + final_height
left=crop_box[0], top=crop_box[1], right=crop_box[2], bottom=crop_box[3]
)
return new_image
def image_data_as_rgb(self, image):
    """Return a (mode, raw RGB bytes) pair for this image.

    Returns None for animated images, which downstream feature
    detection cannot process.
    """
    # Only return image data if this image is not animated
    if image.animation:
        return
    return 'RGB', image.make_blob('RGB')

Wyświetl plik

@ -8,7 +8,7 @@ from taggit.managers import TaggableManager
from django.core.files import File
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.db import models
from django.db.models.signals import pre_delete
from django.db.models.signals import pre_delete, pre_save
from django.dispatch.dispatcher import receiver
from django.utils.safestring import mark_safe
from django.utils.html import escape, format_html_join
@ -21,7 +21,9 @@ from unidecode import unidecode
from wagtail.wagtailadmin.taggable import TagSearchable
from wagtail.wagtailimages.backends import get_image_backend
from wagtail.wagtailsearch import indexed
from .utils import validate_image_format
from wagtail.wagtailimages.utils.validators import validate_image_format
from wagtail.wagtailimages.utils.focal_point import FocalPoint
from wagtail.wagtailimages.utils.feature_detection import FeatureDetector, opencv_available
@python_2_unicode_compatible
@ -49,6 +51,11 @@ class AbstractImage(models.Model, TagSearchable):
tags = TaggableManager(help_text=None, blank=True, verbose_name=_('Tags'))
focal_point_x = models.PositiveIntegerField(null=True, editable=False)
focal_point_y = models.PositiveIntegerField(null=True, editable=False)
focal_point_width = models.PositiveIntegerField(null=True, editable=False)
focal_point_height = models.PositiveIntegerField(null=True, editable=False)
search_fields = TagSearchable.search_fields + (
indexed.FilterField('uploaded_by_user'),
)
@ -56,6 +63,60 @@ class AbstractImage(models.Model, TagSearchable):
def __str__(self):
return self.title
@property
def focal_point(self):
    """This image's focal point as a ``FocalPoint``, or None if unset.

    A focal point exists only when all four underlying database fields
    (``focal_point_x/y/width/height``) are populated.
    """
    fields = (
        self.focal_point_x,
        self.focal_point_y,
        self.focal_point_width,
        self.focal_point_height,
    )
    if any(field is None for field in fields):
        return None
    return FocalPoint(
        self.focal_point_x,
        self.focal_point_y,
        width=self.focal_point_width,
        height=self.focal_point_height,
    )

@focal_point.setter
def focal_point(self, focal_point):
    """Unpack a ``FocalPoint`` into the four DB fields; None clears them."""
    if focal_point is None:
        self.focal_point_x = None
        self.focal_point_y = None
        self.focal_point_width = None
        self.focal_point_height = None
    else:
        self.focal_point_x = focal_point.x
        self.focal_point_y = focal_point.y
        self.focal_point_width = focal_point.width
        self.focal_point_height = focal_point.height
def get_suggested_focal_point(self, backend_name='default'):
    """Run feature detection on this image and return a suggested FocalPoint.

    Returns None when nothing can be suggested (e.g. the image is animated,
    so image_data_as_rgb yields no data). The result is NOT saved — callers
    assign it to ``self.focal_point`` themselves.
    """
    backend = get_image_backend(backend_name)
    image_file = self.file.file

    # Make sure image is open and seeked to the beginning
    image_file.open('rb')
    image_file.seek(0)

    # Load the image
    image = backend.open_image(self.file.file)
    image_data = backend.image_data_as_rgb(image)

    # Make sure we have image data
    # If the image is animated, image_data_as_rgb will return None
    if image_data is None:
        return

    # Use feature detection to find a focal point
    feature_detector = FeatureDetector(image.size, image_data[0], image_data[1])
    focal_point = feature_detector.get_focal_point()

    # Add 20% extra room around the edge of the focal point
    if focal_point:
        focal_point.width *= 1.20
        focal_point.height *= 1.20

    return focal_point
def get_rendition(self, filter):
if not hasattr(filter, 'process_image'):
# assume we've been passed a filter spec string, rather than a Filter object
@ -63,17 +124,36 @@ class AbstractImage(models.Model, TagSearchable):
filter, created = Filter.objects.get_or_create(spec=filter)
try:
rendition = self.renditions.get(filter=filter)
if self.focal_point:
rendition = self.renditions.get(
filter=filter,
focal_point_key=self.focal_point.get_key(),
)
else:
rendition = self.renditions.get(
filter=filter,
focal_point_key=None,
)
except ObjectDoesNotExist:
file_field = self.file
# If we have a backend attribute then pass it to process
# image - else pass 'default'
backend_name = getattr(self, 'backend', 'default')
generated_image_file = filter.process_image(file_field.file, backend_name=backend_name)
generated_image_file = filter.process_image(file_field.file, focal_point=self.focal_point, backend_name=backend_name)
rendition, created = self.renditions.get_or_create(
filter=filter, defaults={'file': generated_image_file})
if self.focal_point:
rendition, created = self.renditions.get_or_create(
filter=filter,
focal_point_key=self.focal_point.get_key(),
defaults={'file': generated_image_file}
)
else:
rendition, created = self.renditions.get_or_create(
filter=filter,
focal_point_key=None,
defaults={'file': generated_image_file}
)
return rendition
@ -112,6 +192,19 @@ class Image(AbstractImage):
pass
# Do smartcropping calculations when user saves an image without a focal point
@receiver(pre_save, sender=Image)
def image_feature_detection(sender, instance, **kwargs):
    """pre_save receiver: suggest a focal point via OpenCV feature detection.

    Only active when the ``WAGTAILIMAGES_FEATURE_DETECTION_ENABLED`` setting
    is True; never overwrites a focal point that is already set.
    """
    if not getattr(settings, 'WAGTAILIMAGES_FEATURE_DETECTION_ENABLED', False):
        return

    if not opencv_available:
        raise ImproperlyConfigured("pyOpenCV could not be found.")

    # Make sure the image doesn't already have a focal point
    if instance.focal_point is None:
        # Set the focal point
        instance.focal_point = instance.get_suggested_focal_point()
# Receive the pre_delete signal and delete the file associated with the model instance.
@receiver(pre_delete, sender=Image)
def image_delete(sender, instance, **kwargs):
@ -187,7 +280,7 @@ class Filter(models.Model):
# Spec is not one of our recognised patterns
raise ValueError("Invalid image filter spec: %r" % self.spec)
def process_image(self, input_file, backend_name='default'):
def process_image(self, input_file, focal_point=None, backend_name='default'):
"""
Given an input image file as a django.core.files.File object,
generate an output image with this filter applied, returning it
@ -205,7 +298,7 @@ class Filter(models.Model):
method = getattr(backend, self.method_name)
image = method(image, self.method_arg)
image = method(image, self.method_arg, focal_point=focal_point)
output = BytesIO()
backend.save_image(image, output, file_format)
@ -213,11 +306,16 @@ class Filter(models.Model):
# and then close the input file
input_file.close()
# generate new filename derived from old one, inserting the filter spec string before the extension
# generate new filename derived from old one, inserting the filter spec and focal point string before the extension
if focal_point is not None:
focal_point_key = "focus-" + focal_point.get_key()
else:
focal_point_key = "focus-none"
input_filename_parts = os.path.basename(input_file.name).split('.')
filename_without_extension = '.'.join(input_filename_parts[:-1])
filename_without_extension = filename_without_extension[:60] # trim filename base so that we're well under 100 chars
output_filename_parts = [filename_without_extension, self.spec] + input_filename_parts[-1:]
output_filename_parts = [filename_without_extension, focal_point_key, self.spec] + input_filename_parts[-1:]
output_filename = '.'.join(output_filename_parts)
output_file = File(output, name=output_filename)
@ -230,6 +328,7 @@ class AbstractRendition(models.Model):
file = models.ImageField(upload_to='images', width_field='width', height_field='height')
width = models.IntegerField(editable=False)
height = models.IntegerField(editable=False)
focal_point_key = models.CharField(max_length=255, null=True, editable=False)
@property
def url(self):
@ -257,7 +356,7 @@ class Rendition(AbstractRendition):
class Meta:
unique_together = (
('image', 'filter'),
('image', 'filter', 'focal_point_key'),
)

Wyświetl plik

@ -0,0 +1,136 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration adding smart-cropping focal point support.

    Adds the four ``focal_point_*`` fields to Image and a
    ``focal_point_key`` field to Rendition, and widens Rendition's unique
    constraint from (image, filter) to (image, filter, focal_point_key) so
    that each focal point gets its own cached rendition.
    """

    def forwards(self, orm):
        # Removing unique constraint on 'Rendition', fields ['image', 'filter']
        db.delete_unique('wagtailimages_rendition', ['image_id', 'filter_id'])

        # Adding field 'Image.focal_point_x'
        db.add_column('wagtailimages_image', 'focal_point_x',
                      self.gf('django.db.models.fields.PositiveIntegerField')(null=True),
                      keep_default=False)

        # Adding field 'Image.focal_point_y'
        db.add_column('wagtailimages_image', 'focal_point_y',
                      self.gf('django.db.models.fields.PositiveIntegerField')(null=True),
                      keep_default=False)

        # Adding field 'Image.focal_point_width'
        db.add_column('wagtailimages_image', 'focal_point_width',
                      self.gf('django.db.models.fields.PositiveIntegerField')(null=True),
                      keep_default=False)

        # Adding field 'Image.focal_point_height'
        db.add_column('wagtailimages_image', 'focal_point_height',
                      self.gf('django.db.models.fields.PositiveIntegerField')(null=True),
                      keep_default=False)

        # Adding field 'Rendition.focal_point_key'
        db.add_column('wagtailimages_rendition', 'focal_point_key',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True),
                      keep_default=False)

        # Adding unique constraint on 'Rendition', fields ['image', 'filter', 'focal_point_key']
        db.create_unique('wagtailimages_rendition', ['image_id', 'filter_id', 'focal_point_key'])

    def backwards(self, orm):
        # Removing unique constraint on 'Rendition', fields ['image', 'filter', 'focal_point_key']
        db.delete_unique('wagtailimages_rendition', ['image_id', 'filter_id', 'focal_point_key'])

        # Deleting field 'Image.focal_point_x'
        db.delete_column('wagtailimages_image', 'focal_point_x')

        # Deleting field 'Image.focal_point_y'
        db.delete_column('wagtailimages_image', 'focal_point_y')

        # Deleting field 'Image.focal_point_width'
        db.delete_column('wagtailimages_image', 'focal_point_width')

        # Deleting field 'Image.focal_point_height'
        db.delete_column('wagtailimages_image', 'focal_point_height')

        # Deleting field 'Rendition.focal_point_key'
        db.delete_column('wagtailimages_rendition', 'focal_point_key')

        # Adding unique constraint on 'Rendition', fields ['image', 'filter']
        db.create_unique('wagtailimages_rendition', ['image_id', 'filter_id'])

    # Frozen ORM snapshot: South uses this to build the `orm` argument
    # passed to forwards()/backwards() above.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'wagtailimages.filter': {
            'Meta': {'object_name': 'Filter'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'spec': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
        },
        'wagtailimages.image': {
            'Meta': {'object_name': 'Image'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'focal_point_height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'focal_point_width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'focal_point_x': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'focal_point_y': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'height': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'uploaded_by_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'width': ('django.db.models.fields.IntegerField', [], {})
        },
        'wagtailimages.rendition': {
            'Meta': {'unique_together': "(('image', 'filter', 'focal_point_key'),)", 'object_name': 'Rendition'},
            'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'filter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['wagtailimages.Filter']"}),
            'focal_point_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'height': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'renditions'", 'to': "orm['wagtailimages.Image']"}),
            'width': ('django.db.models.fields.IntegerField', [], {})
        }
    }

    complete_apps = ['wagtailimages']

Wyświetl plik

@ -20,6 +20,9 @@ from wagtail.wagtailimages.formats import (
from wagtail.wagtailimages.backends import get_image_backend
from wagtail.wagtailimages.backends.pillow import PillowBackend
from wagtail.wagtailimages.utils.crop import crop_to_point, CropBox
from wagtail.wagtailimages.utils.focal_point import FocalPoint
def get_test_image_file():
@ -681,3 +684,67 @@ class TestMultipleImageUploader(TestCase, WagtailTestUtils):
# Check response
self.assertEqual(response.status_code, 400)
class TestCropToPoint(TestCase):
    """Unit tests for the focal-point-aware crop_to_point() box calculator."""

    def test_basic(self):
        "Test basic cropping in the centre of the image"
        result = crop_to_point((640, 480), (100, 100), FocalPoint(x=320, y=240))
        self.assertEqual(result, CropBox(270, 190, 370, 290))

    def test_basic_no_focal_point(self):
        "If focal point is None, it should make one in the centre of the image"
        result = crop_to_point((640, 480), (100, 100), None)
        self.assertEqual(result, CropBox(270, 190, 370, 290))

    def test_doesnt_exit_top_left(self):
        "Test that the cropbox doesn't exit the image at the top left"
        result = crop_to_point((640, 480), (100, 100), FocalPoint(x=0, y=0))
        self.assertEqual(result, CropBox(0, 0, 100, 100))

    def test_doesnt_exit_bottom_right(self):
        "Test that the cropbox doesn't exit the image at the bottom right"
        result = crop_to_point((640, 480), (100, 100), FocalPoint(x=640, y=480))
        self.assertEqual(result, CropBox(540, 380, 640, 480))

    def test_doesnt_get_smaller_than_focal_point(self):
        "Test that the cropbox doesn't get any smaller than the focal point"
        result = crop_to_point((640, 480), (10, 10), FocalPoint(x=320, y=240, width=100, height=100))
        self.assertEqual(result, CropBox(270, 190, 370, 290))

    def test_keeps_composition(self):
        "Test that the cropbox tries to keep the composition of the original image as much as it can"
        result = crop_to_point((300, 300), (150, 150), FocalPoint(x=100, y=200))
        # Focal point is 1/3 across and 2/3 down in the crop box
        self.assertEqual(result, CropBox(50, 100, 200, 250))

    def test_keeps_focal_point_in_view_bottom_left(self):
        """
        Even though it tries to keep the composition of the image,
        it shouldn't let that get in the way of keeping the entire subject in view
        """
        result = crop_to_point((300, 300), (150, 150), FocalPoint(x=100, y=200, width=150, height=150))
        self.assertEqual(result, CropBox(25, 125, 175, 275))

    def test_keeps_focal_point_in_view_top_right(self):
        """
        Even though it tries to keep the composition of the image,
        it shouldn't let that get in the way of keeping the entire subject in view
        """
        result = crop_to_point((300, 300), (150, 150), FocalPoint(x=200, y=100, width=150, height=150))
        self.assertEqual(result, CropBox(125, 25, 275, 175))

Wyświetl plik

@ -0,0 +1,121 @@
from __future__ import division
from wagtail.wagtailimages.utils.focal_point import FocalPoint
class CropBox(object):
    """Axis-aligned crop rectangle with integer pixel coordinates.

    Follows the PIL box convention: (left, top) is the top-left corner and
    (right, bottom) the bottom-right. All values are truncated to int on
    construction. Supports __getitem__ so it can be passed anywhere a
    4-tuple box is expected (e.g. PIL's Image.crop()).
    """

    def __init__(self, left, top, right, bottom):
        self.left = int(left)
        self.top = int(top)
        self.right = int(right)
        self.bottom = int(bottom)

    def __getitem__(self, key):
        # Tuple-like access: box[0] == left, ..., box[3] == bottom.
        return (self.left, self.top, self.right, self.bottom)[key]

    @property
    def width(self):
        return self.right - self.left

    @property
    def height(self):
        return self.bottom - self.top

    @property
    def size(self):
        return self.width, self.height

    def as_tuple(self):
        return self.left, self.top, self.right, self.bottom

    def __eq__(self, other):
        # Fixed: return NotImplemented for objects without as_tuple()
        # instead of raising AttributeError, so comparisons against
        # arbitrary objects fall back to Python's default handling.
        if not hasattr(other, 'as_tuple'):
            return NotImplemented
        return self.as_tuple() == other.as_tuple()

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # Defining __eq__ disables the default hash in Python 3;
        # provide one so equal boxes hash equally and stay usable in sets/dicts.
        return hash(self.as_tuple())

    def __repr__(self):
        return 'CropBox(left: %d, top: %d, right: %d, bottom: %d)' % (
            self.left, self.top, self.right, self.bottom
        )
def crop_to_centre(image_size, crop_size):
    """Return a CropBox of at most ``crop_size``, centred within ``image_size``.

    Both arguments are (width, height) tuples. The crop is clamped so it
    never exceeds the image's own dimensions.
    """
    image_width, image_height = image_size
    requested_width, requested_height = crop_size

    # Clamp the requested crop to the image bounds.
    width = min(image_width, requested_width)
    height = min(image_height, requested_height)

    left = (image_width - width) / 2
    top = (image_height - height) / 2
    return CropBox(left, top, left + width, top + height)
def crop_to_point(image_size, crop_size, focal_point):
    """Return a CropBox of ``crop_size`` positioned around ``focal_point``.

    The box is placed so the focal point keeps the same relative position
    in the crop as in the original image (preserving composition), then
    shifted as needed so that the whole focal point stays in view and the
    box stays inside the image. The box never shrinks below the focal
    point's own size, nor grows beyond the image.
    """
    image_width, image_height = image_size
    requested_width, requested_height = crop_size

    if not focal_point:
        # No focal point given: behave like a centre crop.
        focal_point = FocalPoint(image_width / 2, image_height / 2)

    # Never crop tighter than the focal point itself...
    requested_width = max(requested_width, focal_point.width)
    requested_height = max(requested_height, focal_point.height)

    # ...and never larger than the image.
    width = min(image_width, requested_width)
    height = min(image_height, requested_height)

    # Relative (u, v) position of the focal point within the image;
    # place it at the same relative position inside the crop box.
    u = focal_point.x / image_width
    v = focal_point.y / image_height

    left = focal_point.x - u * width
    top = focal_point.y - v * height
    right = left + width
    bottom = top + height

    # Shift the box so the entire focal point extent stays in view.
    fp_left = focal_point.x - focal_point.width / 2
    fp_top = focal_point.y - focal_point.height / 2
    fp_right = focal_point.x + focal_point.width / 2
    fp_bottom = focal_point.y + focal_point.height / 2

    if left > fp_left:
        right -= left - fp_left
        left = fp_left
    if top > fp_top:
        bottom -= top - fp_top
        top = fp_top
    if right < fp_right:
        left += fp_right - right
        right = fp_right
    if bottom < fp_bottom:
        top += fp_bottom - bottom
        bottom = fp_bottom

    # Finally, shift the box back inside the image boundary.
    if left < 0:
        right -= left
        left = 0
    if top < 0:
        bottom -= top
        top = 0
    if right > image_width:
        left -= right - image_width
        right = image_width
    if bottom > image_height:
        top -= bottom - image_height
        bottom = image_height

    return CropBox(left, top, right, bottom)

Wyświetl plik

@ -0,0 +1,84 @@
import os
try:
import cv
opencv_available = True
except ImportError:
try:
import cv2.cv as cv
opencv_available = True
except ImportError:
opencv_available = False
from wagtail.wagtailimages.utils.focal_point import FocalPoint, combine_focal_points
class FeatureDetector(object):
    """Detects faces and interesting features in raw image data using OpenCV.

    Constructed from the image size, a mode string (e.g. 'RGB') and the
    raw pixel bytes — typically produced by an image backend's
    image_data_as_rgb(). Detection methods degrade gracefully (return an
    empty list) when OpenCV is not installed.
    """

    def __init__(self, image_size, image_mode, image_data):
        # image_size: (width, height) tuple.
        # image_mode: pixel layout name, used to pick the CV_<mode>2GRAY constant.
        # image_data: raw pixel buffer for the image.
        self.image_size = image_size
        self.image_mode = image_mode
        self.image_data = image_data

    def opencv_grey_image(self):
        """Build a single-channel greyscale OpenCV image from the raw data."""
        image = cv.CreateImageHeader(self.image_size, cv.IPL_DEPTH_8U, 3)
        cv.SetData(image, self.image_data)

        gray_image = cv.CreateImage(self.image_size, 8, 1)
        convert_mode = getattr(cv, 'CV_%s2GRAY' % self.image_mode)
        cv.CvtColor(image, gray_image, convert_mode)
        return gray_image

    def detect_features(self):
        """Return a list of single-pixel FocalPoints for strong corner features."""
        if opencv_available:
            image = self.opencv_grey_image()

            # cv.CreateMat takes (rows, cols): rows is the image HEIGHT and
            # cols the WIDTH. image_size is (width, height), so height is at
            # index 1. (Previously these were swapped, which gives
            # GoodFeaturesToTrack scratch matrices of the wrong shape for
            # non-square images.)
            rows = self.image_size[1]
            cols = self.image_size[0]

            eig_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
            temp_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
            points = cv.GoodFeaturesToTrack(image, eig_image, temp_image, 20, 0.04, 1.0, useHarris=False)
            if points:
                return [FocalPoint(x, y, 1) for x, y in points]

        return []

    def detect_faces(self):
        """Return a list of FocalPoints for faces found by the Haar cascade."""
        if opencv_available:
            cascade_filename = os.path.join(os.path.dirname(__file__), 'face_detection', 'haarcascade_frontalface_alt2.xml')
            cascade = cv.Load(cascade_filename)
            image = self.opencv_grey_image()

            # Equalising the histogram improves detection on low-contrast images.
            cv.EqualizeHist(image, image)

            min_size = (40, 40)
            haar_scale = 1.1
            min_neighbors = 3
            haar_flags = 0

            faces = cv.HaarDetectObjects(
                image, cascade, cv.CreateMemStorage(0),
                haar_scale, min_neighbors, haar_flags, min_size
            )

            if faces:
                # Each detection is ((x, y, w, h), neighbours); keep the rect.
                return [FocalPoint.from_square(face[0][0], face[0][1], face[0][2], face[0][3]) for face in faces]

        return []

    def get_focal_point(self):
        """Combine detections into one focal point; faces take priority.

        Returns None when nothing is detected (or OpenCV is unavailable).
        """
        # Face detection
        faces = self.detect_faces()
        if faces:
            return combine_focal_points(faces)

        # Feature detection
        features = self.detect_features()
        if features:
            return combine_focal_points(features)

Wyświetl plik

@ -0,0 +1,98 @@
# https://github.com/thumbor/thumbor/blob/8a50bfba9443e8d2a1a691ab20eeb525815be597/thumbor/point.py
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
class FocalPoint(object):
    """A weighted region of interest within an image.

    (x, y) is the centre of the region, width/height its extent, and
    weight its relative importance when multiple points are combined.
    Adapted from thumbor:
    https://github.com/thumbor/thumbor/blob/8a50bfba9443e8d2a1a691ab20eeb525815be597/thumbor/point.py
    """

    # Fractional positions used by from_alignment().
    ALIGNMENT_PERCENTAGES = {
        'left': 0.0,
        'center': 0.5,
        'right': 1.0,
        'top': 0.0,
        'middle': 0.5,
        'bottom': 1.0
    }

    def __init__(self, x, y, height=1, width=1, weight=1.0, origin="alignment"):
        self.x = x
        self.y = y
        self.height = height
        self.width = width
        self.weight = weight
        self.origin = origin

    def to_dict(self):
        """Serialise to a plain dict; note the weight is stored under 'z'."""
        return {
            'x': self.x,
            'y': self.y,
            'z': self.weight,
            'height': self.height,
            'width': self.width,
            'origin': self.origin
        }

    @classmethod
    def from_dict(cls, values):
        """Rebuild a FocalPoint from a to_dict()-style mapping."""
        return cls(
            x=float(values['x']),
            y=float(values['y']),
            weight=float(values['z']),
            width=float(values.get('width', 1)),
            height=float(values.get('height', 1)),
            origin=values.get('origin', 'alignment')
        )

    @classmethod
    def from_square(cls, x, y, width, height, origin='detection'):
        """Build a point from a top-left-anchored rectangle; weight is its area."""
        cx = x + (width / 2)
        cy = y + (height / 2)
        return cls(cx, cy, height=height, width=width, weight=width * height, origin=origin)

    @classmethod
    def from_alignment(cls, halign, valign, width, height):
        """Build a point from alignment keywords within a width x height area."""
        px = width * cls.ALIGNMENT_PERCENTAGES[halign]
        py = height * cls.ALIGNMENT_PERCENTAGES[valign]
        return cls(px, py)

    def get_key(self):
        """A compact string uniquely identifying this point's geometry."""
        return "%(x)d-%(y)d-%(width)dx%(height)d" % self.to_dict()

    def __repr__(self):
        return 'FocalPoint(x: %d, y: %d, width: %d, height: %d, weight: %d, origin: %s)' % (
            self.x, self.y, self.width, self.height, self.weight, self.origin
        )
def combine_focal_points(focal_points):
    """Merge several focal points into a single one.

    The position is the weight-weighted centroid of the points, the size is
    the bounding box of all points' extents, and the weight is the total of
    the individual weights. Returns None for an empty/falsy input.

    https://github.com/thumbor/thumbor/blob/fc75f2d617942e3548986fe8403ad717fc9978ba/thumbor/transformer.py#L255-L269
    """
    if not focal_points:
        return

    # Weighted centroid. float() keeps the divisions exact even under
    # Python 2 integer-division rules.
    total_weight = float(sum(point.weight for point in focal_points))
    x = sum(point.x * point.weight for point in focal_points) / total_weight
    y = sum(point.y * point.weight for point in focal_points) / total_weight

    # Bounding box covering every point's own extent.
    min_x = min(point.x - point.width / 2 for point in focal_points)
    min_y = min(point.y - point.height / 2 for point in focal_points)
    max_x = max(point.x + point.width / 2 for point in focal_points)
    max_y = max(point.y + point.height / 2 for point in focal_points)

    return FocalPoint(x, y, width=max_x - min_x, height=max_y - min_y, weight=total_weight)

Wyświetl plik

@ -3,7 +3,7 @@ import os
from PIL import Image
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext_lazy as _
def validate_image_format(f):

Wyświetl plik

@ -12,7 +12,7 @@ from django.utils.translation import ugettext as _
from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.forms import get_image_form_for_multi
from wagtail.wagtailimages.utils import validate_image_format
from wagtail.wagtailimages.utils.validators import validate_image_format
def json_response(document):