Support for animated formats (gif/webp/apng) #628

Open · wants to merge 5 commits into master

133 changes: 98 additions & 35 deletions easy_thumbnails/processors.py
@@ -1,5 +1,7 @@
import itertools
import re
from functools import partial
from io import BytesIO

from PIL import Image, ImageChops, ImageFilter
from easy_thumbnails import utils
@@ -35,6 +37,31 @@ def _points_table():
yield j


class FrameAware:
    """Apply a PIL image method to every frame of an animated image.

    Single-frame images are returned unchanged: ``__new__`` hands back the
    original image instead of a wrapper.
    """

    def __new__(cls, im):
if getattr(im, "n_frames", 1) > 1:
return super().__new__(cls)
return im

def __init__(self, im):
self.im = im

def apply_to_frames(self, method, *args, **kwargs):
new_frames = []
for i in range(self.im.n_frames):
self.im.seek(i)
new_frames.append(method(*args, **kwargs))
write_to = BytesIO()
new_frames[0].save(
write_to, format=self.im.format, save_all=True, append_images=new_frames[1:]
)
return Image.open(write_to)

def __getattr__(self, key):
method = getattr(self.im, key)
return partial(self.apply_to_frames, method)
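
# Usage sketch (illustrative only; the file name is hypothetical): any PIL
# method called through FrameAware is applied to every frame and the frames
# are reassembled into a new animated image, while single-frame images fall
# through to the plain PIL method.
#
#   im = Image.open("animation.gif")
#   small = FrameAware(im).resize((100, 100))
#   small.n_frames == im.n_frames  # frame count is preserved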


def colorspace(im, bw=False, replace_alpha=False, **kwargs):
"""
Convert images to the correct color space.
@@ -54,32 +81,46 @@ def colorspace(im, bw=False, replace_alpha=False, **kwargs):
white.

"""
-    if im.mode == 'I':
+    if im.mode == "I":
        # PIL (and Pillow) can't convert 16-bit grayscale images to lower
        # modes, so manually convert them to an 8-bit grayscale.
-        im = im.point(list(_points_table()), 'L')
+        im = FrameAware(im).point(list(_points_table()), "L")

is_transparent = utils.is_transparent(im)
-    is_grayscale = im.mode in ('L', 'LA')
+    is_grayscale = im.mode in ("L", "LA")
new_mode = im.mode
if is_grayscale or bw:
-        new_mode = 'L'
+        new_mode = "L"
    else:
-        new_mode = 'RGB'
+        new_mode = "RGB"

if is_transparent:
if replace_alpha:
-            if im.mode != 'RGBA':
-                im = im.convert('RGBA')
-            base = Image.new('RGBA', im.size, replace_alpha)
-            base.paste(im, mask=im)
-            im = base
+            if not getattr(im, "is_animated", False):
+                if im.mode != "RGBA":
+                    im = FrameAware(im).convert("RGBA")
+                base = Image.new("RGBA", im.size, replace_alpha)
+                base.paste(im, mask=im)
+                im = base
+            else:
+                frames = []
+                for i in range(im.n_frames):
+                    im.seek(i)
+                    if im.mode != "RGBA":
+                        im = FrameAware(im).convert("RGBA")
+                    base = Image.new("RGBA", im.size, replace_alpha)
+                    base.paste(im, mask=im)
+                    frames.append(base)
+                write_to = BytesIO()
+                frames[0].save(
+                    write_to, format=im.format, save_all=True, append_images=frames[1:]
+                )
+                return Image.open(write_to)
else:
-        new_mode = new_mode + 'A'
+        new_mode = new_mode + "A"

    if im.mode != new_mode:
-        im = im.convert(new_mode)
-
+        im = FrameAware(im).convert(new_mode)
    return im


@@ -97,23 +138,25 @@ def autocrop(im, autocrop=False, **kwargs):
if autocrop:
# If transparent, flatten.
if utils.is_transparent(im):
-            no_alpha = Image.new('L', im.size, (255))
+            no_alpha = Image.new("L", im.size, (255))
no_alpha.paste(im, mask=im.split()[-1])
else:
-            no_alpha = im.convert('L')
+            no_alpha = im.convert("L")
        # Convert to black and white image.
-        bw = no_alpha.convert('L')
+        bw = no_alpha.convert("L")
# bw = bw.filter(ImageFilter.MedianFilter)
# White background.
-        bg = Image.new('L', im.size, 255)
+        bg = Image.new("L", im.size, 255)
bbox = ImageChops.difference(bw, bg).getbbox()
if bbox:
-            im = im.crop(bbox)
+            im = FrameAware(im).crop(bbox)
return im


-def scale_and_crop(im, size, crop=False, upscale=False, zoom=None, target=None,
-                   **kwargs):
+def scale_and_crop(
+    im, size, crop=False, upscale=False, zoom=None, target=None, **kwargs
+):
"""
Handle scaling and cropping the source image.

@@ -202,19 +245,20 @@ def scale_and_crop(im, size, crop=False, upscale=False, zoom=None, target=None,
if scale < 1.0 or (scale > 1.0 and upscale):
# Resize the image to the target size boundary. Round the scaled
# boundary sizes to avoid floating point errors.
-        im = im.resize((int(round(source_x * scale)),
-                        int(round(source_y * scale))),
-                       resample=Image__Resampling__LANCZOS)
+        im = FrameAware(im).resize(
+            (int(round(source_x * scale)), int(round(source_y * scale))),
+            resample=Image__Resampling__LANCZOS,
+        )

if crop:
# Use integer values now.
source_x, source_y = im.size
# Difference between new image size and requested size.
diff_x = int(source_x - min(source_x, target_x))
diff_y = int(source_y - min(source_y, target_y))
-        if crop != 'scale' and (diff_x or diff_y):
+        if crop != "scale" and (diff_x or diff_y):
if isinstance(target, str):
-                target = re.match(r'(\d+)?,(\d+)?$', target)
+                target = re.match(r"(\d+)?,(\d+)?$", target)
if target:
target = target.groups()
if target:
@@ -232,8 +276,9 @@ def scale_and_crop(im, size, crop=False, upscale=False, zoom=None, target=None,
box.append(int(min(source_x, box[0] + target_x)))
box.append(int(min(source_y, box[1] + target_y)))
# See if an edge cropping argument was provided.
-            edge_crop = (isinstance(crop, str) and
-                         re.match(r'(?:(-?)(\d+))?,(?:(-?)(\d+))?$', crop))
+            edge_crop = isinstance(crop, str) and re.match(
+                r"(?:(-?)(\d+))?,(?:(-?)(\d+))?$", crop
+            )
if edge_crop and filter(None, edge_crop.groups()):
x_right, x_crop, y_bottom, y_crop = edge_crop.groups()
if x_crop:
@@ -253,7 +298,7 @@ def scale_and_crop(im, size, crop=False, upscale=False, zoom=None, target=None,
box[1] = offset
box[3] = source_y - (diff_y - offset)
# See if the image should be "smart cropped".
-            elif crop == 'smart':
+            elif crop == "smart":
left = top = 0
right, bottom = source_x, source_y
while diff_x:
@@ -274,7 +319,9 @@ def scale_and_crop(im, size, crop=False, upscale=False, zoom=None, target=None,
diff_y = diff_y - add - remove
box = (left, top, right, bottom)
# Finally, crop the image!
-            im = im.crop(box)
+            im = FrameAware(im).crop(box)
return im


@@ -291,9 +338,11 @@ def filters(im, detail=False, sharpen=False, **kwargs):

"""
if detail:
-        im = im.filter(ImageFilter.DETAIL)
+        im = FrameAware(im).filter(ImageFilter.DETAIL)
    if sharpen:
-        im = im.filter(ImageFilter.SHARPEN)
+        im = FrameAware(im).filter(ImageFilter.SHARPEN)
return im


@@ -317,9 +366,23 @@ def background(im, size, background=None, **kwargs):
# there's nothing to do.
return im
im = colorspace(im, replace_alpha=background, **kwargs)
-    new_im = Image.new('RGB', size, background)
+    new_im = Image.new("RGB", size, background)
if new_im.mode != im.mode:
new_im = new_im.convert(im.mode)
-    offset = (size[0]-x)//2, (size[1]-y)//2
-    new_im.paste(im, offset)
-    return new_im
+    offset = (size[0] - x) // 2, (size[1] - y) // 2
+    # Animated formats (gif/webp/...) need per-frame handling.
+    if not getattr(im, "is_animated", False):
+        new_im.paste(im, offset)
+        return new_im
+    else:
+        frames = []
+        for i in range(im.n_frames):
+            im.seek(i)
+            copied_new_im = new_im.copy()
+            copied_new_im.paste(im, offset)
+            frames.append(copied_new_im)
+        write_to = BytesIO()
+        frames[0].save(
+            write_to, format=im.format, save_all=True, append_images=frames[1:]
+        )
+        return Image.open(write_to)
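
The same frame round-trip appears three times in this diff (FrameAware.apply_to_frames, the animated branch of colorspace, and background): process every frame, save the first frame with save_all=True and append_images, then reopen the result from a BytesIO. A minimal standalone sketch of that pattern, assuming only Pillow and an already-opened animated image; rebuild_animation and frame_func are names invented here, not part of the PR:

from io import BytesIO
from PIL import Image


def rebuild_animation(im, frame_func):
    # Apply frame_func to each frame, then reassemble an animated image
    # by saving the first frame with the rest appended to it.
    frames = []
    for i in range(getattr(im, "n_frames", 1)):
        im.seek(i)
        frames.append(frame_func(im))
    buffer = BytesIO()
    frames[0].save(
        buffer, format=im.format, save_all=True, append_images=frames[1:]
    )
    return Image.open(buffer)

For example, rebuild_animation(im, lambda frame: frame.resize((100, 100))) mirrors what FrameAware(im).resize((100, 100)) does.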
72 changes: 72 additions & 0 deletions easy_thumbnails/tests/test_animated_formats.py
@@ -0,0 +1,72 @@
from io import BytesIO
from PIL import Image, ImageDraw
from easy_thumbnails import processors
from unittest import TestCase


def create_animated_image(mode='RGB', format="gif", size=(500, 500)):
frames = []
for i in range(10):
image = Image.new(mode, size, (255, 255, 255))
draw = ImageDraw.Draw(image)
x_bit, y_bit = size[0] // 10 * i, size[1] // 10 * i
draw.rectangle((x_bit, y_bit * 2, x_bit * 7, y_bit * 3), 'red')
draw.rectangle((x_bit * 2, y_bit, x_bit * 3, y_bit * 8), 'red')
frames.append(image)
write_to = BytesIO()
frames[0].save(
write_to, format=format, save_all=True, append_images=frames[1:]
)
return Image.open(write_to)


class AnimatedFormatProcessorsTests(TestCase):

def test_scale(self):
im = create_animated_image()
frames_count = im.n_frames
self.assertGreater(frames_count, 1)
processed = processors.scale_and_crop(im, (100, 100))
processed_frames_count = processed.n_frames
self.assertEqual(frames_count, processed_frames_count)
self.assertEqual(processed.size, (100, 100))

def test_scale_crop(self):
im = create_animated_image()
frames_count = im.n_frames
processed = processors.scale_and_crop(im, (100, 50), crop=True)
processed_frames_count = processed.n_frames
self.assertEqual(frames_count, processed_frames_count)
self.assertEqual(processed.size, (100, 50))

def test_colorspace(self):
        # Use an animated png so that a real mode conversion is required.
im = create_animated_image(format="png")
frames_count = im.n_frames
processed = processors.colorspace(im, bw=True)
processed_frames_count = processed.n_frames
        # The conversion was actually applied:
self.assertEqual(processed.mode, "L")
self.assertEqual(frames_count, processed_frames_count)
self.assertEqual(processed.size, (500, 500))

def test_filter(self):
        # Use an animated webp source.
im = create_animated_image(format="webp")
frames_count = im.n_frames
processed = processors.filters(im, detail=True, sharpen=True)
processed_frames_count = processed.n_frames
        # Frame count and size should be preserved.
self.assertEqual(frames_count, processed_frames_count)
self.assertEqual(processed.size, (500, 500))

def test_background(self):
        # Use an animated webp source.
im = create_animated_image(format="webp")
frames_count = im.n_frames
processed = processors.background(im, background="#ff00ff", size=(500, 800))
processed_frames_count = processed.n_frames
        # Frame count should be preserved on the enlarged canvas.
self.assertEqual(frames_count, processed_frames_count)
self.assertEqual(processed.size, (500, 800))
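
A rough end-to-end smoke check of the new behaviour could chain the processors the same way (a sketch only, not part of the test suite; it reuses the create_animated_image helper above, and the expected values mirror the assertions in the tests):

im = create_animated_image(format="webp")  # 10-frame animated webp
thumb = processors.scale_and_crop(im, (100, 50), crop=True)
thumb = processors.filters(thumb, sharpen=True)
thumb = processors.background(thumb, background="#ff00ff", size=(100, 100))
assert thumb.n_frames == im.n_frames  # frames survive every processor
assert thumb.size == (100, 100)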