import asyncio
|
|
from typing import Any, cast
|
|
|
|
import cv2
|
|
import numpy as np
|
|
import PIL.Image
|
|
import PIL.ImageChops
|
|
import PIL.ImageEnhance
|
|
|
|
from konabot.common.path import ASSETS_PATH
# --- "cao" template ---------------------------------------------------------
# Overlay image the user picture is warped underneath.
cao_image = PIL.Image.open(ASSETS_PATH / "img" / "meme" / "caoimg1.png")
# Destination quad for the perspective warp, in cao_image pixel coordinates.
# Point order matches the source corners used by the draw functions:
# top-left, top-right, bottom-right, bottom-left.
CAO_QUAD_POINTS = np.float32(cast(Any, [
    [392, 540],
    [577, 557],
    [567, 707],
    [381, 687],
]))

# --- "snaur" template -------------------------------------------------------
# The warped user picture is sandwiched between a base and a top layer.
snaur_image_base = PIL.Image.open(ASSETS_PATH / "img" / "meme" / "snaur_1_base.png")
snaur_image_top = PIL.Image.open(ASSETS_PATH / "img" / "meme" / "snaur_1_top.png")
# Target width:height ratio (1/sqrt(2), portrait) used to center-crop the
# input before warping.
SNAUR_RATIO = (1 / 2) ** .5
# Destination quad in snaur_image_top pixel coordinates (TL, TR, BR, BL).
SNAUR_QUAD_POINTS = np.float32(cast(Any, [
    [0, 466 ],
    [673, 471 ],
    [640, 1196],
    [106, 1280],
]))

# --- "anan" template --------------------------------------------------------
anan_image_base = PIL.Image.open(ASSETS_PATH / "img" / "meme" / "anan_base.png")
anan_image_top = PIL.Image.open(ASSETS_PATH / "img" / "meme" / "anan_top.png")
# Destination quad in anan_image_top pixel coordinates (TL, TR, BR, BL).
ANAN_QUAD_POINTS = np.float32([
    [157, 585],
    [793, 599],
    [781, 908],
    [160, 908]
])
def _draw_cao_display(image: PIL.Image.Image):
    """Warp *image* into the quad of the "cao" template and overlay the template.

    Any alpha in *image* is discarded. Blocking; run via draw_cao_display
    from async code.
    """
    rgb = np.array(image.convert("RGB"))
    height, width = rgb.shape[:2]

    # Map the four image corners (TL, TR, BR, BL) onto the template quad.
    corners = np.float32(cast(Any, [
        [0, 0],
        [width, 0],
        [width, height],
        [0, height],
    ]))
    transform = cv2.getPerspectiveTransform(
        cast(Any, corners), cast(Any, CAO_QUAD_POINTS)
    )

    canvas_w, canvas_h = cao_image.size
    warped = cv2.warpPerspective(
        rgb,
        transform,
        (canvas_w, canvas_h),
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=(0, 0, 0),
    )

    # Put the template on top of the warped picture.
    composed = PIL.Image.fromarray(warped, 'RGB').convert('RGBA')
    return PIL.Image.alpha_composite(composed, cao_image)
async def draw_cao_display(image: PIL.Image.Image) -> PIL.Image.Image:
    """Render the "cao" meme for *image* without blocking the event loop.

    The CPU-bound cv2/PIL work in _draw_cao_display runs in a worker
    thread via asyncio.to_thread. Return annotation added for
    consistency with draw_snaur_display / draw_anan_display.
    """
    return await asyncio.to_thread(_draw_cao_display, image)
def _draw_snaur_display(
    image : PIL.Image.Image,
    whiteness : float = 0.0 ,
    black_level: float = 0.2 ,
    opacity : float = 0.8 ,
    saturation : float = 0.85 ,
):
    """Render the "snaur" meme: center-crop *image* to the template ratio,
    warp it into the template quad, tone it, and sandwich it between the
    base and top template layers.

    Args:
        image: source picture; its alpha channel is kept through the warp.
        whiteness: gamma tweak; each channel is raised to 2**whiteness
            (0.0 leaves gamma at 1).
        black_level: fraction of full white mixed in, lifting the output
            range to [255 * black_level, 255].
        opacity: multiplier applied to the warped image's alpha channel.
        saturation: PIL color-enhance factor (< 1 desaturates).

    Blocking; run via draw_snaur_display from async code.
    """
    src = np.array(image.convert("RGBA"))
    _h, _w = src.shape[:2]

    # Center-crop to SNAUR_RATIO (w:h = 1:sqrt(2)) so the perspective warp
    # does not stretch the picture.
    if _w / _h < SNAUR_RATIO:
        # Too narrow: keep full width, trim height.
        _w_target = _w
        _h_target = int(_w / SNAUR_RATIO)
    else:
        # Too wide: keep full height, trim width.
        _w_target = int(_h * SNAUR_RATIO)
        _h_target = _h

    x_center = _w / 2
    y_center = _h / 2

    x1 = int(x_center - _w_target / 2)
    x2 = int(x_center + _w_target / 2)
    y1 = int(y_center - _h_target / 2)
    y2 = int(y_center + _h_target / 2)

    src = src[y1:y2, x1:x2, :]

    h, w = src.shape[:2]
    # Map the cropped image corners (TL, TR, BR, BL) onto the template quad.
    src_points = np.float32(cast(Any, [
        [0, 0],
        [w, 0],
        [w, h],
        [0, h],
    ]))
    dst_points = SNAUR_QUAD_POINTS
    M = cv2.getPerspectiveTransform(cast(Any, src_points), cast(Any, dst_points))
    output_size = snaur_image_top.size
    output_w, output_h = output_size
    warped = cv2.warpPerspective(
        src,
        M,
        (output_w, output_h),
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=(0, 0, 0)
    )

    result = PIL.Image.fromarray(warped, 'RGBA')

    # Fade the alpha, then remap each color channel: gamma of 2**whiteness,
    # compressed into [255 * black_level, 255].
    r, g, b, a = result.split()
    a = a.point(lambda p: int(p * opacity))
    f2 = lambda p: int(
        ((p / 255) ** (2 ** whiteness)) * 255 * (1 - black_level)
        + 255 * black_level
    )
    r = r.point(f2)
    g = g.point(f2)
    b = b.point(f2)
    result = PIL.Image.merge('RGBA', (r, g, b, a))

    enhancer = PIL.ImageEnhance.Color(result)
    result = enhancer.enhance(saturation)

    # Multiply by the base layer for shading.
    # NOTE(review): multiply requires base and top PNGs to share one size
    # with the warp output -- confirm the asset files.
    result = PIL.ImageChops.multiply(result, snaur_image_base)

    # Stack: base layer, then the shaded warp, then the top (frame) layer.
    result = PIL.Image.alpha_composite(snaur_image_base, result)
    result = PIL.Image.alpha_composite(result, snaur_image_top)
    return result
async def draw_snaur_display(
    image: PIL.Image.Image,
    whiteness: float = 0.0,
    black_level: float = 0.2,
    opacity: float = 0.8,
    saturation: float = 0.85,
) -> PIL.Image.Image:
    """Render the "snaur" meme without blocking the event loop.

    All parameters are forwarded unchanged to _draw_snaur_display, which
    runs in a worker thread via asyncio.to_thread.
    """
    return await asyncio.to_thread(
        _draw_snaur_display,
        image,
        whiteness,
        black_level,
        opacity,
        saturation,
    )
def _draw_anan_display(image: PIL.Image.Image) -> PIL.Image.Image:
    """Render the "anan" meme: warp *image* into the template quad and
    sandwich it between the base and top template layers.

    Any alpha in *image* is discarded (same as _draw_cao_display); the
    warped region gets an antialiased opaque mask and everything outside
    it stays transparent so the base layer shows through.

    Blocking; run via draw_anan_display from async code.
    """
    # Convert straight to RGB: PIL drops the alpha band here, which is what
    # the previous RGBA -> cv2.cvtColor round-trip did. Its shape check
    # (src.shape[2] == 4) was always true after convert("RGBA"), so the
    # else branch was dead code.
    src_rgb = np.array(image.convert("RGB"))
    h, w = src_rgb.shape[:2]

    # Map the image corners (TL, TR, BR, BL) onto the template quad.
    src_points = np.float32([
        [0, 0],
        [w, 0],
        [w, h],
        [0, h]
    ])
    dst_points = ANAN_QUAD_POINTS

    M = cv2.getPerspectiveTransform(src_points, dst_points)
    output_w, output_h = anan_image_top.size

    warped_rgb = cv2.warpPerspective(
        src_rgb,
        M,
        (output_w, output_h),
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=(0, 0, 0)
    )

    # Warp a solid-white mask with the same transform; INTER_LINEAR gives
    # the quad edge a soft antialiased falloff and the zero border leaves
    # the outside fully transparent.
    mask = np.full((h, w), 255, dtype=np.uint8)
    warped_mask = cv2.warpPerspective(
        mask,
        M,
        (output_w, output_h),
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=0
    )

    warped_rgba = cv2.cvtColor(warped_rgb, cv2.COLOR_RGB2RGBA)
    warped_rgba[:, :, 3] = warped_mask

    warped_pil = PIL.Image.fromarray(warped_rgba, 'RGBA')

    # Stack: base layer, then the warped picture, then the top (frame) layer.
    result = PIL.Image.alpha_composite(anan_image_base, warped_pil)
    result = PIL.Image.alpha_composite(result, anan_image_top)
    return result
async def draw_anan_display(image: PIL.Image.Image) -> PIL.Image.Image:
    """Render the "anan" meme in a worker thread so the event loop stays free."""
    return await asyncio.to_thread(_draw_anan_display, image)