Files
konabot/konabot/plugins/memepack/drawing/display.py
passthem 81aac10665
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
添加文档并修复问题
2025-10-16 23:27:42 +08:00

142 lines
3.7 KiB
Python

import asyncio
from typing import Any, cast
import cv2
import numpy as np
import PIL.Image
import PIL.ImageChops
import PIL.ImageEnhance
from konabot.common.path import ASSETS_PATH
cao_image = PIL.Image.open(ASSETS_PATH / "img" / "meme" / "caoimg1.png")
CAO_QUAD_POINTS = np.float32(cast(Any, [
[392, 540],
[577, 557],
[567, 707],
[381, 687],
]))
snaur_image_base = PIL.Image.open(ASSETS_PATH / "img" / "meme" / "snaur_1_base.png")
snaur_image_top = PIL.Image.open(ASSETS_PATH / "img" / "meme" / "snaur_1_top.png")
SNAUR_RATIO = (1 / 2) ** .5
SNAUR_QUAD_POINTS = np.float32(cast(Any, [
[0, 466 ],
[673, 471 ],
[640, 1196],
[106, 1280],
]))
def _draw_cao_display(image: PIL.Image.Image):
    """Warp *image* onto the quad region of the cao template and overlay it.

    Blocking implementation; call ``draw_cao_display`` from async code.
    """
    rgb = np.array(image.convert("RGB"))
    height, width = rgb.shape[:2]
    # Source corners, clockwise from top-left, mapped onto CAO_QUAD_POINTS.
    corners = np.float32(cast(Any, [
        [0, 0],
        [width, 0],
        [width, height],
        [0, height],
    ]))
    transform = cv2.getPerspectiveTransform(
        cast(Any, corners), cast(Any, CAO_QUAD_POINTS))
    out_w, out_h = cao_image.size
    warped = cv2.warpPerspective(
        rgb,
        transform,
        (out_w, out_h),
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=(0, 0, 0),
    )
    # Paste the template (with its transparency) over the warped photo.
    canvas = PIL.Image.fromarray(warped, 'RGB').convert('RGBA')
    return PIL.Image.alpha_composite(canvas, cao_image)
async def draw_cao_display(image: PIL.Image.Image):
    """Run the blocking cao drawing in a worker thread and return the result."""
    result = await asyncio.to_thread(_draw_cao_display, image)
    return result
def _draw_snaur_display(
    image      : PIL.Image.Image,
    whiteness  : float = 0.0,
    black_level: float = 0.2,
    opacity    : float = 0.8,
    saturation : float = 0.85,
):
    """Center-crop *image* to the template's aspect ratio, warp it onto the
    snaur quad, tone-map the channels, and composite it between the base and
    top template layers.

    Blocking implementation; call ``draw_snaur_display`` from async code.
    """
    rgba = np.array(image.convert("RGBA"))
    full_h, full_w = rgba.shape[:2]
    # Pick the largest centered crop matching SNAUR_RATIO (width / height).
    if full_w / full_h < SNAUR_RATIO:
        crop_w, crop_h = full_w, int(full_w / SNAUR_RATIO)
    else:
        crop_w, crop_h = int(full_h * SNAUR_RATIO), full_h
    left   = int(full_w / 2 - crop_w / 2)
    right  = int(full_w / 2 + crop_w / 2)
    top    = int(full_h / 2 - crop_h / 2)
    bottom = int(full_h / 2 + crop_h / 2)
    rgba = rgba[top:bottom, left:right, :]
    h, w = rgba.shape[:2]
    # Cropped-image corners, clockwise from top-left, mapped onto the quad.
    corners = np.float32(cast(Any, [
        [0, 0],
        [w, 0],
        [w, h],
        [0, h],
    ]))
    transform = cv2.getPerspectiveTransform(
        cast(Any, corners), cast(Any, SNAUR_QUAD_POINTS))
    out_w, out_h = snaur_image_top.size
    warped = cv2.warpPerspective(
        rgba,
        transform,
        (out_w, out_h),
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=(0, 0, 0),
    )
    layer = PIL.Image.fromarray(warped, 'RGBA')
    r, g, b, a = layer.split()
    # Fade the whole layer by `opacity`.
    a = a.point(lambda p: int(p * opacity))

    def tone(p: int) -> int:
        # Gamma curve controlled by `whiteness`, then lift blacks by
        # `black_level` so shadows never go fully dark.
        return int(
            ((p / 255) ** (2 ** whiteness)) * 255 * (1 - black_level)
            + 255 * black_level
        )

    layer = PIL.Image.merge('RGBA', (r.point(tone), g.point(tone), b.point(tone), a))
    layer = PIL.ImageEnhance.Color(layer).enhance(saturation)
    # Multiply against the base for shading, then stack base / photo / top.
    layer = PIL.ImageChops.multiply(layer, snaur_image_base)
    layer = PIL.Image.alpha_composite(snaur_image_base, layer)
    return PIL.Image.alpha_composite(layer, snaur_image_top)
async def draw_snaur_display(
    image      : PIL.Image.Image,
    whiteness  : float = 0.0,
    black_level: float = 0.2,
    opacity    : float = 0.8,
    saturation : float = 0.85,
) -> PIL.Image.Image:
    """Run the blocking snaur drawing in a worker thread and return the result."""
    result = await asyncio.to_thread(
        _draw_snaur_display,
        image,
        whiteness,
        black_level,
        opacity,
        saturation,
    )
    return result