Files
AiDA_Python/app/service/design_fast/pipeline/print_painting.py
zcr 6892361050
All checks were successful
git commit AiDA python develop 分支构建部署 / scheduled_deploy (push) Has been skipped
修复design印花部分 overall 模式印花平铺起始从印花图片中心开始
2026-04-15 17:36:29 +08:00

592 lines
30 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
import random
import cv2
import numpy as np
from PIL import Image
from app.service.utils.new_oss_client import oss_get_image
class PrintPainting:
    """Composites print patterns (overall tiling, single, element, partial) onto garment sketch images."""

    def __init__(self, minio_client):
        # OSS/minio client handle; used by read_image() and get_print() to fetch print images.
        self.minio_client = minio_client
def __call__(self, result):
    """Paint the configured prints onto the garment images in ``result``.

    Only the 'overall' (tiled) print mode is currently enabled; the
    'single', 'element' and 'partial' branches are disabled (forced to
    ``None``) but kept for reference.

    :param result: pipeline dict holding 'print', 'pattern_image',
        'final_image', 'mask', 'gray' and 'resize_scale'.
    :return: the same dict with 'print_image', 'single_image',
        'final_image' and 'pattern_image' updated in place.
    """
    # single_print = result['print']['single']
    overall_print = result['print']['overall']
    # element_print = result['print']['element']
    # partial_path = result['print']['partial'] if 'partial' in result['print'] else None
    single_print = None
    element_print = None
    partial_path = None
    result['single_image'] = None
    result['print_image'] = None
    # TODO: resize result['pattern_image'] and result['mask'] to resize_scale
    if result['resize_scale'][0] == 1.0 and result['resize_scale'][1] == 1.0:
        pass
    else:
        # 2025-9-19 print adjustment: rescale every working image by the sketch resize ratio.
        height, width = result['pattern_image'].shape[:2]
        new_width = int(width * result['resize_scale'][0])
        new_height = int(height * result['resize_scale'][1])
        result['pattern_image'] = cv2.resize(result['pattern_image'], (new_width, new_height))
        result['final_image'] = cv2.resize(result['final_image'], (new_width, new_height))
        result['mask'] = cv2.resize(result['mask'], (new_width, new_height))
        result['gray'] = cv2.resize(result['gray'], (new_width, new_height))
    if overall_print['print_path_list']:
        # The anchor coordinate follows the sketch resize ratio as well.
        overall_print['location'][0] = [x * y for x, y in zip(overall_print['location'][0], result['resize_scale'])]
        painting_dict = {'dim_image_h': result['pattern_image'].shape[0], 'dim_image_w': result['pattern_image'].shape[1]}
        result['print_image'] = result['pattern_image'].copy()
        # Build the tiled + rotated overall print layer, then composite it onto the garment.
        painting_dict = self.painting_collection(painting_dict, overall_print)
        result['print_image'] = self.printpaint(result, painting_dict, print_=True)
        result['single_image'] = result['final_image'] = result['pattern_image'] = result['print_image']
    if single_print:
        # 2025-9-19 print adjustment: print locations follow the sketch resize ratio.
        sketch_resize_scale = result['resize_scale']
        print_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8)
        mask_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8)
        for i in range(len(single_print['print_path_list'])):
            image, image_mode = self.read_image(single_print['print_path_list'][i])
            if image_mode == "RGB":
                image_rgba = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)
                image = Image.fromarray(image_rgba)
            new_size = (int(result['pattern_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['pattern_image'].shape[0] * single_print['print_scale_list'][i][1]))
            mask = image.split()[3]
            resized_source = image.resize(new_size)
            resized_source_mask = mask.resize(new_size)
            rotated_resized_source = resized_source.rotate(-single_print['print_angle_list'][i])
            rotated_resized_source_mask = resized_source_mask.rotate(-single_print['print_angle_list'][i])
            source_image_pil = Image.fromarray(cv2.cvtColor(print_background, cv2.COLOR_BGR2RGB))
            source_image_pil_mask = Image.fromarray(cv2.cvtColor(mask_background, cv2.COLOR_BGR2RGB))
            source_image_pil.paste(rotated_resized_source, (int(single_print['location'][i][0] * sketch_resize_scale[0]), int(single_print['location'][i][1] * sketch_resize_scale[1])), rotated_resized_source)
            source_image_pil_mask.paste(rotated_resized_source_mask, (int(single_print['location'][i][0] * sketch_resize_scale[0]), int(single_print['location'][i][1] * sketch_resize_scale[1])), rotated_resized_source_mask)
            print_background = cv2.cvtColor(np.array(source_image_pil), cv2.COLOR_RGBA2BGR)
            mask_background = cv2.cvtColor(np.array(source_image_pil_mask), cv2.COLOR_RGBA2BGR)
        ret, mask_background = cv2.threshold(mask_background, 124, 255, cv2.THRESH_BINARY)
        print_mask = cv2.bitwise_and(result['mask'], cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY))
        img_fg = cv2.bitwise_or(print_background, print_background, mask=print_mask)
        img_bg = cv2.bitwise_and(result['pattern_image'], result['pattern_image'], mask=cv2.bitwise_not(print_mask))
        mask_mo = np.expand_dims(print_mask, axis=2).repeat(3, axis=2)
        gray_mo = np.expand_dims(result['gray'], axis=2).repeat(3, axis=2)
        # Multiplying by the gray layer darkens the print when the sketch is not pure white.
        img_fg = (img_fg * (mask_mo / 255) * (gray_mo / 255)).astype(np.uint8)
        result['final_image'] = cv2.add(img_bg, img_fg)
        canvas = np.full_like(result['final_image'], 255)
        temp_bg = np.expand_dims(cv2.bitwise_not(result['mask']), axis=2).repeat(3, axis=2)
        tmp1 = (canvas * (temp_bg / 255)).astype(np.uint8)
        temp_fg = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2)
        tmp2 = (result['final_image'] * (temp_fg / 255)).astype(np.uint8)
        result['single_image'] = cv2.add(tmp1, tmp2)
    if element_print:
        # 2025-9-19 print adjustment: element locations follow the sketch resize ratio.
        sketch_resize_scale = result['resize_scale']
        print_background = np.zeros((result['final_image'].shape[0], result['final_image'].shape[1], 3), dtype=np.uint8)
        mask_background = np.zeros((result['final_image'].shape[0], result['final_image'].shape[1], 3), dtype=np.uint8)
        for i in range(len(element_print['element_path_list'])):
            image, image_mode = self.read_image(element_print['element_path_list'][i])
            if image_mode == "RGBA":
                new_size = (int(result['final_image'].shape[1] * element_print['element_scale_list'][i][0]), int(result['final_image'].shape[0] * element_print['element_scale_list'][i][1]))
                mask = image.split()[3]
                resized_source = image.resize(new_size)
                resized_source_mask = mask.resize(new_size)
                rotated_resized_source = resized_source.rotate(-element_print['element_angle_list'][i])
                rotated_resized_source_mask = resized_source_mask.rotate(-element_print['element_angle_list'][i])
                source_image_pil = Image.fromarray(cv2.cvtColor(print_background, cv2.COLOR_BGR2RGB))
                source_image_pil_mask = Image.fromarray(cv2.cvtColor(mask_background, cv2.COLOR_BGR2RGB))
                source_image_pil.paste(rotated_resized_source, (int(element_print['location'][i][0] * sketch_resize_scale[0]), int(element_print['location'][i][1] * sketch_resize_scale[1])), rotated_resized_source)
                source_image_pil_mask.paste(rotated_resized_source_mask, (int(element_print['location'][i][0] * sketch_resize_scale[0]), int(element_print['location'][i][1] * sketch_resize_scale[1])), rotated_resized_source_mask)
                print_background = cv2.cvtColor(np.array(source_image_pil), cv2.COLOR_RGBA2BGR)
                mask_background = cv2.cvtColor(np.array(source_image_pil_mask), cv2.COLOR_RGBA2BGR)
            else:
                mask = self.get_mask_inv(image)
                mask = np.expand_dims(mask, axis=2)
                mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
                mask = cv2.bitwise_not(mask)
                # BUGFIX: scale by the element's own scale list; this previously read
                # single_print['print_scale_list'], which is the wrong list (and None here).
                mask = cv2.resize(mask, (int(result['final_image'].shape[1] * element_print['element_scale_list'][i][0]), int(result['final_image'].shape[0] * element_print['element_scale_list'][i][1])))
                image = cv2.resize(image, (int(result['final_image'].shape[1] * element_print['element_scale_list'][i][0]), int(result['final_image'].shape[0] * element_print['element_scale_list'][i][1])))
                # Coordinates must be recomputed after rotation enlarges the canvas.
                rotate_mask, _ = self.img_rotate(mask, element_print['element_angle_list'][i])
                rotate_image, rotated_new_size = self.img_rotate(image, element_print['element_angle_list'][i])
                x, y = int(element_print['location'][i][0] - rotated_new_size[0]), int(element_print['location'][i][1] - rotated_new_size[1])
                image_x = print_background.shape[1]
                image_y = print_background.shape[0]
                print_x = rotate_image.shape[1]
                print_y = rotate_image.shape[0]
                # Clip the element at every canvas border.
                if x <= 0:
                    rotate_image = rotate_image[:, -x:]
                    rotate_mask = rotate_mask[:, -x:]
                    start_x = x = 0
                else:
                    start_x = x
                if y <= 0:
                    rotate_image = rotate_image[-y:, :]
                    rotate_mask = rotate_mask[-y:, :]
                    start_y = y = 0
                else:
                    start_y = y
                if x + print_x > image_x:
                    rotate_image = rotate_image[:, :image_x - x]
                    rotate_mask = rotate_mask[:, :image_x - x]
                if y + print_y > image_y:
                    rotate_image = rotate_image[:image_y - y, :]
                    rotate_mask = rotate_mask[:image_y - y, :]
                mask_background = self.stack_prin(mask_background, result['pattern_image'], rotate_mask, start_y, y, start_x, x)
                print_background = self.stack_prin(print_background, result['pattern_image'], rotate_image, start_y, y, start_x, x)
        print_mask = cv2.bitwise_and(result['mask'], cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY))
        img_fg = cv2.bitwise_or(print_background, print_background, mask=print_mask)
        # TODO: element compositing loses information here
        three_channel_image = cv2.merge([cv2.bitwise_not(print_mask), cv2.bitwise_not(print_mask), cv2.bitwise_not(print_mask)])
        img_bg = cv2.bitwise_and(result['final_image'], three_channel_image)
        result['final_image'] = cv2.add(img_bg, img_fg)
        canvas = np.full_like(result['final_image'], 255)
        temp_bg = np.expand_dims(cv2.bitwise_not(result['mask']), axis=2).repeat(3, axis=2)
        tmp1 = (canvas * (temp_bg / 255)).astype(np.uint8)
        temp_fg = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2)
        tmp2 = (result['final_image'] * (temp_fg / 255)).astype(np.uint8)
        result['single_image'] = cv2.add(tmp1, tmp2)
    if partial_path:
        print_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8)
        mask_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8)
        image, image_mode = self.read_image(partial_path)
        if image_mode == "RGBA":
            new_size = (result['pattern_image'].shape[1], result['pattern_image'].shape[0])
            mask = image.split()[3]
            resized_source = image.resize(new_size)
            resized_source_mask = mask.resize(new_size)
            source_image_pil = Image.fromarray(cv2.cvtColor(print_background, cv2.COLOR_BGR2RGB))
            source_image_pil_mask = Image.fromarray(cv2.cvtColor(mask_background, cv2.COLOR_BGR2RGB))
            source_image_pil.paste(resized_source, (0, 0), resized_source)
            source_image_pil_mask.paste(resized_source_mask, (0, 0), resized_source_mask)
            print_background = cv2.cvtColor(np.array(source_image_pil), cv2.COLOR_RGBA2BGR)
            mask_background = cv2.cvtColor(np.array(source_image_pil_mask), cv2.COLOR_RGBA2BGR)
        ret, mask_background = cv2.threshold(mask_background, 124, 255, cv2.THRESH_BINARY)
        print_mask = cv2.bitwise_and(result['mask'], cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY))
        img_fg = cv2.bitwise_or(print_background, print_background, mask=print_mask)
        three_channel_image = cv2.merge([cv2.bitwise_not(print_mask), cv2.bitwise_not(print_mask), cv2.bitwise_not(print_mask)])
        img_bg = cv2.bitwise_and(result['final_image'], three_channel_image)
        result['final_image'] = cv2.add(img_bg, img_fg)
        canvas = np.full_like(result['final_image'], 255)
        temp_bg = np.expand_dims(cv2.bitwise_not(result['mask']), axis=2).repeat(3, axis=2)
        tmp1 = (canvas * (temp_bg / 255)).astype(np.uint8)
        temp_fg = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2)
        tmp2 = (result['final_image'] * (temp_fg / 255)).astype(np.uint8)
        result['single_image'] = cv2.add(tmp1, tmp2)
    return result
@staticmethod
def stack_prin(print_background, pattern_image, rotate_image, start_y, y, start_x, x):
    """Stamp ``rotate_image`` onto ``print_background`` at (start_x, start_y).

    A scratch canvas the size of ``pattern_image`` carries the stamp so it
    can be masked: wherever the stamped pixels are non-black they replace
    the corresponding background pixels; elsewhere the background survives.
    """
    canvas = np.zeros((pattern_image.shape[0], pattern_image.shape[1], 3), dtype=np.uint8)
    stamp_h, stamp_w = rotate_image.shape[:2]
    canvas[start_y:y + stamp_h, start_x:x + stamp_w] = rotate_image
    # Any non-black stamped pixel belongs to the foreground region.
    gray = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
    _, fg_mask = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
    bg_mask = cv2.bitwise_not(fg_mask)
    kept_background = cv2.bitwise_and(print_background, print_background, mask=bg_mask)
    stamped_part = cv2.bitwise_and(canvas, canvas, mask=fg_mask)
    return kept_background + stamped_part
def painting_collection(self, painting_dict, print_dict):
    """Fetch the overall print and build its tiled layer + inverse mask.

    Stores 'location', 'tile_print' and 'mask_inv_print' in
    ``painting_dict`` and returns it.
    """
    fetched = self.get_print(print_dict)
    painting_dict['location'] = fetched['location']
    # Tile edge length: 1/5 of the longest canvas edge, scaled by the print scale.
    longest_edge = max(painting_dict['dim_image_h'], painting_dict['dim_image_w'])
    tile_edge = int(longest_edge * fetched['scale'] / 5)
    gap = print_dict.get('gap', [[0, 0]])[0]
    tiled, mask_inv = tile_image(
        pattern=fetched['image'],
        mask=fetched['mask'],
        dim=(tile_edge, tile_edge),
        gap_x=gap[0],
        gap_y=gap[1],
        canvas_h=painting_dict['dim_image_h'],
        canvas_w=painting_dict['dim_image_w'],
        location=painting_dict['location'],
        angle=int(fetched.get('print_angle_list', [0])[0]),
    )
    painting_dict['tile_print'] = tiled
    painting_dict['mask_inv_print'] = mask_inv
    return painting_dict
def tile_image(self, pattern, dim, scale, dim_image_h, dim_image_w, location, trigger=False):
    """Resize ``pattern`` to ``dim``; when ``trigger`` is set, additionally
    tile the resized pattern over a large canvas and crop it to the target
    image size at the given location.

    NOTE(review): the overall-print path now uses the module-level
    ``tile_image``; this method looks like a legacy variant.
    """
    resized = cv2.resize(pattern, dim, interpolation=cv2.INTER_AREA)
    if not trigger:
        return resized
    # Enough repetitions to cover the canvas for any crop offset.
    reps = int((5 + 1) / scale) + 4
    tiled = None
    if len(pattern.shape) == 2:
        tiled = np.tile(resized, (reps, reps))
    if len(pattern.shape) == 3:
        tiled = np.tile(resized, (reps, reps, 1))
    return self.crop_image(tiled, dim_image_h, dim_image_w, location, resized.shape)
def get_mask_inv(self, print_):
    """Return a mask selecting the near-background pixels of a BGR print.

    The image is treated as having a removable background only when its
    top-left pixel is pure white; otherwise an all-zero mask is returned.
    """
    top_left_is_white = (
        print_[0][0][0] == 255 and print_[0][0][1] == 255 and print_[0][0][2] == 255
    )
    if not top_left_is_white:
        return np.zeros(print_.shape[:2], dtype=np.uint8)
    lab = cv2.cvtColor(print_, cv2.COLOR_BGR2LAB)
    bg_l, bg_a, bg_b = lab[0][0]
    # +/-30 tolerance window around the background colour in LAB space.
    l_high, l_low = self.get_low_high_lab(bg_l, L=True)
    a_high, a_low = self.get_low_high_lab(bg_a)
    b_high, b_low = self.get_low_high_lab(bg_b)
    lower = np.array([l_low, a_low, b_low])
    upper = np.array([l_high, a_high, b_high])
    return cv2.inRange(lab, lower, upper)
@staticmethod
def printpaint(result, painting_dict, print_=False):
    """Composite a print layer onto the garment region of the pattern image.

    :param result: pipeline dict with 'mask', 'gray', 'pattern_image' and
        'final_image' entries.
    :param painting_dict: output of painting_collection() providing the
        'tile_print' and 'mask_inv_print' layers.
    :param print_: when True, composite the tiled print layer; when False,
        re-composite the existing final image over the pattern.
    :return: the composited BGR image.
    """
    if print_:
        # Garment area minus the print's own (inverted) background mask.
        print_mask = cv2.bitwise_and(result['mask'], cv2.bitwise_not(painting_dict['mask_inv_print']))
        img_fg = cv2.bitwise_and(painting_dict['tile_print'], painting_dict['tile_print'], mask=print_mask)
    else:
        print_mask = result['mask']
        img_fg = result['final_image']
    clothes_mask_print = cv2.bitwise_not(print_mask)
    img_bg = cv2.bitwise_and(result['pattern_image'], result['pattern_image'], mask=clothes_mask_print)
    mask_mo = np.expand_dims(print_mask, axis=2).repeat(3, axis=2)
    gray_mo = np.expand_dims(result['gray'], axis=2).repeat(3, axis=2)
    # Modulating by the gray layer keeps the sketch shading visible through the print
    # (it darkens the print wherever the sketch is not pure white).
    img_fg = (img_fg * (mask_mo / 255) * (gray_mo / 255)).astype(np.uint8)
    return cv2.add(img_bg, img_fg)
def get_print(self, print_dict):
    """Fetch the first print image from OSS and attach it, its scale and a
    binary mask to ``print_dict``.

    RGBA sources are flattened onto a white canvas so transparent areas do
    not turn black during the RGB conversion.
    """
    if 'print_scale_list' not in print_dict or print_dict['print_scale_list'][0][0] < 0.3:
        print_dict['scale'] = 0.3  # floor the scale at 0.3
    else:
        print_dict['scale'] = print_dict['print_scale_list'][0][0]
    bucket_name, object_name = print_dict['print_path_list'][0].split("/", 1)
    image = oss_get_image(oss_client=self.minio_client, bucket=bucket_name, object_name=object_name, data_type="PIL")
    if image.mode == "RGBA":
        mask_pil = image.split()[3]
        white_canvas = Image.new('RGB', image.size, (255, 255, 255))
        white_canvas.paste(image, mask=image.split()[3])
        image = white_canvas
    else:
        mask_pil = Image.new('L', image.size, 255)  # fully opaque grayscale mask
    print_dict['image'] = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
    print_dict['mask'] = cv2.threshold(np.array(mask_pil), 127, 255, cv2.THRESH_BINARY)[1]
    return print_dict
def crop_image(self, image, image_size_h, image_size_w, location, print_shape):
    """Crop an ``image_size_h`` x ``image_size_w`` window out of a tiled image.

    The start offsets are the print location taken modulo the tile size and
    shifted by half a tile so the given coordinate lands mid-tile (drop the
    ``// 2`` terms to anchor at the top-left corner instead).
    """
    tile_h, tile_w = print_shape[0], print_shape[1]
    row_start = tile_w - int(location[0][1] % tile_w) + tile_w // 2
    col_start = tile_h - int(location[0][0] % tile_h) + tile_h // 2
    if len(image.shape) == 2:
        image = image[row_start:row_start + image_size_h, col_start:col_start + image_size_w]
    elif len(image.shape) == 3:
        image = image[row_start:row_start + image_size_h, col_start:col_start + image_size_w, :]
    return image
@staticmethod
def get_low_high_lab(Lab_value, L=False):
if L:
high = Lab_value + 30 if Lab_value + 30 < 255 else 255
low = Lab_value - 30 if Lab_value - 30 > 0 else 0
else:
high = Lab_value + 30 if Lab_value + 30 < 255 else 255
low = Lab_value - 30 if Lab_value - 30 > 0 else 0
return high, low
@staticmethod
def img_rotate(image, angel):
    """Rotate an image clockwise by ``angel`` degrees, enlarging the canvas
    so no content is clipped.

    Args:
        image (np.array): source image
        angel (float): rotation angle in degrees (clockwise)

    Returns:
        tuple: ``(rotated_img, (dx, dy))`` where ``(dx, dy)`` is half the
        canvas growth in (width, height), i.e. the offset of the original
        image's top-left corner inside the enlarged canvas.
    """
    h, w = image.shape[:2]
    center = (w // 2, h // 2)
    # if type(angel) is not int:
    #     angel = 0
    # Negate the angle: cv2 treats positive angles as counter-clockwise.
    M = cv2.getRotationMatrix2D(center, -angel, 1)
    # Grow the output size to fit the rotated bounding box.
    rotated_h = int((w * np.abs(M[0, 1]) + (h * np.abs(M[0, 0]))))
    rotated_w = int((h * np.abs(M[0, 1]) + (w * np.abs(M[0, 0]))))
    # Shift the transform so the rotation stays centred in the larger canvas.
    M[0, 2] += (rotated_w - w) // 2
    M[1, 2] += (rotated_h - h) // 2
    # Apply the rotation.
    rotated_img = cv2.warpAffine(image, M, (rotated_w, rotated_h))
    return rotated_img, ((rotated_img.shape[1] - image.shape[1]) // 2, (rotated_img.shape[0] - image.shape[0]) // 2)
    # return rotated_img, (0, 0)
@staticmethod
def rotate_crop_image(img, angle, crop):
    """Rotate ``img`` by ``angle`` degrees on a fixed-size canvas and
    optionally crop away the black corners the rotation introduces.

    :param angle: rotation angle in degrees
    :param crop: whether to crop to the largest valid inner rectangle
    :raises ValueError: if ``crop`` is not a boolean
    """
    if not isinstance(crop, bool):
        raise ValueError("The 'crop' parameter must be a boolean.")
    height, width = img.shape[:2]
    # The rotation repeats every 360 degrees.
    angle %= 360
    rotation = cv2.getRotationMatrix2D((width / 2, height / 2), angle, 1)
    rotated = cv2.warpAffine(img, rotation, (width, height))
    if not crop:
        return rotated
    # The crop factor has a 180-degree period and is symmetric around 90.
    angle_crop = angle % 180
    if angle_crop > 90:
        angle_crop = 180 - angle_crop
    theta = angle_crop * np.pi / 180
    hw_ratio = float(height) / float(width)
    tan_theta = np.tan(theta)
    # Side-length factor of the largest axis-aligned rectangle that fits
    # fully inside the rotated image.
    numerator = np.cos(theta) + np.sin(theta) * np.tan(theta)
    r = hw_ratio if height > width else 1 / hw_ratio
    denominator = r * tan_theta + 1
    crop_mult = numerator / denominator
    crop_w = int(crop_mult * width)
    crop_h = int(crop_mult * height)
    x0 = int((width - crop_w) / 2)
    y0 = int((height - crop_h) / 2)
    return rotated[y0:y0 + crop_h, x0:x0 + crop_w]
def read_image(self, image_url):
    """Fetch an image from OSS (``"bucket/object"`` url) and report its mode.

    4-channel sources are returned as PIL RGBA images; everything else is
    returned unchanged as a BGR cv2 array with mode ``"RGB"``.
    """
    bucket, object_name = image_url.split("/", 1)
    image = oss_get_image(oss_client=self.minio_client, bucket=bucket, object_name=object_name, data_type="cv2")
    if image.shape[2] == 4:
        # Reorder BGRA -> RGBA so PIL interprets the channels correctly.
        image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA))
        return image, "RGBA"
    return image, "RGB"
@staticmethod
def resize_and_crop(img, target_width, target_height):
    """Scale ``img`` to fully cover ``target_width`` x ``target_height``
    while preserving its aspect ratio, then centre-crop the overflowing
    dimension ("cover" behaviour)."""
    src_h, src_w = img.shape[:2]
    target_ratio = target_width / target_height
    source_ratio = src_w / src_h
    if source_ratio > target_ratio:
        # Source is wider: match the height, crop excess width in the middle.
        scaled_w = int(src_w * (target_height / src_h))
        scaled = cv2.resize(img, (scaled_w, target_height))
        left = (scaled_w - target_width) // 2
        return scaled[:, left:left + target_width]
    # Source is taller (or same ratio): match the width, crop excess height.
    scaled_h = int(src_h * (target_width / src_w))
    scaled = cv2.resize(img, (target_width, scaled_h))
    top = (scaled_h - target_height) // 2
    return scaled[top:top + target_height, :]
def tile_image(pattern, mask, dim, gap_x, gap_y, canvas_h, canvas_w, location, angle=0):
    """
    Tile the print across the canvas with the given X/Y gaps, with optional rotation.
    [Revised] tiling is anchored on the CENTRE of the tiled pattern.
    :param pattern: print image (BGR or BGRA)
    :param mask: binary mask of the print (255 = print pixels)
    :param dim: (w, h) size each tile is resized to before rotation
    :param canvas_h: output canvas height; canvas_w: output canvas width
    :param location: [[center_y, center_x]] -> centre coordinate of the first tile
    :param angle: rotation angle (degrees, counter-clockwise)
    :return: (tiled print composited on white, inverted tiled mask)
    """
    # 1. Make sure the input is RGBA so the gaps stay transparent.
    if pattern.shape[2] == 3:
        pattern = cv2.cvtColor(pattern, cv2.COLOR_BGR2BGRA)
    # 2. Scale and rotate the print.
    resized_p = cv2.resize(pattern, dim, interpolation=cv2.INTER_AREA)
    rotated_p = rotate_image(resized_p, angle)
    p_h, p_w = rotated_p.shape[:2]
    # 3. Build one transparent unit cell (the pattern sits at the cell centre).
    cell_h = p_h + gap_y
    cell_w = p_w + gap_x
    unit_cell = np.zeros((cell_h, cell_w, 4), dtype=np.uint8)
    # Top-left corner that centres the pattern inside the cell.
    start_y = (cell_h - p_h) // 2
    start_x = (cell_w - p_w) // 2
    unit_cell[start_y:start_y + p_h, start_x:start_x + p_w, :] = rotated_p
    # 4. Tile the unit cell (a few extra repetitions as a safety margin).
    tiles_y = (canvas_h // cell_h) + 3
    tiles_x = (canvas_w // cell_w) + 3
    full_tiled = np.tile(unit_cell, (tiles_y, tiles_x, 1))
    # 5. Compute the crop offset (key revision: centre-based anchoring).
    center_y, center_x = location[0][0], location[0][1]  # centre of the first tile
    # Choose where to start cropping so the tile centre lands on `location`.
    offset_y = int((center_y - (p_h // 2)) % cell_h)
    offset_x = int((center_x - (p_w // 2)) % cell_w)
    tiled_layer = full_tiled[offset_y: offset_y + canvas_h,
                             offset_x: offset_x + canvas_w]
    # 6. Composite onto a plain white background via the alpha channel.
    white_background = np.full((canvas_h, canvas_w, 3), 255, dtype=np.uint8)
    tiled_bgr = tiled_layer[:, :, :3]
    alpha_mask = tiled_layer[:, :, 3] / 255.0
    alpha_mask = cv2.merge([alpha_mask, alpha_mask, alpha_mask])
    tiled_print = (tiled_bgr * alpha_mask + white_background * (1 - alpha_mask)).astype(np.uint8)
    # ====================== mask handling ======================
    # The mask goes through the same centring / tiling / cropping pipeline.
    resized_mask = cv2.resize(mask, dim, interpolation=cv2.INTER_NEAREST)
    rotated_mask = rotate_image(resized_mask, angle)  # the mask must rotate as well
    unit_mask = np.zeros((cell_h, cell_w), dtype=np.uint8)
    unit_mask[start_y:start_y + p_h, start_x:start_x + p_w] = rotated_mask
    full_mask_tiled = np.tile(unit_mask, (tiles_y, tiles_x))
    tiled_mask = full_mask_tiled[offset_y: offset_y + canvas_h,
                                 offset_x: offset_x + canvas_w]
    return tiled_print, cv2.bitwise_not(tiled_mask)
def rotate_image(image, angle):
    """Rotate ``image`` by ``angle`` degrees (counter-clockwise for positive
    angles, per OpenCV), automatically enlarging the canvas so no content
    is clipped. ``angle == 0`` returns the input unchanged."""
    if angle == 0:
        return image
    h, w = image.shape[:2]
    center_x, center_y = w // 2, h // 2
    matrix = cv2.getRotationMatrix2D((center_x, center_y), angle, 1.0)
    # |cos| and |sin| of the rotation, straight from the matrix.
    cos_a = np.abs(matrix[0, 0])
    sin_a = np.abs(matrix[0, 1])
    # Bounding-box dimensions of the rotated image.
    new_w = int((h * sin_a) + (w * cos_a))
    new_h = int((h * cos_a) + (w * sin_a))
    # Re-centre the rotation inside the enlarged canvas.
    matrix[0, 2] += (new_w / 2) - center_x
    matrix[1, 2] += (new_h / 2) - center_y
    return cv2.warpAffine(image, matrix, (new_w, new_h))
def crop_image(image, image_size_h, image_size_w, location, print_shape):
    """Crop an ``image_size_h`` x ``image_size_w`` window out of a tiled image.

    The start offsets are the print location modulo the tile size, shifted
    by half a tile so the given coordinate lands mid-tile (remove the
    ``// 2`` terms to anchor at the top-left corner instead).
    """
    tile_h, tile_w = print_shape[0], print_shape[1]
    row_start = tile_w - int(location[0][1] % tile_w) + tile_w // 2
    col_start = tile_h - int(location[0][0] % tile_h) + tile_h // 2
    if len(image.shape) == 2:
        image = image[row_start:row_start + image_size_h, col_start:col_start + image_size_w]
    elif len(image.shape) == 3:
        image = image[row_start:row_start + image_size_h, col_start:col_start + image_size_w, :]
    return image