feat design: feature migration

zhouchengrong
2024-05-28 15:22:11 +08:00
parent ec2438f97f
commit a9dcd444c8
35 changed files with 3378 additions and 3 deletions

app/api/api_design.py Normal file

@@ -0,0 +1,28 @@
import logging
import time
from fastapi import APIRouter
from app.schemas.design import DesignModel
from app.service.design.service import generate
router = APIRouter()
logger = logging.getLogger()
@router.post("/design")
def design(request_data: DesignModel):
try:
logger.info(f"design request item is : @@@@@@:{request_data}")
code = 200
message = "access"
start_time = time.time()
data = generate(request_data=request_data)
logger.info(f"design Run time is @@@@@@:{time.time() - start_time}")
except Exception as e:
code = 400
message = str(e)
data = str(e)
logger.warning(f"design Run Exception @@@@@@:{e}")
logger.info({"code": code, "message": message, "data": data})
return {"code": code, "message": message, "data": data}


@@ -4,6 +4,7 @@ from app.api import api_test
from app.api import api_super_resolution
from app.api import api_generate_image
from app.api import api_attribute_retrieve
from app.api import api_design
router = APIRouter()
@@ -11,3 +12,4 @@ router.include_router(api_test.router, tags=["test"], prefix="/test")
router.include_router(api_super_resolution.router, tags=["super_resolution"], prefix="/api")
router.include_router(api_generate_image.router, tags=["generate_image"], prefix="/api")
router.include_router(api_attribute_retrieve.router, tags=["attribute_retrieve"], prefix="/api")
router.include_router(api_design.router, tags=['design'], prefix="/api")


@@ -80,15 +80,17 @@ GI_SYS_IMAGE_URL = "aida-sys-image/generate_image/white_image.jpg"
# SEG service config
SEG_MODEL_URL = '10.1.1.240:10000'
SEGMENTATION = {
"new_model_name": "seg_knet",
"name": "seg_ocrnet_hr18",
"input": "seg_input__0",
"output": "seg_output__0",
}
# DESIGN config
DESIGN_MODEL_URL = '10.1.1.240:9000'
DESIGN_MODEL_URL = '10.1.1.240:10000'
AIDA_CLOTHING = "aida-clothing"
KEYPOINT_RESULT_TABLE_FIELD_SET = ('neckline_left', 'neckline_right', 'shoulder_left', 'shoulder_right', 'armpit_left', 'armpit_right',
'cuff_left_in', 'cuff_left_out', 'cuff_right_in', 'cuff_right_out', 'waistband_left', 'waistband_right')
# Priority levels
PRIORITY_DICT = {
@@ -116,4 +118,3 @@ PRIORITY_DICT = {
'bag_back': -98,
'earring_back': -99,
}

app/schemas/design.py Normal file

@@ -0,0 +1,50 @@
from pydantic import BaseModel
# class BodyPointModel(BaseModel):
# waistband_right: list[int]
# hand_point_right: list[int]
# waistband_left: list[int]
# hand_point_left: list[int]
# shoulder_left: list[int]
# shoulder_right: list[int]
#
#
# class BasicModel(BaseModel):
# body_point: BodyPointModel
# layer_order: bool
# scale_bag: float
# scale_earrings: float
# self_template: bool
# single_overall: str
# switch_category: str
# body_path: str
#
#
# class PrintModel(BaseModel):
# if_single: bool
# print_path_list: list[str]
#
#
# class ItemModel(BaseModel):
# color: str
# image_id: str
# offset: list[int]
# path: str
# print: PrintModel
# resize_scale: float
# type: str
#
#
# class CollocationModel(BaseModel):
# basic: BasicModel
# item: list[ItemModel]
#
#
# class DesignModel(BaseModel):
# object: list[CollocationModel]
# process_id: str
class DesignModel(BaseModel):
objects: list[dict]
process_id: str
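A minimal sketch of how the active DesignModel validates a request body; the nested models above stay commented out in this commit, so objects is only checked to be a list of dicts. The sample values are illustrative.

from app.schemas.design import DesignModel

req = DesignModel(objects=[{"basic": {}, "items": []}], process_id="7296013643475027")
print(req.objects[0]["items"], req.process_id)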


@@ -0,0 +1,116 @@
import logging
import numpy as np
import cv2
from matplotlib import pyplot as plt
from PIL import Image
def show(img, win_name="temp"):
cv2.imshow(win_name, img)
cv2.waitKey(0)
def crop(img):
mid_point_h, mid_point_w = int(img.shape[0] / 2 + 30), int(img.shape[1] / 2)
img_roi = img[mid_point_h - 520: mid_point_h + 520, mid_point_w - 340: mid_point_w + 340]
return img_roi
class Layer(object):
def __init__(self):
self._layer = []
@property
def layer(self):
return self._layer
def insert(self, layer_instance):
if layer_instance['name'] == 'body':
self._body = layer_instance
self._layer.append(layer_instance)
def sort(self, priority):
self._layer.sort(key=lambda x: priority[x['name']])
# def merge(self, cfg):
# """
# opencv shape order (height, width, channel)
# image coordinate system:
# |------------->x (width)
# |
# |
# |
# y (height)
# Returns:
#
#
# """
# base_image = Image.new('RGBA', self._layer[1]['image'].size, (0, 0, 0, 0))
# for layer in self._layer:
# y, x = layer['position']
# base_image.paste(layer['image'], (x, y), layer['image'])
# # base_image.show()
#
# for x in self._layer:
# if np.all(x['mask'] == 0):
# continue
# # obtain region of interest about roi(roi) and item-image(roi_image, roi_mask)
# roi, roi_mask, roi_image, signal = self.get_roi(dst=dst, image=x)
# temp_bg = np.expand_dims(cv2.bitwise_not(roi_mask), axis=2).repeat(3, axis=2)
# tmp1 = (roi * (temp_bg / 255)).astype(np.uint8)
# temp_fg = np.expand_dims(roi_mask, axis=2).repeat(3, axis=2)
# tmp2 = (roi_image * (temp_fg / 255)).astype(np.uint8)
#
# roi[:] = cv2.add(tmp1, tmp2)
# # show(cv2.resize(dst, (int(dst.shape[1] * 0.5), int(dst.shape[0] * 0.5)), interpolation=cv2.INTER_AREA),
# # win_name=x.get('name'))
# # crop image and get the central part
# if cfg.get('basic')['self_template'] == False:
# dst_roi = crop(dst)
# else:
# dst_roi = dst
# return dst_roi, signal
#
# @staticmethod
# def get_roi(dst, image):
# signal = False
# dst_y, dst_x = dst.shape[:2]
# roi_height, roi_width = image['mask'].shape
# roi_y0, roi_x0 = image['position']
#
# if roi_y0 < 0:
# roi_yin = 0
# mask_yin = -roi_y0
# signal = True
# else:
# roi_yin = roi_y0
# mask_yin = 0
# if roi_y0 + roi_height > dst_y:
# roi_yout = dst_y
# mask_yout = dst_y - roi_y0
# signal = True
# else:
# roi_yout = roi_height + roi_y0
# mask_yout = roi_height
# # x part
# if roi_x0 < 0:
# roi_xin = 0
# mask_xin = -roi_x0
# signal = True
# else:
# roi_xin = roi_x0
# mask_xin = 0
# if roi_x0 + roi_width > dst_x:
# roi_xout = dst_x
# mask_xout = dst_x - roi_x0
# signal = True
# else:
# roi_xout = roi_width + roi_x0
# mask_xout = roi_width
#
# roi = dst[roi_yin: roi_yout, roi_xin: roi_xout]
# roi_mask = image['mask'][mask_yin: mask_yout, mask_xin: mask_xout]
# roi_image = image['image'][mask_yin: mask_yout, mask_xin: mask_xout]
# return roi, roi_mask, roi_image, signal
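A sketch of how the active part of Layer is driven (the merge logic above stays commented out in this commit): layers are inserted as dicts and sorted by a name-to-priority mapping, lowest priority first, so background layers precede foreground ones. The layer dicts below are illustrative; only the 'name' key is needed for sorting.

layer = Layer()
layer.insert({'name': 'body', 'image': None, 'position': (0, 0)})
layer.insert({'name': 'blouse_front', 'image': None, 'position': (10, 20)})
layer.sort(priority={'body': 0, 'blouse_front': 17})
print([x['name'] for x in layer.layer])  # ['body', 'blouse_front'], pasted back to front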


@@ -0,0 +1,45 @@
class Priority(object):
"""Item layer priority levels.
"""
def __init__(self, item_list):
self._priority = dict(
earring_front=99,
bag_front=98,
hairstyle_front=97,
outwear_front=20,
bottoms_front=19,
dress_front=18,
blouse_front=17,
skirt_front=16,
trousers_front=15,
tops_front=14,
shoes_right=1,
shoes_left=1,
body=0,
tops_back=-14,
trousers_back=-15,
skirt_back=-16,
blouse_back=-17,
dress_back=-18,
bottoms_back=-19,
outwear_back=-20,
hairstyle_back=-97,
bag_back=-98,
earring_back=-99,
)
self.clothing_start_num = 10
if not isinstance(item_list, list):
raise ValueError('item_list must be a list!')
for cate in item_list:
cate = cate.lower()
if cate not in ('outwear', 'dress', 'blouse', 'skirt', 'trousers', 'tops', 'bottoms'):
raise ValueError(f'Item type error. Cannot recognize {cate}')
for i, cate in enumerate(item_list):
cate = cate.lower()
self._priority[f'{cate}_front'] = self.clothing_start_num - i
self._priority[f'{cate}_back'] = -(self.clothing_start_num - i)
@property
def priority(self):
return self._priority
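A short illustration of how Priority re-ranks user-supplied garments: the requested items are renumbered downward from clothing_start_num while accessories and the body keep their fixed levels. The values follow directly from the constructor above.

p = Priority(['Blouse', 'Trousers'])
print(p.priority['blouse_front'], p.priority['trousers_front'])  # 10 9
print(p.priority['blouse_back'], p.priority['trousers_back'])    # -10 -9
print(p.priority['body'], p.priority['earring_front'])           # 0 99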


@@ -0,0 +1,101 @@
{
"objects": [
{
"basic": {
"body_point": {
"waistband_right": [
1081,
1318
],
"hand_point_right": [
1200,
1857
],
"waistband_left": [
639,
1315
],
"hand_point_left": [
493,
1808
],
"shoulder_left": [
556,
582
],
"shoulder_right": [
1130,
576
]
},
"layer_order": false,
"scale_bag": 0.7,
"scale_earrings": 0.16,
"self_template": true,
"single_overall": "overall",
"switch_category": ""
},
"items": [
{
"color": "151 78 78",
"icon": "none",
"image_id": 67315,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/trousers/0628000325.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Trousers"
},
{
"color": "151 78 78",
"icon": "none",
"image_id": 92912,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/blouse/0825001943.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Blouse"
},
{
"color": "151 78 78",
"icon": "none",
"image_id": 91430,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/outwear/0825000856.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Outwear"
},
{
"body_path": "aida-users/89/models/female/5d39394e-9809-43c2-80b8-4e96497b1974.png",
"image_id": 69331,
"offset": [
1,
1
],
"resize_scale": 1.0,
"type": "Body"
}
]
}
],
"process_id": "7296013643475027"
}


@@ -0,0 +1,684 @@
{
"objects": [
{
"basic": {
"body_point_test": {
"waistband_right": [
1081,
1318
],
"hand_point_right": [
1200,
1857
],
"waistband_left": [
639,
1315
],
"hand_point_left": [
493,
1808
],
"shoulder_left": [
556,
582
],
"shoulder_right": [
1130,
576
]
},
"layer_order": false,
"scale_bag": 0.7,
"scale_earrings": 0.16,
"self_template": true,
"single_overall": "overall",
"switch_category": ""
},
"items": [
{
"color": "151 78 78",
"icon": "none",
"image_id": 67315,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/trousers/0628000325.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Trousers"
},
{
"color": "151 78 78",
"icon": "none",
"image_id": 92912,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/blouse/0825001943.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Blouse"
},
{
"color": "151 78 78",
"icon": "none",
"image_id": 91430,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/outwear/0825000856.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Outwear"
},
{
"body_path": "aida-users/89/models/female/5d39394e-9809-43c2-80b8-4e96497b1974.png",
"image_id": 69331,
"offset": [
1,
1
],
"resize_scale": 1.0,
"type": "Body"
}
]
}
,
{
"basic": {
"body_point_test": {
"waistband_right": [
1081,
1318
],
"hand_point_right": [
1200,
1857
],
"waistband_left": [
639,
1315
],
"hand_point_left": [
493,
1808
],
"shoulder_left": [
556,
582
],
"shoulder_right": [
1130,
576
]
},
"layer_order": false,
"scale_bag": 0.7,
"scale_earrings": 0.16,
"self_template": true,
"single_overall": "overall",
"switch_category": ""
},
"items": [
{
"color": "151 78 78",
"icon": "none",
"image_id": 92913,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/dress/826000033.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Dress"
},
{
"body_path": "aida-users/89/models/female/5d39394e-9809-43c2-80b8-4e96497b1974.png",
"image_id": 69331,
"offset": [
1,
1
],
"resize_scale": 1.0,
"type": "Body"
}
]
}
,
{
"basic": {
"body_point_test": {
"waistband_right": [
1081,
1318
],
"hand_point_right": [
1200,
1857
],
"waistband_left": [
639,
1315
],
"hand_point_left": [
493,
1808
],
"shoulder_left": [
556,
582
],
"shoulder_right": [
1130,
576
]
},
"layer_order": false,
"scale_bag": 0.7,
"scale_earrings": 0.16,
"self_template": true,
"single_overall": "overall",
"switch_category": ""
},
"items": [
{
"color": "151 78 78",
"icon": "none",
"image_id": 92914,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/skirt/0902001788.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Skirt"
},
{
"color": "151 78 78",
"icon": "none",
"image_id": 92915,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/blouse/0902003817.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Blouse"
},
{
"body_path": "aida-users/89/models/female/5d39394e-9809-43c2-80b8-4e96497b1974.png",
"image_id": 69331,
"offset": [
1,
1
],
"resize_scale": 1.0,
"type": "Body"
}
]
}
,
{
"basic": {
"body_point_test": {
"waistband_right": [
1081,
1318
],
"hand_point_right": [
1200,
1857
],
"waistband_left": [
639,
1315
],
"hand_point_left": [
493,
1808
],
"shoulder_left": [
556,
582
],
"shoulder_right": [
1130,
576
]
},
"layer_order": false,
"scale_bag": 0.7,
"scale_earrings": 0.16,
"self_template": true,
"single_overall": "overall",
"switch_category": ""
},
"items": [
{
"color": "151 78 78",
"icon": "none",
"image_id": 92916,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/skirt/skirt_p4_838.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Skirt"
},
{
"color": "151 78 78",
"icon": "none",
"image_id": 84210,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/blouse/0916000703.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Blouse"
},
{
"body_path": "aida-users/89/models/female/5d39394e-9809-43c2-80b8-4e96497b1974.png",
"image_id": 69331,
"offset": [
1,
1
],
"resize_scale": 1.0,
"type": "Body"
}
]
}
,
{
"basic": {
"body_point_test": {
"waistband_right": [
1081,
1318
],
"hand_point_right": [
1200,
1857
],
"waistband_left": [
639,
1315
],
"hand_point_left": [
493,
1808
],
"shoulder_left": [
556,
582
],
"shoulder_right": [
1130,
576
]
},
"layer_order": false,
"scale_bag": 0.7,
"scale_earrings": 0.16,
"self_template": true,
"single_overall": "overall",
"switch_category": ""
},
"items": [
{
"color": "151 78 78",
"icon": "none",
"image_id": 62041,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/outwear/0902000232.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Outwear"
},
{
"color": "151 78 78",
"icon": "none",
"image_id": 67039,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/blouse/0902002591.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Blouse"
},
{
"color": "151 78 78",
"icon": "none",
"image_id": 78016,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/trousers/trousers_p4_302.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Trousers"
},
{
"body_path": "aida-users/89/models/female/5d39394e-9809-43c2-80b8-4e96497b1974.png",
"image_id": 69331,
"offset": [
1,
1
],
"resize_scale": 1.0,
"type": "Body"
}
]
}
,
{
"basic": {
"body_point_test": {
"waistband_right": [
1081,
1318
],
"hand_point_right": [
1200,
1857
],
"waistband_left": [
639,
1315
],
"hand_point_left": [
493,
1808
],
"shoulder_left": [
556,
582
],
"shoulder_right": [
1130,
576
]
},
"layer_order": false,
"scale_bag": 0.7,
"scale_earrings": 0.16,
"self_template": true,
"single_overall": "overall",
"switch_category": ""
},
"items": [
{
"color": "151 78 78",
"icon": "none",
"image_id": 92917,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/trousers/0902001403.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Trousers"
},
{
"color": "151 78 78",
"icon": "none",
"image_id": 92306,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/blouse/0902001766.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Blouse"
},
{
"body_path": "aida-users/89/models/female/5d39394e-9809-43c2-80b8-4e96497b1974.png",
"image_id": 69331,
"offset": [
1,
1
],
"resize_scale": 1.0,
"type": "Body"
}
]
}
,
{
"basic": {
"body_point_test": {
"waistband_right": [
1081,
1318
],
"hand_point_right": [
1200,
1857
],
"waistband_left": [
639,
1315
],
"hand_point_left": [
493,
1808
],
"shoulder_left": [
556,
582
],
"shoulder_right": [
1130,
576
]
},
"layer_order": false,
"scale_bag": 0.7,
"scale_earrings": 0.16,
"self_template": true,
"single_overall": "overall",
"switch_category": ""
},
"items": [
{
"color": "151 78 78",
"icon": "none",
"image_id": 86564,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/blouse/0916000038.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Blouse"
},
{
"color": "151 78 78",
"icon": "none",
"image_id": 92918,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/trousers/0628001561.jpeg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Trousers"
},
{
"color": "151 78 78",
"icon": "none",
"image_id": 92919,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/outwear/outwear_p3186.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Outwear"
},
{
"body_path": "aida-users/89/models/female/5d39394e-9809-43c2-80b8-4e96497b1974.png",
"image_id": 69331,
"offset": [
1,
1
],
"resize_scale": 1.0,
"type": "Body"
}
]
}
,
{
"basic": {
"body_point_test": {
"waistband_right": [
1081,
1318
],
"hand_point_right": [
1200,
1857
],
"waistband_left": [
639,
1315
],
"hand_point_left": [
493,
1808
],
"shoulder_left": [
556,
582
],
"shoulder_right": [
1130,
576
]
},
"layer_order": false,
"scale_bag": 0.7,
"scale_earrings": 0.16,
"self_template": true,
"single_overall": "overall",
"switch_category": ""
},
"items": [
{
"color": "151 78 78",
"icon": "none",
"image_id": 67009,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/blouse/0902002051.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Blouse"
},
{
"color": "151 78 78",
"icon": "none",
"image_id": 85028,
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/skirt/903000142.jpg",
"print": {
"IfSingle": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Skirt"
},
{
"body_path": "aida-users/89/models/female/5d39394e-9809-43c2-80b8-4e96497b1974.png",
"image_id": 69331,
"offset": [
1,
1
],
"resize_scale": 1.0,
"type": "Body"
}
]
}
],
"process_id": "7296013643475027"
}


@@ -0,0 +1,69 @@
{
"basic": {
"body_point": {
"waistband_right": [
1081,
1318
],
"hand_point_right": [
1200,
1857
],
"waistband_left": [
639,
1315
],
"hand_point_left": [
493,
1808
],
"shoulder_left": [
556,
582
],
"shoulder_right": [
1130,
576
]
},
"layer_order": false,
"scale_bag": 0.7,
"scale_earrings": 0.16,
"self_template": true,
"single_overall": "single",
"switch_category": "Trousers",
"body_path": "aida-users/89/models/female/5d39394e-9809-43c2-80b8-4e96497b1974.png"
},
"item": [
{
"color": "151 78 78",
"image_id": "67315",
"offset": [
1,
1
],
"path": "aida-sys-image/images/female/trousers/0628000325.jpg",
"print": {
"if_single": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Trousers"
},
{
"color": "151 78 78",
"path": "aida-users/89/models/female/5d39394e-9809-43c2-80b8-4e96497b1974.png",
"image_id": 69331,
"offset": [
1,
1
],
"print": {
"if_single": false,
"print_path_list": []
},
"resize_scale": 1.0,
"type": "Body"
}
]
}


@@ -0,0 +1,16 @@
from .builder import ITEMS, build_item
from .clothing import Clothing # 4.0 sec
from .body import Body
from .top import Top, Blouse, Outwear, Dress
from .bottom import Bottom, Trousers, Skirt
from .shoes import Shoes
from .bag import Bag
from .accessories import Hairstyle, Earring
__all__ = [
'ITEMS', 'build_item',
'Clothing', 'Body',
'Top', 'Blouse', 'Outwear', 'Dress',
'Bottom', 'Trousers', 'Skirt',
'Shoes', 'Bag', 'Hairstyle', 'Earring'
]


@@ -0,0 +1,59 @@
from .builder import ITEMS
from .clothing import Clothing
@ITEMS.register_module()
class Hairstyle(Clothing):
def __init__(self, **kwargs):
pipeline = [
dict(type='LoadImageFromFile', path=kwargs['path']),
dict(type='KeypointDetection'),
dict(type='ContourDetection'),
dict(type='Painting'),
dict(type='Scaling'),
dict(type='Split'),
# dict(type='ImageShow', key=['image', 'mask', 'pattern_image']),
]
kwargs.update(pipeline=pipeline)
super(Hairstyle, self).__init__(**kwargs)
@staticmethod
def calculate_start_point(keypoint_type, scale, clothes_point, body_point):
"""
align up
Args:
keypoint_type: string, "head_point"
scale: float
clothes_point: dict{'left': [x1, y1, z1], 'right': [x2, y2, z2]}
body_point: dict, containing keypoint data of body figure
Returns:
start_point: tuple (x', y')
x' = y_body - y1 * scale
y' = x_body - x1 * scale
"""
side_indicator = f'{keypoint_type}_up'
# clothes_point = {k: tuple(map(lambda x: int(scale * x), v[0: 2])) for k, v in clothes_point.items()}
# logging.info(clothes_point[side_indicator])
start_point = (
int(body_point[side_indicator][1] - int(clothes_point[side_indicator].split("_")[1]) * scale),
int(body_point[side_indicator][0] - int(clothes_point[side_indicator].split("_")[0]) * scale)
)
return start_point
@ITEMS.register_module()
class Earring(Clothing):
def __init__(self, **kwargs):
pipeline = [
dict(type='LoadImageFromFile', path=kwargs['path']),
dict(type='KeypointDetection'),
dict(type='ContourDetection'),
dict(type='Painting'),
dict(type='Scaling'),
dict(type='Split'),
# dict(type='ImageShow', key=['image', 'mask', 'pattern_image']),
]
kwargs.update(pipeline=pipeline)
super(Earring, self).__init__(**kwargs)


@@ -0,0 +1,44 @@
from .builder import ITEMS
from .clothing import Clothing
import random
@ITEMS.register_module()
class Bag(Clothing):
def __init__(self, **kwargs):
pipeline = [
dict(type='LoadImageFromFile', path=kwargs['path'], color=kwargs['color']),
dict(type='KeypointDetection'),
dict(type='ContourDetection'),
dict(type='Painting'),
dict(type='Scaling'),
dict(type='Split'),
# dict(type='ImageShow', key=['image', 'mask', 'pattern_image']),
]
kwargs.update(pipeline=pipeline)
super(Bag, self).__init__(**kwargs)
@staticmethod
def calculate_start_point(keypoint_type, scale, clothes_point, body_point):
"""
align left
Args:
keypoint_type: string, "hand_point"
scale: float
clothes_point: dict{'left': [x1, y1, z1], 'right': [x2, y2, z2]}
body_point: dict, containing keypoint data of body figure
Returns:
start_point: tuple (y', x')
x' = y_body - y1 * scale
y' = x_body - x1 * scale
"""
location = random.choice(seq=['left', 'right'])
if location == 'left':
side_indicator = f'{keypoint_type}_left'
else:
side_indicator = f'{keypoint_type}_right'
# clothes_point = {k: tuple(map(lambda x: int(scale * x), v[0: 2])) for k, v in clothes_point.items()}
start_point = (body_point[side_indicator][1] - int(int(clothes_point[keypoint_type].split("_")[1]) * scale),
body_point[side_indicator][0] - int(int(clothes_point[keypoint_type].split("_")[0]) * scale))
return start_point


@@ -0,0 +1,35 @@
import cv2
from .builder import ITEMS
from .pipelines import Compose
@ITEMS.register_module()
class Body(object):
def __init__(self, **kwargs):
pipeline = [
dict(type='LoadBodyImageFromFile', body_path=kwargs['body_path']),
# dict(type='ImageShow', key=['body_image', "body_mask"])
]
self.pipeline = Compose(pipeline)
self.result = dict()
def process(self):
self.pipeline(self.result)
pass
def organize(self, layer):
body_layer = dict(priority=0,
name=type(self).__name__.lower(),
image=self.result['body_image'],
image_url=self.result['image_url'],
mask_image=None,
mask_url=None,
sacle=1,
# mask=self.result['body_mask'],
position=(0, 0))
layer.insert(body_layer)
@staticmethod
def show(img):
cv2.imshow('', img)
cv2.waitKey(0)
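A minimal sketch of how Body is used by the service layer, assuming an existing Layer instance named layer; the body_path value is illustrative and loading requires the MinIO settings from app.core.config.

body = Body(body_path='aida-users/89/models/female/example.png')  # illustrative path
body.process()        # runs LoadBodyImageFromFile and fills body.result
body.organize(layer)  # inserts the mannequin as the priority-0 base layer at (0, 0)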


@@ -0,0 +1,38 @@
from .builder import ITEMS
from .clothing import Clothing
@ITEMS.register_module()
class Bottom(Clothing):
def __init__(self, pipeline, **kwargs):
if pipeline is None:
pipeline = [
dict(type='LoadImageFromFile', path=kwargs['path'], color=kwargs['color'], print_dict=kwargs['print']),
dict(type='KeypointDetection'),
dict(type='ContourDetection'),
dict(type='Painting', painting_flag=True),
dict(type='PrintPainting', print_flag=True),
dict(type='Scaling'),
dict(type='Split'),
# dict(type='ImageShow', key=['image', 'mask', 'pattern_image', 'print_image']),
]
kwargs.update(pipeline=pipeline)
super(Bottom, self).__init__(**kwargs)
@ITEMS.register_module()
class Trousers(Bottom):
def __init__(self, pipeline=None, **kwargs):
super(Trousers, self).__init__(pipeline, **kwargs)
@ITEMS.register_module()
class Skirt(Bottom):
def __init__(self, pipeline=None, **kwargs):
super(Skirt, self).__init__(pipeline, **kwargs)
@ITEMS.register_module()
class Bottoms(Bottom):
def __init__(self, pipeline=None, **kwargs):
super(Bottoms, self).__init__(pipeline, **kwargs)


@@ -0,0 +1,9 @@
from mmcv.utils import Registry, build_from_cfg
ITEMS = Registry('item')
PIPELINES = Registry('pipeline')
def build_item(cfg, default_args=None):
item = build_from_cfg(cfg, ITEMS, default_args)
return item
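A sketch of the registry pattern used throughout this package: item classes register themselves with @ITEMS.register_module() and are instantiated from a config dict whose 'type' key names the class, via mmcv's build_from_cfg. The config values below are illustrative, and process() needs the MinIO, Milvus and keypoint services configured elsewhere.

cfg = dict(type='Trousers', path='aida-clothing/example.jpg', color='151 78 78',
           print={'IfSingle': False, 'print_path_list': []},
           image_id=1, offset=[1, 1], resize_scale=1.0)
item = build_item(cfg)  # returns a Trousers instance built from the ITEMS registry
item.process()          # runs the item's default pipeline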


@@ -0,0 +1,96 @@
import cv2
from app.core.config import PRIORITY_DICT
from .builder import ITEMS
from .pipelines import Compose
@ITEMS.register_module()
class Clothing(object):
def __init__(self, pipeline, **kwargs):
self.pipeline = Compose(pipeline)
self.result = dict(name=type(self).__name__.lower(), **kwargs)
def process(self):
self.pipeline(self.result)
def apply_scale(self, img):
scale = self.result['scale']
height, width = img.shape[0: 2]
if len(img.shape) > 2:
height, width = img.shape[0: 2]
scaled_img = cv2.resize(img, (int(width * scale), int(height * scale)), interpolation=cv2.INTER_AREA)
return scaled_img
def organize(self, layer):
start_point = self.calculate_start_point(self.result['keypoint'], self.result['scale'], self.result['clothes_keypoint'], self.result['body_point_test'], self.result["offset"], self.result["resize_scale"])
front_layer = dict(priority=self.result.get("priority", None) if self.result.get("layer_order", False) else PRIORITY_DICT.get(f'{type(self).__name__.lower()}_front', None),
name=f'{type(self).__name__.lower()}_front',
image=self.result["front_image"],
# mask_image=self.result['front_mask_image'],
image_url=self.result['front_image_url'],
mask_url=self.result['front_mask_url'],
sacle=self.result['scale'],
clothes_keypoint=self.result['clothes_keypoint'],
position=start_point,
resize_scale=self.result["resize_scale"],
mask=cv2.resize(self.result['mask'], self.result["front_image"].size),
gradient_string=self.result['gradient_string'] if 'gradient_string' in self.result.keys() else ""
)
layer.insert(front_layer)
back_layer = dict(priority=-self.result.get("priority", 0) if self.result.get("layer_order", False) else PRIORITY_DICT.get(f'{type(self).__name__.lower()}_back', None),
name=f'{type(self).__name__.lower()}_back',
image=self.result["back_image"],
# mask_image=self.result['back_mask_image'],
image_url=self.result['back_image_url'],
mask_url=self.result['back_mask_url'],
sacle=self.result['scale'],
clothes_keypoint=self.result['clothes_keypoint'],
position=start_point,
resize_scale=self.result["resize_scale"],
mask=cv2.resize(self.result['mask'], self.result["front_image"].size),
gradient_string=self.result['gradient_string'] if 'gradient_string' in self.result.keys() else ""
)
layer.insert(back_layer)
@staticmethod
def calculate_start_point(keypoint_type, scale, clothes_point, body_point, offset, resize_scale):
"""
Align left
Args:
keypoint_type: string, "waistband" | "shoulder" | "ear_point"
scale: float
clothes_point: dict{'left': [x1, y1, z1], 'right': [x2, y2, z2]}
body_point: dict, containing keypoint data of body figure
Returns:
start_point: tuple (x', y')
x' = y_body - y1 * scale + offset
y' = x_body - x1 * scale + offset
"""
side_indicator = f'{keypoint_type}_left'
# if keypoint_type == "ear_point":
# start_point = (body_point[side_indicator][1] - int(int(clothes_point[side_indicator].split("_")[1]) * scale),
# body_point[side_indicator][0] - int(int(clothes_point[side_indicator].split("_")[0]) * scale))
# else:
# start_point = (
# int(body_point[side_indicator][1] + offset[1] - int(clothes_point[side_indicator].split("_")[0]) * scale), # y
# int(body_point[side_indicator][0] + offset[0] - int(clothes_point[side_indicator].split("_")[1]) * scale) # x
# )
# milvus_DB_keypoint_cache:
start_point = (
int(body_point[side_indicator][1] + offset[1] - int(clothes_point[side_indicator][0]) * scale), # y
int(body_point[side_indicator][0] + offset[0] - int(clothes_point[side_indicator][1]) * scale) # x
)
# start_point = (
# int(body_point[side_indicator][1] + offset[1] - int(clothes_point[side_indicator].split("_")[0]) * scale), # y
# int(body_point[side_indicator][0] + offset[0] - int(clothes_point[side_indicator].split("_")[1]) * scale) # x
# )
return start_point
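A worked example of the start-point arithmetic above, with illustrative numbers: with the body's left shoulder at (x=556, y=582) as in the sample payloads, a clothes keypoint of [40, 12], scale 2.0 and offset [1, 1], the paste position is y' = 582 + 1 - 40 * 2 = 503 and x' = 556 + 1 - 12 * 2 = 533.

start = Clothing.calculate_start_point(
    keypoint_type='shoulder', scale=2.0,
    clothes_point={'shoulder_left': [40, 12]},
    body_point={'shoulder_left': [556, 582]},
    offset=[1, 1], resize_scale=1.0)
print(start)  # (503, 533), i.e. (y, x) in image coordinates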


@@ -0,0 +1,19 @@
from .compose import Compose
from .loading import LoadImageFromFile, LoadBodyImageFromFile, ImageShow
from .keypoints import KeypointDetection
from .segmentation import Segmentation
from .painting import Painting, PrintPainting
from .scale import Scaling
from .contour_detection import ContourDetection
from .split import Split
__all__ = [
'Compose',
'LoadImageFromFile', 'LoadBodyImageFromFile', 'ImageShow',
'KeypointDetection',
'Segmentation',
'Painting', 'PrintPainting',
'Scaling',
'ContourDetection',
'Split',
]


@@ -0,0 +1,36 @@
import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose(object):
def __init__(self, transforms):
assert isinstance(transforms, collections.abc.Sequence)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError('transform must be callable or a dict')
def __call__(self, data):
"""Call function to apply transforms sequentially.
Args:
data (dict): A result dict contains the data to transform.
Returns:
dict: Transformed data.
"""
for t in self.transforms:
data = t(data)
if data is None:
return None
return data
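A sketch of how Compose builds a pipeline from config dicts via the PIPELINES registry and threads one shared result dict through every stage; the configs and result keys below are illustrative, and the loading stage needs the MinIO settings from app.core.config.

pipeline = Compose([
    dict(type='LoadImageFromFile', path='aida-clothing/example.jpg', color='151 78 78'),
    dict(type='ContourDetection'),
])
result = pipeline(dict(name='blouse', image_id=1))  # each stage reads and writes keys on the same dict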


@@ -0,0 +1,59 @@
import logging
from ..builder import PIPELINES
import cv2
import numpy as np
@PIPELINES.register_module()
class ContourDetection(object):
def __init__(self):
# logging.info("ContourDetection run ")
pass
#@ RunTime
def __call__(self, result):
# shoe diff
if result['name'] == 'shoes':
Contour = self.get_contours(result['image'])
Mask = np.zeros(result['image'].shape[:2], np.uint8)
for i in range(2):
Max_contour = Contour[i]
Epsilon = 0.001 * cv2.arcLength(Max_contour, True)
Approx = cv2.approxPolyDP(Max_contour, Epsilon, True)
cv2.drawContours(Mask, [Approx], -1, 255, -1)
if result['pre_mask'] is None:
result['mask'] = Mask
else:
result['mask'] = cv2.bitwise_and(Mask, result['pre_mask'])
else:
Contour = self.get_contours(result['image'])
Mask = np.zeros(result['image'].shape[:2], np.uint8)
if len(Contour):
Max_contour = Contour[0]
Epsilon = 0.001 * cv2.arcLength(Max_contour, True)
Approx = cv2.approxPolyDP(Max_contour, Epsilon, True)
cv2.drawContours(Mask, [Approx], -1, 255, -1)
else:
Mask = np.ones(result['image'].shape[:2], np.uint8) * 255
# TODO: fix the transparency issue seen on some images; planned for the next release
# img2gray = cv2.cvtColor(result['image'], cv2.COLOR_BGR2GRAY)
# ret, Mask = cv2.threshold(img2gray, 126, 255, cv2.THRESH_BINARY)
# Mask = cv2.bitwise_not(Mask)
if result['pre_mask'] is None:
result['mask'] = Mask
else:
result['mask'] = cv2.bitwise_and(Mask, result['pre_mask'])
return result
@staticmethod
def get_contours(image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
Edge = cv2.Canny(gray, 10, 150)
kernel = np.ones((5, 5), np.uint8)
Edge = cv2.dilate(Edge, kernel=kernel, iterations=1)
Edge = cv2.erode(Edge, kernel=kernel, iterations=1)
Contour, _ = cv2.findContours(Edge, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
Contour = sorted(Contour, key=cv2.contourArea, reverse=True)
return Contour


@@ -0,0 +1,148 @@
import logging
import time
import numpy as np
from pymilvus import MilvusClient
from app.core.config import *
from ..builder import PIPELINES
from ...utils.design_ensemble import get_keypoint_result
@PIPELINES.register_module()
class KeypointDetection(object):
"""
path here: abstract path
"""
def __init__(self):
self.client = MilvusClient(
uri="http://10.1.1.240:19530",
token="root:Milvus",
db_name=MILVUS_ALIAS
)
def __del__(self):
# start_time = time.time()
self.client.close()
# print(f"client close time : {time.time() - start_time}")
# @ RunTime
def __call__(self, result):
# logging.info("KeypointDetection run ")
if result['name'] in ['blouse', 'skirt', 'dress', 'outwear', 'trousers', 'tops', 'bottoms']:  # check whether cached keypoints exist for the same category: read them directly if so, otherwise infer and update the cache
# result['clothes_keypoint'] = self.infer_keypoint_result(result)
site = 'up' if result['name'] in ['blouse', 'outwear', 'dress', 'tops'] else 'down'
# keypoint_cache = search_keypoint_cache(result["image_id"], site)
# vector-search lookup dropped; go straight through model inference
keypoint_cache = self.keypoint_cache(result, site)
# keypoint_cache = False
if keypoint_cache is False:
keypoint_infer_result, site = self.infer_keypoint_result(result)
result['clothes_keypoint'] = self.save_keypoint_cache(result["image_id"], keypoint_infer_result, site)
else:
result['clothes_keypoint'] = keypoint_cache
return result
@staticmethod
def infer_keypoint_result(result):
site = 'up' if result['name'] in ['blouse', 'outwear', 'dress', 'tops'] else 'down'
start_time = time.time()
keypoint_infer_result = get_keypoint_result(result["image"], site) # 推理结果
# logging.info(f"infer keypoint time : {time.time() - start_time}")
return keypoint_infer_result, site
@staticmethod
# @ RunTime
def save_keypoint_cache(keypoint_id, cache, site):  # uses KEYPOINT_RESULT_TABLE_FIELD_SET from app.core.config
if site == "down":
zeros = np.zeros(20, dtype=int)
result = np.concatenate([zeros, cache.flatten()])
else:
zeros = np.zeros(4, dtype=int)
result = np.concatenate([cache.flatten(), zeros])
# vector save dropped; take the result directly
data = [
{"keypoint_id": keypoint_id,
"keypoint_site": site,
"keypoint_vector": result.tolist()
}
]
client = MilvusClient(
uri="http://10.1.1.240:19530",
token="root:Milvus",
db_name=MILVUS_ALIAS
)
try:
start_time = time.time()
res = client.upsert(
collection_name=MILVUS_TABLE_KEYPOINT,
data=data,
)
# logging.info(f"save keypoint time : {time.time() - start_time}")
return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist()))
except Exception as e:
logging.info(f"save keypoint cache milvus error : {e}")
return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist()))
finally:
client.close()
@staticmethod
def update_keypoint_cache(keypoint_id, infer_result, search_result, site):
if site == "up":
# the requested site is 'up', so the inferred part is 'up' and the cached part being merged is 'down'
result = np.concatenate([infer_result.flatten(), search_result[-4:]])
else:
# the requested site is 'down', so the inferred part is 'down' and the cached part being merged is 'up'
result = np.concatenate([search_result[:20], infer_result.flatten()])
data = [
{"keypoint_id": keypoint_id,
"keypoint_site": "all",
"keypoint_vector": result.tolist()
}
]
client = MilvusClient(
uri="http://10.1.1.240:19530",
token="root:Milvus",
db_name=MILVUS_ALIAS
)
try:
# connections.connect(alias=MILVUS_ALIAS, host=MILVUS_DB_HOST, port=MILVUS_PORT)
start_time = time.time()
# collection = Collection(MILVUS_TABLE_KEYPOINT) # Get an existing collection.
# mr = collection.upsert(data)
client.upsert(
collection_name=MILVUS_TABLE_KEYPOINT,
data=data
)
# logging.info(f"save keypoint time : {time.time() - start_time}")
return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist()))
except Exception as e:
logging.info(f"save keypoint cache milvus error : {e}")
return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist()))
# @ RunTime
def keypoint_cache(self, result, site):
try:
keypoint_id = result['image_id']
res = self.client.query(
collection_name=MILVUS_TABLE_KEYPOINT,
# ids=[keypoint_id],
filter=f"keypoint_id == {keypoint_id}",
output_fields=['keypoint_vector', 'keypoint_site']
)
if len(res) == 0:
# no cached result: infer directly and save it
keypoint_infer_result, site = self.infer_keypoint_result(result)
return self.save_keypoint_cache(result['image_id'], keypoint_infer_result, site)
elif res[0]["keypoint_site"] == "all" or res[0]["keypoint_site"] == site:
# the requested site matches the cached site (or the cached site is 'all'): return the cached result directly
return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, np.array(res[0]['keypoint_vector']).astype(int).reshape(12, 2).tolist()))
elif res[0]["keypoint_site"] != site:
# the requested site differs from the cached one: infer the missing part and update the cache to 'all'
keypoint_infer_result, site = self.infer_keypoint_result(result)
return self.update_keypoint_cache(result["image_id"], keypoint_infer_result, res[0]['keypoint_vector'], site)
except Exception as e:
logging.info(f"search keypoint cache milvus error {e}")
return False
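A small sketch of the 24-value vector layout assumed by the cache logic above: the first 20 values hold the ten upper-garment keypoints as flattened (x, y) pairs, the last 4 hold the two waistband points, and the whole vector reshapes to 12 pairs matching KEYPOINT_RESULT_TABLE_FIELD_SET. The values below are illustrative.

import numpy as np
from app.core.config import KEYPOINT_RESULT_TABLE_FIELD_SET

up = np.arange(20)  # pretend inferred 'up' keypoints, flattened
vector = np.concatenate([up, np.zeros(4, dtype=int)])  # same packing as save_keypoint_cache for site == 'up'
pairs = vector.reshape(12, 2).astype(int).tolist()
cache = dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, pairs))  # e.g. {'neckline_left': [0, 1], ...}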


@@ -0,0 +1,143 @@
import io
import logging
import time
import cv2
import numpy as np
from PIL import Image
from minio import Minio
from app.core.config import *
from ..builder import PIPELINES
@PIPELINES.register_module()
class LoadImageFromFile(object):
def __init__(self, path, color=None, print_dict=None):
self.path = path
self.color = color
self.print_dict = print_dict
self.minio_client = Minio(
f"{MINIO_URL}",
access_key=MINIO_ACCESS,
secret_key=MINIO_SECRET,
secure=MINIO_SECURE)
def __call__(self, result):
result['image'], result['pre_mask'] = self.read_image(self.path)
result['gray'] = cv2.cvtColor(result['image'], cv2.COLOR_BGR2GRAY)
result['keypoint'] = self.get_keypoint(result['name'])
result['path'] = self.path
result['img_shape'] = result['image'].shape
result['ori_shape'] = result['image'].shape
result['color'] = self.color if self.color is not None else None
result['print_dict'] = self.print_dict
return result
@staticmethod
def get_keypoint(name):
if name == 'blouse' or name == 'outwear' or name == 'dress' or name == 'tops':
keypoint = 'shoulder'
elif name == 'trousers' or name == 'skirt' or name == 'bottoms':
keypoint = 'waistband'
elif name == 'bag':
keypoint = 'hand_point'
elif name == 'shoes':
keypoint = 'toe'
elif name == 'hairstyle':
keypoint = 'head_point'
elif name == 'earring':
keypoint = 'ear_point'
else:
raise KeyError(f"{name} does not belong to item category list: blouse, outwear, dress, trousers, skirt, "
f"bag, shoes, hairstyle, earring.")
return keypoint
def read_image(self, image_path):
image_mask = None
file = self.minio_client.get_object(image_path.split("/", 1)[0], image_path.split("/", 1)[1]).data
image = cv2.imdecode(np.frombuffer(file, np.uint8), 1)
if len(image.shape) == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
if image.shape[2] == 4:  # four-channel image: the alpha channel is used as the mask
image_mask = image[:, :, 3]
image = image[:, :, :3]
return image, image_mask
@PIPELINES.register_module()
class LoadBodyImageFromFile(object):
def __init__(self, body_path):
self.body_path = body_path
self.minioClient = Minio(
f"{MINIO_URL}",
access_key=MINIO_ACCESS,
secret_key=MINIO_SECRET,
secure=MINIO_SECURE)
# response = self.minioClient.get_object("aida-mannequins", "model_1693218345.2714431.png")
# @ RunTime
def __call__(self, result):
result["image_url"] = result['body_path'] = self.body_path
result["name"] = "mannequin"
if not result['image_url'].lower().endswith(".png"):
logging.info(1)
bucket = self.body_path.split("/", 1)[0]
object_name = self.body_path.split("/", 1)[1]
new_object_name = f'{object_name[:object_name.rfind(".")]}.png'
image = self.minioClient.get_object(bucket, object_name)
image = Image.open(io.BytesIO(image.data))
image = image.convert("RGBA")
data = image.getdata()
#
new_data = []
for item in data:
if item[0] >= 230 and item[1] >= 230 and item[2] >= 230:
new_data.append((255, 255, 255, 0))
else:
new_data.append(item)
image.putdata(new_data)
image_data = io.BytesIO()
image.save(image_data, format='PNG')
image_data.seek(0)
image_bytes = image_data.read()
image_path = f"{bucket}/{self.minioClient.put_object(bucket, new_object_name, io.BytesIO(image_bytes), len(image_bytes), content_type='image/png').object_name}"
self.body_path = image_path
result["image_url"] = result['body_path'] = self.body_path
response = self.minioClient.get_object(self.body_path.split("/", 1)[0], self.body_path.split("/", 1)[1])
# put_image_time = time.time()
result['body_image'] = Image.open(io.BytesIO(response.read()))
# logging.info(f"Image.open time is : {time.time() - put_image_time}")
return result
@PIPELINES.register_module()
class ImageShow(object):
def __init__(self, key):
self.key = key
# @ RunTime
def __call__(self, result):
import matplotlib.pyplot as plt
if isinstance(self.key, list):
for key in self.key:
plt.imshow(result[key])
plt.title(key)
plt.show()
elif isinstance(self.key, str):
img = self._resize_img(result[self.key])
cv2.imshow(self.key, img)
cv2.waitKey(0)
else:
raise TypeError(f'key should be string but got type {type(self.key)}.')
return result
@staticmethod
def _resize_img(img):
shape = img.shape
if shape[0] > 400 or shape[1] > 400:
ratio = min(400 / shape[0], 400 / shape[1])
img = cv2.resize(img, (int(ratio * shape[1]), int(ratio * shape[0])))
return img


@@ -0,0 +1,498 @@
import random
from io import BytesIO
import boto3
import cv2
import numpy as np
from PIL import Image
from ..builder import PIPELINES
# minio_client = Minio(
# f"{MINIO_IP}:{MINIO_PORT}",
# access_key=MINIO_ACCESS,
# secret_key=MINIO_SECRET,
# secure=MINIO_SECURE)
s3 = boto3.client(
's3',
aws_access_key_id="AKIAVD3OJIMF6UJFLSHZ",
aws_secret_access_key="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",
region_name="ap-east-1"
)
@PIPELINES.register_module()
class Painting(object):
def __init__(self, painting_flag=True):
self.painting_flag = painting_flag
# @ RunTime
def __call__(self, result):
if result['name'] not in ['hairstyle', 'earring'] and self.painting_flag and result['color'] != 'none':
dim_image_h, dim_image_w = result['image'].shape[0:2]
if "gradient" in result.keys() and result['gradient'] != "":
bucket_name = result['gradient'].split('/')[0]
object_name = result['gradient'][result['gradient'].find('/') + 1:]
pattern = self.get_gradient(bucket_name=bucket_name, object_name=object_name)
resize_pattern = cv2.resize(pattern, (dim_image_w, dim_image_h), interpolation=cv2.INTER_AREA)
else:
pattern = self.get_pattern(result['color'])
resize_pattern = cv2.resize(pattern, (dim_image_w, dim_image_h), interpolation=cv2.INTER_AREA)
closed_mo = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2)
gray_mo = np.expand_dims(result['gray'], axis=2).repeat(3, axis=2)
get_image_fir = resize_pattern * (closed_mo / 255) * (gray_mo / 255)
result['pattern_image'] = get_image_fir.astype(np.uint8)
result['final_image'] = result['pattern_image']
canvas = np.full_like(result['final_image'], 255)
temp_bg = np.expand_dims(cv2.bitwise_not(result['mask']), axis=2).repeat(3, axis=2)
tmp1 = (canvas * (temp_bg / 255)).astype(np.uint8)
temp_fg = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2)
tmp2 = (result['final_image'] * (temp_fg / 255)).astype(np.uint8)
result['single_image'] = cv2.add(tmp1, tmp2)
result['alpha'] = 100 / 255.0
else:
closed_mo = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2)
get_image_fir = result['image'] * (closed_mo / 255)
result['pattern_image'] = get_image_fir.astype(np.uint8)
result['final_image'] = result['pattern_image']
return result
@staticmethod
def get_gradient(bucket_name, object_name):
# image_data = minio_client.get_object(bucket_name, object_name)
image_data = s3.get_object(Bucket=bucket_name, Key=object_name)['Body']
# read the image bytes from the stream
image_bytes = image_data.read()
# convert the image bytes into a numpy array
image_array = np.asarray(bytearray(image_bytes), dtype=np.uint8)
# decode the array with OpenCV
image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
return image
@staticmethod
def crop_image(image, image_size_h, image_size_w):
x_offset = np.random.randint(low=0, high=int(image_size_h / 5) - 6)
y_offset = np.random.randint(low=0, high=int(image_size_w / 5) - 6)
image = image[x_offset: x_offset + image_size_h, y_offset: y_offset + image_size_w, :]
return image
@staticmethod
def get_pattern(single_color):
if single_color is None:
raise ValueError('single_color must not be None')
R, G, B = single_color.split(' ')
pattern = np.zeros([1, 1, 3], np.uint8)
pattern[0, 0, 0] = int(B)
pattern[0, 0, 1] = int(G)
pattern[0, 0, 2] = int(R)
return pattern
@staticmethod
def gradient(image, angle_degrees, start_color, end_color):
height, width = image.shape[0], image.shape[1]
# create a blank output image
gradient_image = np.zeros((height, width, 3), dtype=np.uint8)
# clamp the angle to the range 0 to 360 degrees
angle_degrees = np.clip(angle_degrees, 0, 360)
# convert the angle to radians
angle_radians = np.radians(angle_degrees)
# compute the gradient direction
dx = np.cos(angle_radians)
dy = np.sin(angle_radians)
# build the coordinate grid
x_grid, y_grid = np.meshgrid(np.arange(width), np.arange(height))
# compute each pixel's position along the gradient direction
distance_along_gradient = (x_grid * dx + y_grid * dy) / np.sqrt(dx ** 2 + dy ** 2)
# compute the blend weight of each pixel
weight = np.clip(distance_along_gradient / max(width, height), 0, 1)
# compute the gradient colors
gradient_image[:, :, 0] = (1 - weight) * start_color[0] + weight * end_color[0]
gradient_image[:, :, 1] = (1 - weight) * start_color[1] + weight * end_color[1]
gradient_image[:, :, 2] = (1 - weight) * start_color[2] + weight * end_color[2]
return gradient_image
@PIPELINES.register_module()
class PrintPainting(object):
def __init__(self, print_flag=True):
self.print_flag = print_flag
# @ RunTime
def __call__(self, result):
if "location" not in result['print'].keys():
result['print']["location"] = [[0, 0]]
elif result['print']["location"] == [] or result['print']["location"] is None:
result['print']["location"] = [[0, 0]]
if result['print']['IfSingle']:
if len(result['print']['print_path_list']) == 0:
raise ValueError('When there is no printing, ifsingle must be false')
print_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8)
mask_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8)
# print_background = np.full((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), 255, dtype=np.uint8)
for i in range(len(result['print']['print_path_list'])):
image, image_mode = self.read_image(result['print']['print_path_list'][i])
if image_mode == "RGBA":
new_size = (int(image.width * result['print']['print_scale_list'][i]), int(image.height * result['print']['print_scale_list'][i]))
mask = image.split()[3]
resized_source = image.resize(new_size)
resized_source_mask = mask.resize(new_size)
rotated_resized_source = resized_source.rotate(result['print']['print_angle_list'][i])
rotated_resized_source_mask = resized_source_mask.rotate(result['print']['print_angle_list'][i])
source_image_pil = Image.fromarray(print_background)
source_image_pil_mask = Image.fromarray(mask_background)
source_image_pil.paste(rotated_resized_source, (int(result['print']['location'][i][0]), int(result['print']['location'][i][1])), rotated_resized_source)
source_image_pil_mask.paste(rotated_resized_source_mask, (int(result['print']['location'][i][0]), int(result['print']['location'][i][1])), rotated_resized_source_mask)
print_background = np.array(source_image_pil)
mask_background = np.array(source_image_pil_mask)
print(1)
else:
mask = self.get_mask_inv(image)
mask = np.expand_dims(mask, axis=2)
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
mask = cv2.bitwise_not(mask)
# the paste coordinates must be recalculated after rotation
rotate_mask, _ = self.img_rotate(mask, result['print']['print_angle_list'][i], result['print']['print_scale_list'][i])
rotate_image, rotated_new_size = self.img_rotate(image, result['print']['print_angle_list'][i], result['print']['print_scale_list'][i])
# x, y = int(result['print']['location'][i][0] - rotated_new_size[0] - (rotate_mask.shape[0] - image.shape[0]) / 2), int(result['print']['location'][i][1] - rotated_new_size[1] - (rotate_mask.shape[1] - image.shape[1]) / 2)
x, y = int(result['print']['location'][i][0] - rotated_new_size[0]), int(result['print']['location'][i][1] - rotated_new_size[1])
image_x = print_background.shape[1]
image_y = print_background.shape[0]
print_x = rotate_image.shape[1]
print_y = rotate_image.shape[0]
# buggy (kept disabled):
# if x + print_x > image_x:
# rotate_image = rotate_image[:, :x + print_x - image_x]
# rotate_mask = rotate_mask[:, :x + print_x - image_x]
# #
# if y + print_y > image_y:
# rotate_image = rotate_image[:y + print_y - image_y]
# rotate_mask = rotate_mask[:y + print_y - image_y]
# these two rounds of bounds checks cannot be applied independently
# in the first round, the ifs (at lines 108 and 115 of this file) check whether the print runs past the bottom and right bounds; the second round checks whether the top-left runs out of range. Done that way, cropping the right side first and then shifting the region left causes problems
# shift first, then check, and crop last
# if the print is rotated or sits flush against an edge, check whether the left and top bounds fall below 0
if x <= 0:
rotate_image = rotate_image[:, -x:]
rotate_mask = rotate_mask[:, -x:]
start_x = x = 0
else:
start_x = x
if y <= 0:
rotate_image = rotate_image[-y:, :]
rotate_mask = rotate_mask[-y:, :]
start_y = y = 0
else:
start_y = y
# ------------------
# if the print size exceeds the image size, crop the print
if x + print_x > image_x:
rotate_image = rotate_image[:, :image_x - x]
rotate_mask = rotate_mask[:, :image_x - x]
if y + print_y > image_y:
rotate_image = rotate_image[:image_y - y, :]
rotate_mask = rotate_mask[:image_y - y, :]
# mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = cv2.bitwise_xor(mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]], rotate_mask)
# print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = cv2.add(print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]], rotate_image)
# mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = rotate_mask
# print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = rotate_image
mask_background = self.stack_prin(mask_background, result['pattern_image'], rotate_mask, start_y, y, start_x, x)
print_background = self.stack_prin(print_background, result['pattern_image'], rotate_image, start_y, y, start_x, x)
# gray_image = cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY)
# print_background = cv2.bitwise_and(print_background, print_background, mask=gray_image)
print_mask = cv2.bitwise_and(result['mask'], cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY))
img_fg = cv2.bitwise_or(print_background, print_background, mask=print_mask)
img_bg = cv2.bitwise_and(result['pattern_image'], result['pattern_image'], mask=cv2.bitwise_not(print_mask))
mask_mo = np.expand_dims(print_mask, axis=2).repeat(3, axis=2)
gray_mo = np.expand_dims(result['gray'], axis=2).repeat(3, axis=2)
img_fg = (img_fg * (mask_mo / 255) * (gray_mo / 255)).astype(np.uint8)
result['final_image'] = cv2.add(img_bg, img_fg)
canvas = np.full_like(result['final_image'], 255)
temp_bg = np.expand_dims(cv2.bitwise_not(result['mask']), axis=2).repeat(3, axis=2)
tmp1 = (canvas * (temp_bg / 255)).astype(np.uint8)
temp_fg = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2)
tmp2 = (result['final_image'] * (temp_fg / 255)).astype(np.uint8)
result['single_image'] = cv2.add(tmp1, tmp2)
return result
else:
painting_dict = {}
painting_dict['dim_image_h'], painting_dict['dim_image_w'] = result['pattern_image'].shape[0:2]
# no print
if len(result['print_dict']['print_path_list']) == 0 or not self.print_flag:
result['print_image'] = result['pattern_image']
# print
else:
painting_dict = self.painting_collection(painting_dict, result, print_trigger=True)
result['print_image'] = self.printpaint(result, painting_dict, print_=True)
result['final_image'] = result['print_image']
canvas = np.full_like(result['final_image'], 255)
temp_bg = np.expand_dims(cv2.bitwise_not(result['mask']), axis=2).repeat(3, axis=2)
tmp1 = (canvas * (temp_bg / 255)).astype(np.uint8)
temp_fg = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2)
tmp2 = (result['final_image'] * (temp_fg / 255)).astype(np.uint8)
result['single_image'] = cv2.add(tmp1, tmp2)
return result
@staticmethod
def stack_prin(print_background, pattern_image, rotate_image, start_y, y, start_x, x):
temp_print = np.zeros((pattern_image.shape[0], pattern_image.shape[1], 3), dtype=np.uint8)
temp_print[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = rotate_image
img2gray = cv2.cvtColor(print_background, cv2.COLOR_BGR2GRAY)
ret, mask_ = cv2.threshold(img2gray, 1, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask_)
img1_bg = cv2.bitwise_and(print_background, print_background, mask=mask_)
img2_fg = cv2.bitwise_and(temp_print, temp_print, mask=mask_inv)
print_background = img1_bg + img2_fg
return print_background
def painting_collection(self, painting_dict, result, print_trigger=False):
if print_trigger:
print_ = self.get_print(result['print_dict'])
painting_dict['Trigger'] = not print_['IfSingle']
painting_dict['location'] = print_['location'] if 'location' in print_.keys() else None
single_mask_inv_print = self.get_mask_inv(print_['image'])
dim_max = max(painting_dict['dim_image_h'], painting_dict['dim_image_w'])
dim_pattern = (int(dim_max * print_['scale'] / 5), int(dim_max * print_['scale'] / 5))
if not print_['IfSingle']:
self.random_seed = random.randint(0, 1000)
painting_dict['mask_inv_print'] = self.tile_image(single_mask_inv_print, dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location'], trigger=True)
painting_dict['tile_print'] = self.tile_image(print_['image'], dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location'], trigger=True)
else:
painting_dict['mask_inv_print'] = self.tile_image(single_mask_inv_print, dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location'])
painting_dict['tile_print'] = self.tile_image(print_['image'], dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location'])
painting_dict['dim_print_h'], painting_dict['dim_print_w'] = dim_pattern
return painting_dict
def tile_image(self, pattern, dim, scale, dim_image_h, dim_image_w, location, trigger=False):
if not trigger:
tile = cv2.resize(pattern, dim, interpolation=cv2.INTER_AREA)
else:
resize_pattern = cv2.resize(pattern, dim, interpolation=cv2.INTER_AREA)
if len(pattern.shape) == 2:
tile = np.tile(resize_pattern, (int((5 + 1) / scale) + 4, int((5 + 1) / scale) + 4))
if len(pattern.shape) == 3:
tile = np.tile(resize_pattern, (int((5 + 1) / scale) + 4, int((5 + 1) / scale) + 4, 1))
tile = self.crop_image(tile, dim_image_h, dim_image_w, location, resize_pattern.shape)
return tile
def get_mask_inv(self, print_):
if print_[0][0][0] == 255 and print_[0][0][1] == 255 and print_[0][0][2] == 255:
bg_color = cv2.cvtColor(print_, cv2.COLOR_BGR2LAB)[0][0]
print_tile = cv2.cvtColor(print_, cv2.COLOR_BGR2LAB)
bg_l, bg_a, bg_b = bg_color[0], bg_color[1], bg_color[2]
bg_L_high, bg_L_low = self.get_low_high_lab(bg_l, L=True)
bg_a_high, bg_a_low = self.get_low_high_lab(bg_a)
bg_b_high, bg_b_low = self.get_low_high_lab(bg_b)
lower = np.array([bg_L_low, bg_a_low, bg_b_low])
upper = np.array([bg_L_high, bg_a_high, bg_b_high])
mask_inv = cv2.inRange(print_tile, lower, upper)
return mask_inv
else:
# bg_color = cv2.cvtColor(print_, cv2.COLOR_BGR2LAB)[0][0]
# print_tile = cv2.cvtColor(print_, cv2.COLOR_BGR2LAB)
# bg_l, bg_a, bg_b = bg_color[0], bg_color[1], bg_color[2]
# bg_L_high, bg_L_low = self.get_low_high_lab(bg_l, L=True)
# bg_a_high, bg_a_low = self.get_low_high_lab(bg_a)
# bg_b_high, bg_b_low = self.get_low_high_lab(bg_b)
# lower = np.array([bg_L_low, bg_a_low, bg_b_low])
# upper = np.array([bg_L_high, bg_a_high, bg_b_high])
# print_tile = cv2.cvtColor(print_, cv2.COLOR_BGR2LAB)
# mask_inv = cv2.cvtColor(print_tile, cv2.COLOR_BGR2GRAY)
# mask_inv = cv2.cvtColor(print_, cv2.COLOR_BGR2GRAY)
mask_inv = np.zeros(print_.shape[:2], dtype=np.uint8)
return mask_inv
@staticmethod
def printpaint(result, painting_dict, print_=False):
if print_ and painting_dict['Trigger']:
print_mask = cv2.bitwise_and(result['mask'], cv2.bitwise_not(painting_dict['mask_inv_print']))
img_fg = cv2.bitwise_and(painting_dict['tile_print'], painting_dict['tile_print'], mask=print_mask)
else:
print_mask = result['mask']
img_fg = result['final_image']
if print_ and not painting_dict['Trigger']:
try:
index_ = len(painting_dict['location'])
except:
raise ValueError('a location parameter is required when IfSingle is chosen')
for i in range(index_):
start_h, start_w = int(painting_dict['location'][i][1]), int(painting_dict['location'][i][0])
length_h = min(start_h + painting_dict['dim_print_h'], img_fg.shape[0])
length_w = min(start_w + painting_dict['dim_print_w'], img_fg.shape[1])
change_region = img_fg[start_h: length_h, start_w: length_w, :]
# problem in change_mask
change_mask = print_mask[start_h: length_h, start_w: length_w]
# get real part into change mask
_, change_mask = cv2.threshold(change_mask, 220, 255, cv2.THRESH_BINARY)
mask = cv2.bitwise_not(painting_dict['mask_inv_print'])
img_fg[start_h:start_h + painting_dict['dim_print_h'], start_w:start_w + painting_dict['dim_print_w'], :] = change_region
clothes_mask_print = cv2.bitwise_not(print_mask)
img_bg = cv2.bitwise_and(result['pattern_image'], result['pattern_image'], mask=clothes_mask_print)
mask_mo = np.expand_dims(print_mask, axis=2).repeat(3, axis=2)
gray_mo = np.expand_dims(result['gray'], axis=2).repeat(3, axis=2)
img_fg = (img_fg * (mask_mo / 255) * (gray_mo / 255)).astype(np.uint8)
print_image = cv2.add(img_bg, img_fg)
return print_image
@staticmethod
def get_print(print_dict):
if not 'print_scale_list' in print_dict.keys() or print_dict['print_scale_list'][0] < 0.3:
print_dict['scale'] = 0.3
else:
print_dict['scale'] = print_dict['print_scale_list'][0]
if not 'IfSingle' in print_dict.keys():
print_dict['IfSingle'] = False
# data = minio_client.get_object(print_dict['print_path_list'][0].split("/", 1)[0], print_dict['print_path_list'][0].split("/", 1)[1])
data = s3.get_object(Bucket=print_dict['print_path_list'][0].split("/", 1)[0], Key=print_dict['print_path_list'][0].split("/", 1)[1])['Body']
data_bytes = BytesIO(data.read())
image = Image.open(data_bytes)
image_mode = image.mode
# If the image is RGBA, paste it onto a plain white background so transparent areas do not turn black
if image_mode == "RGBA":
new_background = Image.new('RGB', image.size, (255, 255, 255))
new_background.paste(image, mask=image.split()[3])
image = new_background
print_dict['image'] = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
# file = minio_client.get_object(print_dict['print_path_list'][0].split("/", 1)[0], print_dict['print_path_list'][0].split("/", 1)[1]).data
# print_dict['image'] = cv2.imdecode(np.fromstring(file, np.uint8), 1)
# image = cv2.imdecode(np.frombuffer(file, np.uint8), 1)
# return image
return print_dict
def crop_image(self, image, image_size_h, image_size_w, location, print_shape):
print_w = print_shape[1]
print_h = print_shape[0]
random.seed(self.random_seed)
# logging.info(f'overall print location : {location}')
# x_offset = random.randint(0, image.shape[0] - image_size_h)
# y_offset = random.randint(0, image.shape[1] - image_size_w)
# 1. Take the offset modulo the resized print width/height to get the effective offset
x_offset = print_w - int(location[0][1] % print_w)
y_offset = print_h - int(location[0][0] % print_h)
# y_offset = int(location[0][0])
# x_offset = int(location[0][1])
if len(image.shape) == 2:
image = image[x_offset: x_offset + image_size_h, y_offset: y_offset + image_size_w]
elif len(image.shape) == 3:
image = image[x_offset: x_offset + image_size_h, y_offset: y_offset + image_size_w, :]
return image
@staticmethod
def get_low_high_lab(Lab_value, L=False):
# NOTE: the L flag currently has no effect; every channel uses the same +/-30 tolerance
high = Lab_value + 30 if Lab_value + 30 < 255 else 255
low = Lab_value - 30 if Lab_value - 30 > 0 else 0
return high, low
@staticmethod
def img_rotate(image, angel, scale):
"""顺时针旋转图像任意角度
Args:
image (np.array): [原始图像]
angel (float): [逆时针旋转的角度]
Returns:
[array]: [旋转后的图像]
"""
h, w = image.shape[:2]
center = (w // 2, h // 2)
# if type(angel) is not int:
# angel = 0
M = cv2.getRotationMatrix2D(center, -angel, scale)
# enlarge the output width/height so the rotated image is not cropped
rotated_h = int((w * np.abs(M[0, 1]) + (h * np.abs(M[0, 0]))))
rotated_w = int((h * np.abs(M[0, 1]) + (w * np.abs(M[0, 0]))))
M[0, 2] += (rotated_w - w) // 2
M[1, 2] += (rotated_h - h) // 2
# apply the rotation
rotated_img = cv2.warpAffine(image, M, (rotated_w, rotated_h))
return rotated_img, ((rotated_img.shape[1] - image.shape[1] * scale) // 2, (rotated_img.shape[0] - image.shape[0] * scale) // 2)
# return rotated_img, (0, 0)
@staticmethod
def read_image(image_url):
# data = minio_client.get_object(image_url.split("/", 1)[0], image_url.split("/", 1)[1])
data = s3.get_object(Bucket=image_url.split("/", 1)[0], Key=image_url.split("/", 1)[1])['Body']
data_bytes = BytesIO(data.read())
image = Image.open(data_bytes)
image_mode = image.mode
# If the image is RGBA, return the PIL image directly together with its mode
if image_mode == "RGBA":
# new_background = Image.new('RGB', image.size, (255, 255, 255))
# new_background.paste(image, mask=image.split()[3])
# image = new_background
return image, image_mode
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
return image, image_mode
# @staticmethod
# def read_image(image_url):
# response = requests.get(image_url)
# image_data = np.frombuffer(response.content, np.uint8)
#
# # 解码图像
# image = cv2.imdecode(image_data, 3)
# return image
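
For illustration, a minimal standalone sketch of the LAB background-range masking that get_mask_inv performs, assuming (as the code above does) that the top-left pixel of the print is the background colour; the +/-30 tolerance mirrors get_low_high_lab:

import cv2
import numpy as np

# hypothetical print tile: white background with a red square in the middle
print_ = np.full((64, 64, 3), 255, dtype=np.uint8)
print_[16:48, 16:48] = (0, 0, 255)  # BGR red

lab = cv2.cvtColor(print_, cv2.COLOR_BGR2LAB)
bg_l, bg_a, bg_b = (int(v) for v in lab[0, 0])
lower = np.array([max(bg_l - 30, 0), max(bg_a - 30, 0), max(bg_b - 30, 0)])
upper = np.array([min(bg_l + 30, 255), min(bg_a + 30, 255), min(bg_b + 30, 255)])
mask_inv = cv2.inRange(lab, lower, upper)  # 255 on the background, 0 on the print itself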

View File

@@ -0,0 +1,54 @@
from ..builder import PIPELINES
import math
import cv2
@PIPELINES.register_module()
class Scaling(object):
def __init__(self):
pass
# @ RunTime
def __call__(self, result):
if result['keypoint'] in ['waistband', 'shoulder', 'head_point']:
# milvus_db_keypoint_cache
distance_clo = math.sqrt(
(int(result['clothes_keypoint'][result['keypoint'] + '_left'][0]) - int(result['clothes_keypoint'][result['keypoint'] + '_right'][0])) ** 2
+
(int(result['clothes_keypoint'][result['keypoint'] + '_left'][1]) - int(result['clothes_keypoint'][result['keypoint'] + '_right'][1])) ** 2)
distance_bdy = math.sqrt((int(result['body_point_test'][result['keypoint'] + '_left'][0]) - int(result['body_point_test'][result['keypoint'] + '_right'][0])) ** 2 + 1)
# distance_clo = math.sqrt(
# (int(result['clothes_keypoint'][result['keypoint'] + '_left'].split("_")[0]) - int(result['clothes_keypoint'][result['keypoint'] + '_right'].split("_")[0])) ** 2
# +
# (int(result['clothes_keypoint'][result['keypoint'] + '_left'].split("_")[1]) - int(result['clothes_keypoint'][result['keypoint'] + '_right'].split("_")[1])) ** 2)
#
# distance_bdy = math.sqrt((int(result['body_point_test'][result['keypoint'] + '_left'][0]) - int(result['body_point_test'][result['keypoint'] + '_right'][0])) ** 2 + 1)
if distance_clo == 0:
result['scale'] = 10
else:
result['scale'] = distance_bdy / distance_clo
elif result['keypoint'] == 'toe':
distance_bdy = math.sqrt(
(int(result['body_point_test']['foot_length'][0]) - int(result['body_point_test']['foot_length'][2])) ** 2
+
(int(result['body_point_test']['foot_length'][1]) - int(result['body_point_test']['foot_length'][3])) ** 2
)
Blur = cv2.GaussianBlur(result['gray'], (3, 3), 0)
Edge = cv2.Canny(Blur, 10, 200)
Edge = cv2.dilate(Edge, None)
Edge = cv2.erode(Edge, None)
Contour, _ = cv2.findContours(Edge, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
Contours = sorted(Contour, key=cv2.contourArea, reverse=True)
Max_contour = Contours[0]
x, y, w, h = cv2.boundingRect(Max_contour)
width = w
distance_clo = width
result['scale'] = distance_bdy / distance_clo
elif result['keypoint'] == 'hand_point':
result['scale'] = result['scale_bag']
elif result['keypoint'] == 'ear_point':
result['scale'] = result['scale_earrings']
return result
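
As a rough illustration of the ratio Scaling computes for shoulder/waistband keypoints, with made-up coordinates (the body distance keeps the same `+ 1` term used above to avoid a zero):

import math

clothes_left, clothes_right = (100, 300), (300, 300)   # hypothetical clothes shoulder keypoints
body_left, body_right = (120, 250), (270, 250)         # hypothetical body shoulder keypoints

distance_clo = math.dist(clothes_left, clothes_right)              # 200.0
distance_bdy = math.sqrt((body_left[0] - body_right[0]) ** 2 + 1)  # ~150.0
scale = distance_bdy / distance_clo if distance_clo else 10        # ~0.75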

View File

@@ -0,0 +1,14 @@
from ..builder import PIPELINES
from ...utils.design_ensemble import get_seg_result
@PIPELINES.register_module()
class Segmentation(object):
def __init__(self, device='cpu', show=False, debug=None):
self.show = show
self.device = device
self.debug = debug
def __call__(self, result):
result['seg_result'] = get_seg_result(result["image_id"], result['image'])
return result

View File

@@ -0,0 +1,115 @@
import logging
import cv2
import numpy as np
from cv2 import cvtColor, COLOR_BGR2RGBA
from app.service.utils.generate_uuid import generate_uuid
from ..builder import PIPELINES
from PIL import Image
from ...utils.conversion_image import rgb_to_rgba
from ...utils.upload_image import upload_png_mask
@PIPELINES.register_module()
class Split(object):
"""
Split image into front and back layer according to the segmentation result
"""
# KNet
def __call__(self, result):
try:
if 'mask' not in result.keys():
raise KeyError(f'Cannot find mask in result dict, please check ContourDetection is included in process pipelines.')
if 'seg_result' not in result.keys():  # the seg model was not run
result['front_mask'] = result['mask'].copy()
result['back_mask'] = np.zeros_like(result['mask'])
else:
temp_front = result['seg_result'] == 1
result['front_mask'] = (result['mask'] * (temp_front + 0).astype(np.uint8))
temp_back = result['seg_result'] == 2
result['back_mask'] = (result['mask'] * (temp_back + 0).astype(np.uint8))
if result['name'] in ('outwear', 'dress', 'blouse', 'skirt', 'trousers', 'tops', 'bottoms'):
if len(result['front_mask'].shape) > 2:
front_mask = result['front_mask'][0]
else:
front_mask = result['front_mask']
if len(result['back_mask'].shape) > 2:
back_mask = result['back_mask'][0]
else:
back_mask = result['back_mask']
rgba_image = rgb_to_rgba((result['final_image'].shape[0], result['final_image'].shape[1]), result['final_image'], result['mask'])
result_front_image = np.zeros_like(rgba_image)
result_front_image[front_mask != 0] = rgba_image[front_mask != 0]
result_front_image_pil = Image.fromarray(cvtColor(result_front_image, COLOR_BGR2RGBA))
front_new_size = (int(result_front_image_pil.width * result["scale"] * result["resize_scale"]), int(result_front_image_pil.height * result["scale"] * result["resize_scale"]))
result_front_image_pil = result_front_image_pil.resize(front_new_size, Image.LANCZOS)
front_mask = cv2.resize(front_mask, front_new_size)
result['front_image'], result["front_image_url"], result["front_mask_url"] = upload_png_mask(result_front_image_pil, f'{generate_uuid()}', mask=front_mask)
if result["name"] in ('blouse', 'dress', 'outwear', 'tops'):
result_back_image = np.zeros_like(rgba_image)
result_back_image[back_mask != 0] = rgba_image[back_mask != 0]
result_back_image_pil = Image.fromarray(cvtColor(result_back_image, COLOR_BGR2RGBA))
back_new_size = (int(result_back_image_pil.width * result["scale"] * result["resize_scale"]), int(result_back_image_pil.height * result["scale"] * result["resize_scale"]))
result_back_image_pil = result_back_image_pil.resize(back_new_size, Image.LANCZOS)
back_mask = cv2.resize(back_mask, back_new_size)
result['back_image'], result["back_image_url"], result["back_mask_url"] = upload_png_mask(result_back_image_pil, f'{generate_uuid()}', mask=back_mask)
else:
result['back_image'] = None
result["back_image_url"] = None
result["back_mask_url"] = None
return result
except Exception as e:
logging.warning(f"split runtime exception : {e} image_id : {result['image_id']}")
# @ RunTime
# def __call__(self, result):
# try:
# if 'mask' not in result.keys():
# raise KeyError(f'Cannot find mask in result dict, please check ContourDetection is included in process pipelines.')
# if 'seg_result' not in result.keys(): # 没过seg模型
# result['front_mask'] = result['mask'].copy()
# result['back_mask'] = np.zeros_like(result['mask'])
# else:
# temp_front = result['seg_result'] == 1
# result['front_mask'] = (result['mask'] * (temp_front + 0).astype(np.uint8))
# temp_back = result['seg_result'] == 2
# result['back_mask'] = (result['mask'] * (temp_back + 0).astype(np.uint8))
#
# if result['name'] in ('outwear', 'dress', 'blouse', 'skirt', 'trousers', 'tops', 'bottoms'):
# if len(result['front_mask'].shape) > 2:
# front_mask = result['front_mask'][0]
# else:
# front_mask = result['front_mask']
#
# rgba_image = rgb_to_rgba((result['final_image'].shape[0], result['final_image'].shape[1]), result['final_image'], result['mask'])
# result_front_image = np.zeros_like(rgba_image)
# result_front_image[front_mask != 0] = rgba_image[front_mask != 0]
#
# result_front_image_pil = Image.fromarray(cvtColor(result_front_image, COLOR_BGR2RGBA))
# front_new_size = (int(result_front_image_pil.width * result["scale"] * result["resize_scale"]), int(result_front_image_pil.height * result["scale"] * result["resize_scale"]))
# result_front_image_pil = result_front_image_pil.resize(front_new_size, Image.LANCZOS)
# front_mask = cv2.resize(front_mask, front_new_size)
# result['front_image'], result["front_image_url"], result["front_mask_url"] = upload_png_mask(result_front_image_pil, f'{generate_uuid()}', mask=front_mask)
#
# if result["name"] in ('blouse', 'dress', 'outwear', 'tops'):
# result_back_image = np.zeros_like(rgba_image)
# result_back_image[result['back_mask'] != 0] = rgba_image[result['back_mask'] != 0]
#
# result_back_image_pil = Image.fromarray(cvtColor(result_back_image, COLOR_BGR2RGBA))
# back_new_size = (int(result_back_image_pil.width * result["scale"] * result["resize_scale"]), int(result_back_image_pil.height * result["scale"] * result["resize_scale"]))
# result_back_image_pil = result_back_image_pil.resize(back_new_size, Image.LANCZOS)
# back_mask = cv2.resize(result['back_mask'], back_new_size)
# result['back_image'], result["back_image_url"], result["back_mask_url"] = upload_png_mask(result_back_image_pil, f'{generate_uuid()}', mask=back_mask)
# else:
# result['back_image'] = None
# result["back_image_url"] = None
# result["back_mask_url"] = None
# return result
# except Exception as e:
# logging.warning(f"split runtime exception : {e} image_id : {result['image_id']}")

View File

@@ -0,0 +1,126 @@
import io
import logging
import time
import cv2
import numpy as np
from .builder import ITEMS
from .clothing import Clothing
from PIL import Image
from ..utils.conversion_image import rgb_to_rgba
from ..utils.upload_image import upload_png_mask
from ...utils.generate_uuid import generate_uuid
@ITEMS.register_module()
class Shoes(Clothing):
# TODO location of shoes has little mismatch
def __init__(self, **kwargs):
pipeline = [
dict(type='LoadImageFromFile', path=kwargs['path'], color=kwargs['color']),
dict(type='KeypointDetection'),
dict(type='ContourDetection'),
dict(type='Painting'),
dict(type='Scaling'),
dict(type='Split'),
# dict(type='ImageShow', key=['image', 'mask', 'pattern_image']),
]
kwargs.update(pipeline=pipeline)
super(Shoes, self).__init__(**kwargs)
def organize(self, layer):
left_shoe_mask, right_shoe_mask = self.cut()
left_layer = dict(name=f'{type(self).__name__.lower()}_left',
image=self.result['shoes_left'],
image_url=self.result['left_image_url'],
mask_url=self.result['left_mask_url'],
scale=self.result['scale'],
clothes_keypoint=self.result['clothes_keypoint'],
position=self.calculate_start_point(self.result['keypoint'],
self.result['scale'],
self.result['clothes_keypoint'],
self.result['body_point'],
'left'))
layer.insert(left_layer)
right_layer = dict(name=f'{type(self).__name__.lower()}_right',
image=self.result['shoes_right'],
image_url=self.result['right_image_url'],
mask_url=self.result['right_mask_url'],
scale=self.result['scale'],
clothes_keypoint=self.result['clothes_keypoint'],
position=self.calculate_start_point(self.result['keypoint'],
self.result['scale'],
self.result['clothes_keypoint'],
self.result['body_point'],
'right'))
layer.insert(right_layer)
def cut(self):
"""
Cut shoes mask into two pieces
Returns:
"""
contour, _ = cv2.findContours(self.result['mask'], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contour, key=cv2.contourArea, reverse=True)
bounding_boxes = [cv2.boundingRect(c) for c in contours[:2]]
(contours, bounding_boxes) = zip(*sorted(zip(contours[:2], bounding_boxes), key=lambda x: x[1][0], reverse=False))
epsilon_left = 0.001 * cv2.arcLength(contours[0], True)
approx_left = cv2.approxPolyDP(contours[0], epsilon_left, True)
mask_left = np.zeros(self.result['final_image'].shape[:2], np.uint8)
cv2.drawContours(mask_left, [approx_left], -1, 255, -1)
item_mask_left = cv2.GaussianBlur(mask_left, (5, 5), 0)
rgba_image = rgb_to_rgba((self.result['final_image'].shape[0], self.result['final_image'].shape[1]), self.result['final_image'], item_mask_left)
result_image = np.zeros_like(rgba_image)
result_image[self.result['front_mask'] != 0] = rgba_image[self.result['front_mask'] != 0]
result_left_image_pil = Image.fromarray(result_image, 'RGBA')
result_left_image_pil = result_left_image_pil.resize((int(result_left_image_pil.width * self.result["scale"]), int(result_left_image_pil.height * self.result["scale"])), Image.LANCZOS)
self.result['shoes_left'], self.result["left_image_url"], self.result["left_mask_url"] = upload_png_mask(result_left_image_pil, f"{generate_uuid()}")
epsilon_right = 0.001 * cv2.arcLength(contours[1], True)
approx_right = cv2.approxPolyDP(contours[1], epsilon_right, True)
mask_right = np.zeros(self.result['final_image'].shape[:2], np.uint8)
cv2.drawContours(mask_right, [approx_right], -1, 255, -1)
item_mask_right = cv2.GaussianBlur(mask_right, (5, 5), 0)
rgba_image = rgb_to_rgba((self.result['final_image'].shape[0], self.result['final_image'].shape[1]), self.result['final_image'], item_mask_right)
result_image = np.zeros_like(rgba_image)
result_image[self.result['front_mask'] != 0] = rgba_image[self.result['front_mask'] != 0]
result_right_image_pil = Image.fromarray(result_image, 'RGBA')
result_right_image_pil = result_right_image_pil.resize((int(result_right_image_pil.width * self.result["scale"]), int(result_right_image_pil.height * self.result["scale"])), Image.LANCZOS)
self.result['shoes_right'], self.result["right_image_url"], self.result["right_mask_url"] = upload_png_mask(result_right_image_pil, f"{generate_uuid()}")
return item_mask_left, item_mask_right
@staticmethod
def calculate_start_point(keypoint_type, scale, clothes_point, body_point, location):
"""
left shoes align left
right shoes align right
Args:
keypoint_type: string, "toe"
scale: float
clothes_point: dict{'left': [x1, y1, z1], 'right': [x2, y2, z2]}
body_point: dict, containing keypoint data of body figure
location: string, indicates whether the start point belongs to right or left shoe
Returns:
start_point: tuple (x', y')
x' = y_body - y1 * scale
y' = x_body - x1 * scale
"""
if location not in ['left', 'right']:
raise KeyError(f'location value must be left or right but got {location}')
side_indicator = f'{keypoint_type}_{location}'
# clothes_point = {k: tuple(map(lambda x: int(scale * x), v[0: 2])) for k, v in clothes_point.items()}
start_point = (body_point[side_indicator][1] - int(int(clothes_point[side_indicator].split("_")[1]) * scale),
body_point[side_indicator][0] - int(int(clothes_point[side_indicator].split("_")[0]) * scale))
return start_point
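
A small worked example of the start-point formula in the docstring, with hypothetical toe keypoints (clothes keypoints are "x_y" strings here, matching the split("_") parsing above):

scale = 0.5
body_point = {'toe_left': (400, 900)}     # (x_body, y_body)
clothes_point = {'toe_left': "120_560"}   # "x1_y1"

x1, y1 = map(int, clothes_point['toe_left'].split("_"))
start_point = (body_point['toe_left'][1] - int(y1 * scale),   # y' = y_body - y1 * scale
               body_point['toe_left'][0] - int(x1 * scale))   # x' = x_body - x1 * scale
# start_point == (620, 340)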

View File

@@ -0,0 +1,46 @@
from .builder import ITEMS
from .clothing import Clothing
@ITEMS.register_module()
class Top(Clothing):
def __init__(self, pipeline, **kwargs):
if pipeline is None:
pipeline = [
dict(type='LoadImageFromFile', path=kwargs['path'], color=kwargs['color'], print_dict=kwargs['print']),
dict(type='KeypointDetection'),
dict(type='ContourDetection'),
dict(type='Segmentation', device='cpu', show=False, debug=kwargs['debug']),
dict(type='Painting', painting_flag=True),
dict(type='PrintPainting', print_flag=True),
# dict(type='ImageShow', key=['image', 'mask', 'seg_visualize', 'pattern_image']),
dict(type='Scaling'),
dict(type='Split'),
]
kwargs.update(pipeline=pipeline)
super(Top, self).__init__(**kwargs)
@ITEMS.register_module()
class Blouse(Top):
def __init__(self, pipeline=None, **kwargs):
super(Blouse, self).__init__(pipeline, **kwargs)
@ITEMS.register_module()
class Outwear(Top):
def __init__(self, pipeline=None, **kwargs):
super(Outwear, self).__init__(pipeline, **kwargs)
@ITEMS.register_module()
class Dress(Top):
def __init__(self, pipeline=None, **kwargs):
super(Dress, self).__init__(pipeline, **kwargs)
# Men's clothing
@ITEMS.register_module()
class Tops(Top):
def __init__(self, pipeline=None, **kwargs):
super(Tops, self).__init__(pipeline, **kwargs)

View File

@@ -0,0 +1,130 @@
from app.core.config import PRIORITY_DICT
from app.service.design.core.layer import Layer
from app.service.design.items import build_item
from app.service.design.utils.redis_utils import Redis
from app.service.design.utils.synthesis_item import synthesis, synthesis_single
import concurrent.futures
def process_item(item, layers):
# logging.info("process running.........")
item.process()
item.organize(layers)
if item.result['name'] == "mannequin":
return item.result['body_image'].size
def update_progress(process_id, total):
r = Redis()
progress = r.read(key=process_id)
if progress and total != 1:
if int(progress) <= 100:
r.write(key=process_id, value=int(progress) + int(100 / total))
else:
r.write(key=process_id, value=100)
return progress
elif total == 1:
r.write(key=process_id, value=100)
return progress
else:
r.write(key=process_id, value=int(100 / total))
return progress
def final_progress(process_id):
r = Redis()
progress = r.read(key=process_id)
r.write(key=process_id, value=100)
return progress
def generate(request_data):
return_response = {}
request_data = request_data.dict()
assert "process_id" in request_data.keys(), "Need process_id parameters"
objects = request_data['objects']
# insert_keypoint_cache(objects)
process_id = request_data['process_id']
with concurrent.futures.ThreadPoolExecutor() as executor:
# submit a processing task for each object
futures = {executor.submit(process_object, cfg, process_id, len(objects)): obj for obj, cfg in enumerate(objects)}
# collect the processing results
for future in concurrent.futures.as_completed(futures):
obj = futures[future]
result = future.result()
return_response[obj] = result
final_progress(process_id)
return return_response
def process_object(cfg, process_id, total):
basic_info = cfg.get('basic')
items_response = {
'layers': []
}
if cfg.get('basic')['single_overall'] == 'overall':
basic_info['debug'] = False
items = [build_item(x, default_args=basic_info) for x in cfg.get('items')]
layers = Layer()
body_size = None
futures = []
for item in items:
futures.append(process_item(item, layers))
for future in futures:
if future is not None:
body_size = future
# whether to use a custom layer order
if basic_info.get('layer_order', False):
layers = sorted(layers.layer, key=lambda s: s.get("priority", float('inf')))
else:
layers = sorted(layers.layer, key=lambda x: PRIORITY_DICT.get(x['name'], float('inf')))
# composite the layers
items_response['synthesis_url'] = synthesis(layers, body_size)
for lay in layers:
items_response['layers'].append({
'image_category': lay['name'],
'position': lay['position'],
'priority': lay.get("priority", None),
'resize_scale': lay['resize_scale'] if "resize_scale" in lay.keys() else None,
'image_size': lay['image'] if lay['image'] is None else lay['image'].size,
'gradient_string': lay['gradient_string'] if 'gradient_string' in lay.keys() else "",
'mask_url': lay['mask_url'],
'image_url': lay['image_url'] if 'image_url' in lay.keys() else None,
# 'image': lay['image'],
# 'mask_image': lay['mask_image'],
})
elif cfg.get('basic')['single_overall'] == 'single':
assert cfg.get('basic')['switch_category'] in [x['type'] for x in cfg.get('items')], "Lack of switch_category parameters "
basic_info['debug'] = False
for item in cfg.get('items'):
if item['type'] == cfg.get('basic')['switch_category']:
item = build_item(item, default_args=cfg.get('basic'))
item.process()
items_response['layers'].append({
'image_category': f"{item.result['name']}_front",
'image_size': item.result['front_image'].size if item.result['front_image'] else None,
'position': None,
'priority': 0,
'image_url': item.result['front_image_url'],
'mask_url': item.result['front_mask_url'],
"gradient_string": item.result['gradient_string'] if 'gradient_string' in item.result.keys() else ""
})
items_response['layers'].append({
'image_category': f"{item.result['name']}_back",
'image_size': item.result['back_image'].size if item.result['back_image'] else None,
'position': None,
'priority': 0,
'image_url': item.result['back_image_url'],
'mask_url': item.result['back_mask_url'],
"gradient_string": item.result['gradient_string'] if 'gradient_string' in item.result.keys() else ""
})
items_response['synthesis_url'] = synthesis_single(item.result['front_image'], item.result['back_image'])
break
update_progress(process_id, total)
return items_response
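
For reference, a request body of roughly the shape generate() and process_object expect; the field names are inferred from the code above (DesignModel itself only validates objects and process_id), and every value below is a placeholder:

request_payload = {
    "process_id": "demo-process-id",
    "objects": [
        {
            "basic": {
                "single_overall": "overall",   # or "single" together with switch_category
                "layer_order": False,
                "scale_bag": 1.0,
                "scale_earrings": 1.0,
                "body_point": {"shoulder_left": [120, 250], "shoulder_right": [270, 250]},
                "body_path": "some-bucket/body.png",
            },
            "items": [
                {"type": "Blouse", "path": "some-bucket/blouse.png", "color": "",
                 "resize_scale": 1.0,
                 "print": {"IfSingle": False, "print_path_list": [], "print_scale_list": [0.5]}},
            ],
        }
    ],
}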

View File

View File

@@ -0,0 +1,23 @@
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project trinity_client
@File conversion_image.py
@Author :周成融
@Date 2023/8/21 10:40:29
@detail
"""
import numpy as np
def rgb_to_rgba(rgb_size, rgb_image, mask):
alpha_channel = np.full(rgb_size, 255, dtype=np.uint8)
# build the four-channel result image
rgba_image = np.dstack((rgb_image, alpha_channel))
alpha_channel = np.where(mask > 0, 255, 0)
# update the alpha channel of the RGBA image
rgba_image[:, :, 3] = alpha_channel
return rgba_image
if __name__ == '__main__':
image = open("")
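
A minimal usage sketch of rgb_to_rgba with a synthetic image and mask:

import numpy as np

h, w = 4, 4
rgb = np.full((h, w, 3), 128, dtype=np.uint8)
mask = np.zeros((h, w), dtype=np.uint8)
mask[1:3, 1:3] = 255

rgba = rgb_to_rgba((h, w), rgb, mask)
# rgba has shape (4, 4, 4); the alpha channel is 255 inside the mask and 0 outside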

View File

@@ -0,0 +1,138 @@
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project trinity_client
@File design_ensemble.py
@Author :周成融
@Date 2023/8/16 19:36:21
@detail : send inference requests and collect the results
"""
import logging
import cv2
import mmcv
import numpy as np
import tritonclient.http as httpclient
import torch
import torch.nn.functional as F
from app.core.config import *
"""
keypoint
preprocess / inference / postprocess
"""
def keypoint_preprocess(img_path):
img = mmcv.imread(img_path)
img_scale = (256, 256)
img, w_scale, h_scale = mmcv.imresize(img, img_scale, return_scale=True)
img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
return preprocessed_img, (w_scale, h_scale)
# @ RunTime
# inference
def get_keypoint_result(image, site):
keypoint_result = None
try:
image, scale_factor = keypoint_preprocess(image)
client = httpclient.InferenceServerClient(url=KEYPOINT_MODEL_URL)
transformed_img = image.astype(np.float32)
inputs = [httpclient.InferInput(f"input", transformed_img.shape, datatype="FP32")]
inputs[0].set_data_from_numpy(transformed_img, binary_data=True)
outputs = [httpclient.InferRequestedOutput(f"output", binary_data=True)]
results = client.infer(model_name=f"keypoint_{site}_ocrnet_hr18", inputs=inputs, outputs=outputs)
inference_output = torch.from_numpy(results.as_numpy(f'output'))
keypoint_result = keypoint_postprocess(inference_output, scale_factor)
except Exception as e:
logging.warning(f"get_keypoint_result : {e}")
return keypoint_result
def keypoint_postprocess(output, scale_factor):
max_indices = torch.argmax(output.view(output.size(0), output.size(1), -1), dim=2).unsqueeze(dim=2)
max_coords = torch.cat((max_indices / output.size(3), max_indices % output.size(3)), dim=2)
segment_result = max_coords.numpy()
scale_factor = [1 / x for x in scale_factor[::-1]]
scale_matrix = np.diag(scale_factor)
nan = np.isinf(scale_matrix)
scale_matrix[nan] = 0
return np.ceil(np.dot(segment_result, scale_matrix) * 4)
"""
seg
preprocess / inference / postprocess
"""
# KNet
def seg_preprocess(img_path):
img = mmcv.imread(img_path)
ori_shape = img.shape[:2]  # (h, w)
img_scale_h, img_scale_w = ori_shape
if img_scale_h > 1024:
img_scale_h = 1024
if img_scale_w > 1024:
img_scale_w = 1024
scale_factor = []
# mmcv.imresize expects the target size as (w, h)
img, x, y = mmcv.imresize(img, (img_scale_w, img_scale_h), return_scale=True)
scale_factor.append(x)
scale_factor.append(y)
img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
return preprocessed_img, ori_shape
# @ RunTime
def get_seg_result(image_id, image):
image, ori_shape = seg_preprocess(image)
client = httpclient.InferenceServerClient(url=f"{DESIGN_MODEL_URL}")
transformed_img = image.astype(np.float32)
# model inputs
inputs = [
httpclient.InferInput(SEGMENTATION['input'], transformed_img.shape, datatype="FP32")
]
inputs[0].set_data_from_numpy(transformed_img, binary_data=True)
# requested outputs
outputs = [
httpclient.InferRequestedOutput(SEGMENTATION['output'], binary_data=True),
]
results = client.infer(model_name=SEGMENTATION['new_model_name'], inputs=inputs, outputs=outputs)
# fetch the inference output
inference_output1 = results.as_numpy(SEGMENTATION['output'])
seg_result = seg_postprocess(int(image_id), inference_output1, ori_shape)
return seg_result
# no cache
def seg_postprocess(image_id, output, ori_shape):
seg_logit = F.interpolate(torch.tensor(output).float(), size=ori_shape, scale_factor=None, mode='bilinear', align_corners=False)
seg_pred = seg_logit.cpu().numpy()
return seg_pred[0]
def key_point_show(image_path, key_point_result=None):
img = cv2.imread(image_path)
points_list = key_point_result
point_size = 1
point_color = (0, 0, 255) # BGR
thickness = 4  # may be 0, 4 or 8
for point in points_list:
cv2.circle(img, point[::-1], point_size, point_color, thickness)
cv2.imshow("0", img)
cv2.waitKey(0)
if __name__ == '__main__':
image = cv2.imread("./14162b58-f259-4833-98cb-89b9b496b251.jfif")
a = get_keypoint_result(image, "up")
new_list = []
for i in a[0]:
new_list.append((int(i[0]), int(i[1])))
key_point_show("./14162b58-f259-4833-98cb-89b9b496b251.jfif", new_list)
# a = get_seg_result(1, image)
print(a)
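
A small sketch of what keypoint_postprocess does for a single heatmap, assuming a hypothetical 64x64 output and the 4x down-sampling implied by the final multiplication above; the resize factors are placeholders:

import numpy as np
import torch

heatmap = torch.zeros(1, 1, 64, 64)
heatmap[0, 0, 10, 20] = 1.0   # peak at row 10, col 20

flat_idx = torch.argmax(heatmap.view(1, 1, -1), dim=2).unsqueeze(dim=2)   # 10 * 64 + 20
coords = torch.cat((flat_idx / heatmap.size(3),
                    (flat_idx % heatmap.size(3)).float()), dim=2)         # (row, col)

w_scale, h_scale = 2.0, 2.0                   # hypothetical resize factors from keypoint_preprocess
scale_matrix = np.diag([1 / h_scale, 1 / w_scale])
points = np.ceil(np.dot(coords.numpy(), scale_matrix) * 4)  # back to original-image pixels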

View File

@@ -0,0 +1,99 @@
import redis
from app.core.config import REDIS_HOST, REDIS_PORT
class Redis(object):
"""
Redis database operations
"""
@staticmethod
def _get_r():
host = REDIS_HOST
port = REDIS_PORT
db = 0
r = redis.StrictRedis(host, port, db)
return r
@classmethod
def write(cls, key, value, expire=None):
"""
Write a key-value pair
"""
# use the default expiry if none is given
if expire:
expire_in_seconds = expire
else:
expire_in_seconds = 100
r = cls._get_r()
r.set(key, value, ex=expire_in_seconds)
@classmethod
def read(cls, key):
"""
Read the value of a key
"""
r = cls._get_r()
value = r.get(key)
return value.decode('utf-8') if value else value
@classmethod
def hset(cls, name, key, value):
"""
Write a field into a hash
"""
r = cls._get_r()
r.hset(name, key, value)
@classmethod
def hget(cls, name, key):
"""
Read a field from the given hash
"""
r = cls._get_r()
value = r.hget(name, key)
return value.decode('utf-8') if value else value
@classmethod
def hgetall(cls, name):
"""
Get all fields of the given hash
"""
r = cls._get_r()
return r.hgetall(name)
@classmethod
def delete(cls, *names):
"""
Delete one or more keys
"""
r = cls._get_r()
r.delete(*names)
@classmethod
def hdel(cls, name, key):
"""
Delete a field from the given hash
"""
r = cls._get_r()
r.hdel(name, key)
@classmethod
def expire(cls, name, expire=None):
"""
Set an expiry time
"""
if expire:
expire_in_seconds = expire
else:
expire_in_seconds = 100
r = cls._get_r()
r.expire(name, expire_in_seconds)
if __name__ == '__main__':
redis_client = Redis()
# print(redis_client.write(key="1230", value=0))
redis_client.write(key="1230", value=10)
# print(redis_client.read(key="1230"))
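
A short sketch of how the progress helpers in service.py drive this wrapper; the key and values are placeholders and a reachable Redis instance is assumed:

r = Redis()
r.write(key="demo-process-id", value=0, expire=300)   # initialise progress
current = r.read(key="demo-process-id")               # values come back as strings, e.g. "0"
r.write(key="demo-process-id", value=int(current) + 25)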

View File

@@ -0,0 +1,174 @@
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project trinity_client
@File synthesis_item.py
@Author :周成融
@Date 2023/8/26 14:13:04
@detail
"""
import io
import logging
import time
import boto3
import cv2
import numpy as np
from PIL import Image
from minio import Minio
from app.service.utils.decorator import RunTime
from app.service.utils.generate_uuid import generate_uuid
# minio_client = Minio(
# f"{MINIO_IP}:{MINIO_PORT}",
# access_key=MINIO_ACCESS,
# secret_key=MINIO_SECRET,
# secure=MINIO_SECURE)
s3 = boto3.client(
's3',
aws_access_key_id="AKIAVD3OJIMF6UJFLSHZ",
aws_secret_access_key="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",
region_name="ap-east-1"
)
def positioning(all_mask_shape, mask_shape, offset):
all_start = 0
all_end = 0
mask_start = 0
mask_end = 0
if offset == 0:
all_start = 0
all_end = min(all_mask_shape, mask_shape)
mask_start = 0
mask_end = min(all_mask_shape, mask_shape)
elif offset > 0:
all_start = min(offset, all_mask_shape)
all_end = min(offset + mask_shape, all_mask_shape)
mask_start = 0
mask_end = 0 if offset > all_mask_shape else min(all_mask_shape - offset, mask_shape)
elif offset < 0:
if abs(offset) > mask_shape:
all_start = 0
all_end = 0
else:
all_start = 0
if mask_shape - abs(offset) > all_mask_shape:
all_end = min(mask_shape - abs(offset), all_mask_shape)
else:
all_end = mask_shape - abs(offset)
if abs(offset) > mask_shape:
mask_start = mask_shape
mask_end = mask_shape
else:
mask_start = abs(offset)
if mask_shape - abs(offset) >= all_mask_shape:
mask_end = all_mask_shape + abs(offset)
else:
mask_end = mask_shape
return all_start, all_end, mask_start, mask_end
@RunTime
def synthesis(data, size):
# create the base canvas
base_image = Image.new('RGBA', size, (0, 0, 0, 0))
try:
all_mask_shape = (size[1], size[0])
top_outer_mask = np.zeros(all_mask_shape, dtype=np.uint8)
bottom_outer_mask = np.zeros(all_mask_shape, dtype=np.uint8)
top = True
bottom = True
i = len(data)
while i:
i -= 1
if top and data[i]['name'] in ["blouse_front", "outwear_front", "dress_front", "tops_front"]:
top = False
mask_shape = data[i]['mask'].shape
y_offset, x_offset = data[i]['position']
# work out the start/end of the overlay region
all_y_start, all_y_end, mask_y_start, mask_y_end = positioning(all_mask_shape=all_mask_shape[0], mask_shape=mask_shape[0], offset=y_offset)
all_x_start, all_x_end, mask_x_start, mask_x_end = positioning(all_mask_shape=all_mask_shape[1], mask_shape=mask_shape[1], offset=x_offset)
# copy the corresponding mask pixels into the overlay region
top_outer_mask[all_y_start:all_y_end, all_x_start:all_x_end] = data[i]['mask'][mask_y_start:mask_y_end, mask_x_start:mask_x_end]
elif bottom and data[i]['name'] in ["trousers_front", "skirt_front", "bottoms_front"]:
bottom = False
mask_shape = data[i]['mask'].shape
y_offset, x_offset = data[i]['position']
# work out the start/end of the overlay region
all_y_start, all_y_end, mask_y_start, mask_y_end = positioning(all_mask_shape=all_mask_shape[0], mask_shape=mask_shape[0], offset=y_offset)
all_x_start, all_x_end, mask_x_start, mask_x_end = positioning(all_mask_shape=all_mask_shape[1], mask_shape=mask_shape[1], offset=x_offset)
# copy the corresponding mask pixels into the overlay region
bottom_outer_mask[all_y_start:all_y_end, all_x_start:all_x_end] = data[i]['mask'][mask_y_start:mask_y_end, mask_x_start:mask_x_end]
elif bottom is False and top is False:
break
all_mask = cv2.bitwise_or(top_outer_mask, bottom_outer_mask)
for layer in data:
if layer['image'] is not None:
if layer['name'] != "body":
test_image = Image.new('RGBA', size, (0, 0, 0, 0))
test_image.paste(layer['image'], (layer['position'][1], layer['position'][0]), layer['image'])
mask_data = np.where(all_mask > 0, 255, 0).astype(np.uint8)
mask_alpha = Image.fromarray(mask_data)
cropped_image = Image.composite(test_image, Image.new("RGBA", test_image.size, (255, 255, 255, 0)), mask_alpha)
base_image.paste(cropped_image, (0, 0), cropped_image)
else:
base_image.paste(layer['image'], (layer['position'][1], layer['position'][0]), layer['image'])
result_image = base_image
with io.BytesIO() as output:
result_image.save(output, format='PNG')
data = output.getvalue()
# image_data = io.BytesIO()
# result_image.save(image_data, format='PNG')
# image_data.seek(0)
# image_bytes = image_data.read()
# return f"aida-results/{minio_client.put_object('aida-results', f'result_{generate_uuid()}.png', io.BytesIO(image_bytes), len(image_bytes), content_type='image/png').object_name}"
object_name = f'result_{generate_uuid()}.png'
response = s3.put_object(Bucket="aida-results", Key=object_name, Body=data, ContentType='image/png')
object_url = f"aida-results/{object_name}"
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
return object_url
else:
return ""
except Exception as e:
logging.warning(f"synthesis runtime exception : {e}")
def synthesis_single(front_image, back_image):
result_image = None
if front_image:
result_image = front_image
if back_image and result_image is None:
result_image = back_image
elif back_image:
result_image.paste(back_image, (0, 0), back_image)
with io.BytesIO() as output:
result_image.save(output, format='PNG')
data = output.getvalue()
# image_data = io.BytesIO()
# result_image.save(image_data, format='PNG')
# image_data.seek(0)
# image_bytes = image_data.read()
# return f"aida-results/{minio_client.put_object('aida-results', f'result_{generate_uuid()}.png', io.BytesIO(image_bytes), len(image_bytes), content_type='image/png').object_name}"
object_name = f'result_{generate_uuid()}.png'
response = s3.put_object(Bucket="aida-results", Key=object_name, Body=data, ContentType='image/png')
object_url = f"aida-results/{object_name}"
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
return object_url
else:
return ""

View File

@@ -0,0 +1,160 @@
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project trinity_client
@File upload_image.py
@Author :周成融
@Date 2023/8/28 13:49:20
@detail
"""
import io
import logging
import time
import boto3
import cv2
from minio import Minio
from app.core.config import *
from app.service.utils.decorator import RunTime
minio_client = Minio(
f"{MINIO_URL}",
access_key=MINIO_ACCESS,
secret_key=MINIO_SECRET,
secure=MINIO_SECURE)
"""S3 上传"""
s3 = boto3.client(
's3',
aws_access_key_id="AKIAVD3OJIMF6UJFLSHZ",
aws_secret_access_key="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",
region_name="ap-east-1"
)
@RunTime
def upload_png_mask(front_image, object_name, mask=None):
start_time = time.time()
mask_url = None
if mask is not None:
# invert the mask
mask_inverted = cv2.bitwise_not(mask)
# convert the mask to RGBA
rgba_image = cv2.cvtColor(mask_inverted, cv2.COLOR_BGR2BGRA)
rgba_image[rgba_image[:, :, 0] == 0] = [0, 0, 0, 0]
# save the image data to an in-memory BytesIO object
image_bytes = io.BytesIO()
image_bytes.write(cv2.imencode('.png', rgba_image)[1].tobytes())
image_bytes.seek(0)
try:
key = f"mask/mask_{object_name}.png"
mask_url = f"{AIDA_CLOTHING}/{key}"
s3.put_object(Bucket=AIDA_CLOTHING, Key=key, Body=image_bytes, ContentType='image/png')
except Exception as e:
print(f'upload to S3 failed: {e}')
with io.BytesIO() as output:
front_image.save(output, format='PNG')
data = output.getvalue()
# upload via the shared S3 client
try:
key = f"image/image_{object_name}.png"
image_url = f"{AIDA_CLOTHING}/{key}"
s3.put_object(Bucket=AIDA_CLOTHING, Key=key, Body=data, ContentType='image/png')
return front_image, image_url, mask_url
except Exception as e:
print(f'upload to S3 failed: {e}')
@RunTime
def upload_layer_image(image, object_name):
with io.BytesIO() as output:
image.save(output, format='PNG')
data = output.getvalue()
# upload via the shared S3 client
try:
key = f"image/image_{object_name}.png"
image_url = f"{AIDA_CLOTHING}/{key}"
s3.put_object(Bucket=AIDA_CLOTHING, Key=key, Body=data, ContentType='image/png')
return image_url
except Exception as e:
print(f'upload to S3 failed: {e}')
@RunTime
def upload_mask_image(mask, object_name):
# invert the mask
mask_inverted = cv2.bitwise_not(mask)
# convert the mask to RGBA
rgba_image = cv2.cvtColor(mask_inverted, cv2.COLOR_BGR2BGRA)
rgba_image[rgba_image[:, :, 0] == 0] = [0, 0, 0, 0]
# save the image data to an in-memory BytesIO object
image_bytes = io.BytesIO()
image_bytes.write(cv2.imencode('.png', rgba_image)[1].tobytes())
image_bytes.seek(0)
try:
key = f"mask/mask_{object_name}.png"
mask_url = f"{AIDA_CLOTHING}/{key}"
s3.put_object(Bucket=AIDA_CLOTHING, Key=key, Body=image_bytes, ContentType='image/png')
return mask_url
except Exception as e:
print(f'upload to S3 failed: {e}')
"""minio 上传"""
# @RunTime
# def upload_png_mask(front_image, object_name, mask=None):
# start_time = time.time()
# try:
# mask_url = None
# if mask is not None:
# mask_inverted = cv2.bitwise_not(mask)
# # 将掩模的3通道转换为4通道白色部分不透明黑色部分透明
# rgba_image = cv2.cvtColor(mask_inverted, cv2.COLOR_BGR2BGRA)
# rgba_image[rgba_image[:, :, 0] == 0] = [0, 0, 0, 0]
# image_bytes = io.BytesIO()
# image_bytes.write(cv2.imencode('.png', rgba_image)[1].tobytes())
#
# image_bytes.seek(0)
# mask_url = f"{AIDA_CLOTHING}/{minio_client.put_object('aida-clothing', f'mask/mask_{object_name}.png', image_bytes, len(image_bytes.getvalue()), content_type='image/png').object_name}"
#
# image_data = io.BytesIO()
# front_image.save(image_data, format='PNG')
# image_data.seek(0)
# image_bytes = image_data.read()
# image_url = f"{AIDA_CLOTHING}/{minio_client.put_object('aida-clothing', f'image/image_{object_name}.png', io.BytesIO(image_bytes), len(image_bytes), content_type='image/png').object_name}"
# # print(f"upload_png_mask {object_name} = {time.time() - start_time}")
# return front_image, image_url, mask_url
# except Exception as e:
# logging.warning(f"upload_png_mask runtime exception : {e}")
#
#
# @RunTime
# def upload_layer_image(image, object_name):
# try:
# image_data = io.BytesIO()
# image.save(image_data, format='PNG')
# image_data.seek(0)
# image_bytes = image_data.read()
# image_url = f"{AIDA_CLOTHING}/{minio_client.put_object('aida-clothing', f'image/image_{object_name}.png', io.BytesIO(image_bytes), len(image_bytes), content_type='image/png').object_name}"
# return image_url
# except Exception as e:
# logging.warning(f"upload_png_mask runtime exception : {e}")
#
#
# @RunTime
# def upload_mask_image(mask, object_name):
# try:
# mask_inverted = cv2.bitwise_not(mask)
# # 将掩模的3通道转换为4通道白色部分不透明黑色部分透明
# rgba_image = cv2.cvtColor(mask_inverted, cv2.COLOR_BGR2BGRA)
# rgba_image[rgba_image[:, :, 0] == 0] = [0, 0, 0, 0]
# image_bytes = io.BytesIO()
# image_bytes.write(cv2.imencode('.png', rgba_image)[1].tobytes())
#
# image_bytes.seek(0)
# mask_url = f"{AIDA_CLOTHING}/{minio_client.put_object('aida-clothing', f'mask/mask_{object_name}.png', image_bytes, len(image_bytes.getvalue()), content_type='image/png').object_name}"
# return mask_url
# except Exception as e:
# logging.warning(f"upload_png_mask runtime exception : {e}")