# NOTE(review): source-viewer metadata ("135 lines, 5.7 KiB, Python") was
# pasted into the file here; kept only as this comment so the module parses.
import concurrent.futures
|
|
|
|
from app.core.config import PRIORITY_DICT
|
|
from app.service.design.core.layer import Layer
|
|
from app.service.design.items import build_item
|
|
from app.service.design.utils.redis_utils import Redis
|
|
from app.service.design.utils.synthesis_item import synthesis, synthesis_single
|
|
from app.service.utils.decorator import RunTime
|
|
|
|
|
|
def process_item(item, layers):
    """Run one item's processing pipeline and register it on *layers*.

    Calls ``item.process()`` then ``item.organize(layers)``.  When the
    processed item is the mannequin, its body image size is returned so
    the caller can use it as the synthesis canvas size; for every other
    item the function returns ``None``.
    """
    item.process()
    item.organize(layers)
    result = item.result
    if result['name'] == "mannequin":
        return result['body_image'].size
    return None
|
|
|
|
|
|
def update_progress(process_id, total):
    """Advance the Redis-backed progress counter after one object finishes.

    Parameters
    ----------
    process_id : str
        Redis key holding this request's progress percentage (0-100).
    total : int
        Total number of objects in the request; each completed object
        adds ``int(100 / total)`` percent.

    Returns
    -------
    The previous value read from Redis (may be ``None``/falsy when the
    key did not exist yet), matching the original contract.

    Notes
    -----
    The read-modify-write below is not atomic, so concurrent workers can
    lose increments; progress is advisory only, and ``final_progress``
    forces 100 at the end regardless.  # NOTE(review): a Redis INCRBY
    would make this atomic — confirm the Redis wrapper exposes it.
    """
    r = Redis()
    progress = r.read(key=process_id)
    if total <= 1:
        # A single object (or a degenerate total, which previously raised
        # ZeroDivisionError for total == 0) completes the whole job.
        r.write(key=process_id, value=100)
        return progress
    step = int(100 / total)
    if progress:
        # BUG FIX: the original wrote ``progress + step`` whenever the
        # stored value was <= 100, so the counter could exceed 100
        # (e.g. 100 + step).  Clamp the new value instead.
        r.write(key=process_id, value=min(int(progress) + step, 100))
    else:
        # First completed object: initialise the counter.
        r.write(key=process_id, value=step)
    return progress
|
|
|
|
|
|
def final_progress(process_id):
    """Force the progress key to 100 and return the value it held before."""
    redis_client = Redis()
    previous = redis_client.read(key=process_id)
    redis_client.write(key=process_id, value=100)
    return previous
|
|
|
|
|
|
@RunTime
def generate(request_data):
    """Process every requested object concurrently and track progress.

    Parameters
    ----------
    request_data : pydantic model
        Must expose ``.dict()`` yielding a payload with ``process_id``
        (Redis progress key) and ``objects`` (list of object configs).

    Returns
    -------
    dict
        Maps each object's index in ``objects`` to its
        ``process_object`` response.

    Raises
    ------
    ValueError
        If ``process_id`` is missing from the payload.
    """
    return_response = {}
    request_data = request_data.dict()
    # Explicit validation instead of ``assert`` — asserts are stripped
    # under ``python -O`` and would silently skip this check.
    if "process_id" not in request_data:
        raise ValueError("Need process_id parameters")

    objects = request_data['objects']
    # insert_keypoint_cache(objects)
    process_id = request_data['process_id']
    with concurrent.futures.ThreadPoolExecutor() as executor:
        # Submit one processing task per object; map each future back to
        # the object's index so results keep their original positions.
        futures = {
            executor.submit(process_object, cfg, process_id, len(objects)): idx
            for idx, cfg in enumerate(objects)
        }
        # Collect results as they complete (order of completion, keyed
        # by index, so the response dict is position-stable).
        for future in concurrent.futures.as_completed(futures):
            idx = futures[future]
            return_response[idx] = future.result()
    # Whatever the incremental updates summed to, finish at exactly 100.
    final_progress(process_id)
    return return_response
|
|
|
|
|
|
def process_object(cfg, process_id, total):
    """Process one object configuration and build its layer response.

    Two modes, selected by ``cfg['basic']['single_overall']``:

    * ``'overall'`` — builds every item, organizes them onto a shared
      ``Layer`` stack, sorts the layers (custom per-layer priority or the
      global ``PRIORITY_DICT``), and synthesises one composite image.
    * ``'single'`` — renders only the item whose ``type`` matches
      ``basic['switch_category']``, emitting its front and back layers.

    Parameters
    ----------
    cfg : dict
        Object configuration with ``basic`` and ``items`` keys.
    process_id : str
        Redis progress key; advanced once this object finishes.
    total : int
        Total number of objects in the request (progress step size).

    Returns
    -------
    dict
        ``{'layers': [...], 'synthesis_url': ...}``.

    Raises
    ------
    ValueError
        In ``'single'`` mode, when ``switch_category`` matches no item.
    """
    basic_info = cfg.get('basic')
    items_response = {'layers': []}

    if basic_info['single_overall'] == 'overall':
        basic_info['debug'] = False
        items = [build_item(x, default_args=basic_info) for x in cfg.get('items')]
        layers = Layer()
        body_size = None
        # BUG FIX: the original rebound ``futures = [process_item(...)]``
        # on every loop iteration, so only the LAST item's return value
        # was ever inspected and the mannequin body size could be lost.
        # Inspect every item's result instead.
        for item in items:
            size = process_item(item, layers)
            if size is not None:
                body_size = size
        # Layer ordering: caller-supplied per-layer priority, or the
        # global category priority table; unknown names sort last (inf).
        if basic_info.get('layer_order', False):
            ordered = sorted(layers.layer, key=lambda s: s.get("priority", float('inf')))
        else:
            ordered = sorted(layers.layer, key=lambda x: PRIORITY_DICT.get(x['name'], float('inf')))
        # Composite all ordered layers onto the mannequin-sized canvas.
        items_response['synthesis_url'] = synthesis(ordered, body_size)

        for lay in ordered:
            items_response['layers'].append({
                'image_category': lay['name'],
                'position': lay['position'],
                'priority': lay.get("priority", None),
                'resize_scale': lay.get('resize_scale', None),
                'image_size': lay['image'] if lay['image'] is None else lay['image'].size,
                'gradient_string': lay.get('gradient_string', ""),
                'mask_url': lay['mask_url'],
                'image_url': lay.get('image_url', None),
                'pattern_image_url': lay.get('pattern_image_url', None),
                # 'image': lay['image'],
                # 'mask_image': lay['mask_image'],
            })

    elif basic_info['single_overall'] == 'single':
        # Explicit validation instead of ``assert`` (stripped under -O).
        if basic_info['switch_category'] not in [x['type'] for x in cfg.get('items')]:
            raise ValueError("Lack of switch_category parameters ")
        basic_info['debug'] = False
        for item_cfg in cfg.get('items'):
            if item_cfg['type'] != basic_info['switch_category']:
                continue
            item = build_item(item_cfg, default_args=basic_info)
            item.process()
            result = item.result
            gradient = result.get('gradient_string', "")
            pattern_url = result.get('pattern_image_url', None)
            # BUG FIX (review — confirm intent): the original reported the
            # *back* image size on the front layer and vice versa; the
            # sizes now match the URLs they accompany.
            items_response['layers'].append({
                'image_category': f"{result['name']}_front",
                'image_size': result['front_image'].size if result['front_image'] else None,
                'position': None,
                'priority': 0,
                'image_url': result['front_image_url'],
                'mask_url': result['front_mask_url'],
                "gradient_string": gradient,
                'pattern_image_url': pattern_url,
            })
            items_response['layers'].append({
                'image_category': f"{result['name']}_back",
                'image_size': result['back_image'].size if result['back_image'] else None,
                'position': None,
                'priority': 0,
                'image_url': result['back_image_url'],
                'mask_url': result['back_mask_url'],
                "gradient_string": gradient,
                'pattern_image_url': pattern_url,
            })
            items_response['synthesis_url'] = synthesis_single(result['front_image'], result['back_image'])
            break

    update_progress(process_id, total)
    return items_response