Merge remote-tracking branch 'origin/develop' into develop
This commit is contained in:
@@ -53,7 +53,7 @@ class Segmentation(object):
|
||||
file_path = f"{SEG_CACHE_PATH}{image_id}.npy"
|
||||
try:
|
||||
np.save(file_path, seg_result)
|
||||
logger.info(f"保存成功 :{os.path.abspath(file_path)}")
|
||||
logger.debug(f"保存成功 :{os.path.abspath(file_path)}")
|
||||
except Exception as e:
|
||||
logger.error(f"保存失败: {e}")
|
||||
|
||||
@@ -64,7 +64,7 @@ class Segmentation(object):
|
||||
seg_result = np.load(file_path)
|
||||
return True, seg_result
|
||||
except FileNotFoundError:
|
||||
logger.warning("文件不存在")
|
||||
# logger.warning("文件不存在")
|
||||
return False, None
|
||||
except Exception as e:
|
||||
logger.error(f"加载失败: {e}")
|
||||
|
||||
@@ -51,19 +51,19 @@ class Segmentation:
|
||||
file_path = f"seg_cache/{image_id}.npy"
|
||||
try:
|
||||
np.save(file_path, seg_result)
|
||||
logger.info(f"保存成功 :{os.path.abspath(file_path)}")
|
||||
logger.debug(f"保存成功 :{os.path.abspath(file_path)}")
|
||||
except Exception as e:
|
||||
logger.error(f"保存失败: {e}")
|
||||
|
||||
@staticmethod
|
||||
def load_seg_result(image_id):
|
||||
file_path = f"seg_cache/{image_id}.npy"
|
||||
logger.info(f"load seg file name is :{SEG_CACHE_PATH}{image_id}.npy")
|
||||
# logger.info(f"load seg file name is :{SEG_CACHE_PATH}{image_id}.npy")
|
||||
try:
|
||||
seg_result = np.load(file_path)
|
||||
return True, seg_result
|
||||
except FileNotFoundError:
|
||||
logger.warning("文件不存在")
|
||||
# logger.warning("文件不存在")
|
||||
return False, None
|
||||
except Exception as e:
|
||||
logger.error(f"加载失败: {e}")
|
||||
|
||||
@@ -207,7 +207,7 @@ def design_generate_v2(request_data):
|
||||
'Connection': "keep-alive",
|
||||
'Content-Type': "application/json"
|
||||
}
|
||||
logger.info(items_response)
|
||||
# logger.info(items_response)
|
||||
response = post_request(url, json_data=items_response, headers=headers)
|
||||
if response:
|
||||
# 打印结果
|
||||
|
||||
@@ -36,11 +36,11 @@ class Segmentation:
|
||||
# preview 过模型 不缓存
|
||||
if "preview_submit" in result.keys() and result['preview_submit'] == "preview":
|
||||
# 推理获得seg 结果
|
||||
seg_result = get_seg_result(result["image_id"], result['image'])[0]
|
||||
seg_result = get_seg_result(result["image_id"], result['image'])
|
||||
# submit 过模型 缓存
|
||||
elif "preview_submit" in result.keys() and result['preview_submit'] == "submit":
|
||||
# 推理获得seg 结果
|
||||
seg_result = get_seg_result(result["image_id"], result['image'])[0]
|
||||
seg_result = get_seg_result(result["image_id"], result['image'])
|
||||
self.save_seg_result(seg_result, result['image_id'])
|
||||
# null 正常流程 加载本地缓存 无缓存则过模型
|
||||
else:
|
||||
@@ -49,14 +49,14 @@ class Segmentation:
|
||||
# 判断缓存和实际图片size是否相同
|
||||
if not _ or result["image"].shape[:2] != seg_result.shape:
|
||||
# 推理获得seg 结果
|
||||
seg_result = get_seg_result(result["image_id"], result['image'])[0]
|
||||
seg_result = get_seg_result(result["image_id"], result['image'])
|
||||
self.save_seg_result(seg_result, result['image_id'])
|
||||
result['seg_result'] = seg_result
|
||||
|
||||
# 处理前片后片
|
||||
temp_front = seg_result == 1.0
|
||||
temp_front = seg_result == 1
|
||||
result['front_mask'] = (255 * (temp_front + 0).astype(np.uint8))
|
||||
temp_back = seg_result == 2.0
|
||||
temp_back = seg_result == 2
|
||||
result['back_mask'] = (255 * (temp_back + 0).astype(np.uint8))
|
||||
result['mask'] = result['front_mask'] + result['back_mask']
|
||||
return result
|
||||
@@ -66,19 +66,19 @@ class Segmentation:
|
||||
file_path = f"{SEG_CACHE_PATH}{image_id}.npy"
|
||||
try:
|
||||
np.save(file_path, seg_result)
|
||||
logger.info(f"保存成功 :{os.path.abspath(file_path)}")
|
||||
logger.debug(f"保存成功 :{os.path.abspath(file_path)}")
|
||||
except Exception as e:
|
||||
logger.error(f"保存失败: {e}")
|
||||
|
||||
@staticmethod
|
||||
def load_seg_result(image_id):
|
||||
file_path = f"{SEG_CACHE_PATH}{image_id}.npy"
|
||||
logger.info(f"load seg file name is :{SEG_CACHE_PATH}{image_id}.npy")
|
||||
# logger.info(f"load seg file name is :{SEG_CACHE_PATH}{image_id}.npy")
|
||||
try:
|
||||
seg_result = np.load(file_path)
|
||||
return True, seg_result
|
||||
except FileNotFoundError:
|
||||
logger.warning("文件不存在")
|
||||
# logger.warning("文件不存在")
|
||||
return False, None
|
||||
except Exception as e:
|
||||
logger.error(f"加载失败: {e}")
|
||||
|
||||
@@ -13,7 +13,6 @@ import cv2
|
||||
import mmcv
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
import tritonclient.http as httpclient
|
||||
|
||||
from app.core.config import *
|
||||
@@ -85,7 +84,10 @@ def seg_preprocess(img_path):
|
||||
if ori_shape != (img_scale_w, img_scale_h):
|
||||
# mmcv.imresize(img, img_scale_h, img_scale_w) # 老代码 引以为戒!哈哈哈~ h和w写反了
|
||||
img = cv2.resize(img, (img_scale_h, img_scale_w))
|
||||
# img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
|
||||
|
||||
# 扩充25的白边
|
||||
img = cv2.copyMakeBorder(img, 25, 25, 25, 25, cv2.BORDER_CONSTANT, value=[255, 255, 255])
|
||||
img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
|
||||
preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
|
||||
return preprocessed_img, ori_shape
|
||||
|
||||
@@ -114,9 +116,9 @@ def get_seg_result(image_id, image):
|
||||
|
||||
# no cache
|
||||
def seg_postprocess(image_id, output, ori_shape):
|
||||
seg_logit = F.interpolate(torch.tensor(output).float(), size=ori_shape, scale_factor=None, mode='bilinear', align_corners=False)
|
||||
seg_pred = seg_logit.cpu().numpy()
|
||||
return seg_pred[0]
|
||||
seg_logit = cv2.resize(output[0][0].astype(np.uint8), (ori_shape[1] + 50, ori_shape[0] + 50))
|
||||
seg_logit = seg_logit[25: - 25, 25: - 25]
|
||||
return seg_logit
|
||||
|
||||
|
||||
def key_point_show(image_path, key_point_result=None):
|
||||
|
||||
@@ -266,7 +266,7 @@ class DesignPreprocessing:
|
||||
seg_result = np.load(file_path)
|
||||
return True, seg_result
|
||||
except FileNotFoundError:
|
||||
logging.info("文件不存在")
|
||||
# logging.info("文件不存在")
|
||||
return False, None
|
||||
except Exception as e:
|
||||
logging.warning(f"加载失败: {e}")
|
||||
@@ -277,7 +277,7 @@ class DesignPreprocessing:
|
||||
file_path = f"{SEG_CACHE_PATH}{image_id}.npy"
|
||||
try:
|
||||
np.save(file_path, seg_result)
|
||||
logging.info(f"保存成功,{os.path.abspath(file_path)}")
|
||||
logging.debug(f"保存成功,{os.path.abspath(file_path)}")
|
||||
except Exception as e:
|
||||
logging.warning(f"保存失败: {e}")
|
||||
|
||||
|
||||
126
app/service/generate_image/service_generate_multi_view.py
Normal file
126
app/service/generate_image/service_generate_multi_view.py
Normal file
@@ -0,0 +1,126 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: UTF-8 -*-
|
||||
"""
|
||||
@Project :trinity_client
|
||||
@File :service_att_recognition.py
|
||||
@Author :周成融
|
||||
@Date :2023/7/26 12:01:05
|
||||
@detail :
|
||||
"""
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
|
||||
import numpy as np
|
||||
import redis
|
||||
import tritonclient.grpc as grpcclient
|
||||
|
||||
from app.core.config import *
|
||||
from app.schemas.generate_image import GenerateMultiViewModel
|
||||
from app.service.generate_image.utils.upload_sd_image import upload_png_sd
|
||||
from app.service.utils.oss_client import oss_get_image
|
||||
|
||||
logger = logging.getLogger()
|
||||
|
||||
|
||||
class GenerateMultiView:
    """Generate a multi-view garment image through a Triton gRPC model.

    Task lifecycle is tracked in Redis under ``tasks_id`` as a JSON blob
    whose ``status`` moves PENDING -> SUCCESS / FAILURE / REVOKED.  When
    ``DEBUG`` is False the final status is also published to RabbitMQ.
    """

    def __init__(self, request_data):
        # NOTE(review): `pika` is referenced here but never imported at the
        # top of this file; import it lazily so the DEBUG=True path (no
        # RabbitMQ) does not require the dependency at all.
        if DEBUG is False:
            import pika
            self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS))
            self.channel = self.connection.channel()
        self.grpc_client = grpcclient.InferenceServerClient(url=GMV_MODEL_URL)

        self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
        self.image = self.get_image(request_data.image_url)
        self.tasks_id = request_data.tasks_id
        # user id is the suffix after the last '-' of the task id, e.g. "123-89" -> "89"
        self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:]
        self.generate_data = {'tasks_id': self.tasks_id, 'status': 'PENDING', 'message': "pending", 'image_url': ''}
        self.redis_client.set(self.tasks_id, json.dumps(self.generate_data))
        self.redis_client.expire(self.tasks_id, 600)  # status entry lives at most 10 minutes

    def get_image(self, image_url):
        """Fetch the source image from OSS.

        ``image_url`` has the form "<bucket>/<object_name>".  Returns a PIL
        image, or None when the download fails (best-effort: the error is
        only logged, matching the original behaviour).
        """
        try:
            return oss_get_image(bucket=image_url.split('/')[0],
                                 object_name=image_url[image_url.find('/') + 1:],
                                 data_type="PIL")
        except Exception as e:
            logger.error(e)
            return None  # explicit: callers must tolerate a missing image

    def callback(self, result, error):
        """Triton async-infer callback: persist the outcome to Redis."""
        if error:
            self.generate_data['status'] = "FAILURE"
            self.generate_data['message'] = str(error)
            self.redis_client.set(self.tasks_id, json.dumps(self.generate_data))
            return
        # Model output as a numpy batch of generated views.
        images = result.as_numpy("generated_image")
        # Index 6 picks one particular view out of the generated batch
        # -- TODO(review): confirm the model always returns >= 7 images.
        image_url = upload_png_sd(images[6], user_id=self.user_id, category="multi_view", file_name=f"{self.tasks_id}.png")
        self.generate_data['status'] = "SUCCESS"
        self.generate_data['message'] = "success"
        self.generate_data['image_url'] = str(image_url)
        self.redis_client.set(self.tasks_id, json.dumps(self.generate_data))

    def read_tasks_status(self):
        """Return (parsed dict, raw JSON string) of the task status in Redis."""
        status_data = self.redis_client.get(self.tasks_id)
        return json.loads(status_data), status_data

    def get_result(self):
        """Submit the inference request and poll Redis until the task ends.

        Polls at most 600 times at 0.1 s intervals (~60 s plus Redis
        round-trips).  Returns the final status dict; on unexpected errors
        marks the task FAILURE in Redis and re-raises.  The final status is
        published to RabbitMQ in ``finally`` when DEBUG is False.
        """
        try:
            # Single-image batch, shaped (1, H, W, C) as uint8.
            image_obj = np.array([np.array(self.image).astype(np.uint8)], dtype=np.uint8)

            input_image = grpcclient.InferInput("input_image", image_obj.shape, "UINT8")
            input_image.set_data_from_numpy(image_obj)

            ctx = self.grpc_client.async_infer(model_name=GMV_MODEL_NAME, inputs=[input_image], callback=self.callback)

            time_out = 600
            generate_data = None
            while time_out > 0:
                generate_data, _ = self.read_tasks_status()
                if generate_data['status'] in ["REVOKED", "FAILURE"]:
                    ctx.cancel()
                    break
                elif generate_data['status'] == "SUCCESS":
                    break
                time_out -= 1
                time.sleep(0.1)
            # NOTE(review): if the loop times out the request is never
            # cancelled and a PENDING dict is returned -- confirm intended.
            return generate_data
        except Exception as e:
            self.generate_data['status'] = "FAILURE"
            self.generate_data['message'] = str(e)
            self.redis_client.set(self.tasks_id, json.dumps(self.generate_data))
            # Chain the original exception so the traceback is not lost.
            raise Exception(str(e)) from e
        finally:
            dict_generate_data, str_generate_data = self.read_tasks_status()
            if DEBUG is False:
                self.channel.basic_publish(exchange='', routing_key=GMV_RABBITMQ_QUEUES, body=str_generate_data)
            logger.info(f" [x] Sent {json.dumps(dict_generate_data, indent=4)}")
|
||||
|
||||
def infer_cancel(tasks_id):
    """Mark a task as revoked in Redis.

    The polling loop in ``GenerateMultiView.get_result`` observes the
    REVOKED status and cancels the in-flight inference.  Returns the
    status dict that was written.
    """
    client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
    revoked = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'}
    client.set(tasks_id, json.dumps(revoked))
    return revoked
||||
|
||||
if __name__ == '__main__':
    # Smoke test: run multi-view generation on a sample OSS image.
    request_model = GenerateMultiViewModel(
        tasks_id="123-89",
        image_url="aida-sys-image/images/female/outwear/0628000123.jpg",
    )
    view_service = GenerateMultiView(request_model)
    print(view_service.get_result())
|
||||
@@ -7,9 +7,9 @@ def RunTime(func):
|
||||
t1 = time.time()
|
||||
res = func(*args, **kwargs)
|
||||
t2 = time.time()
|
||||
# if t2 - t1 > 0.05:
|
||||
# logging.info(f"function:【{func.__name__}】,runtime:【{str(t2 - t1)}】s")
|
||||
logging.info(f"function:【{func.__name__}】,runtime:【{str(t2 - t1)}】s")
|
||||
if t2 - t1 > 0.05:
|
||||
logging.info(f"function:【{func.__name__}】,runtime:【{str(t2 - t1)}】s")
|
||||
# logging.info(f"function:【{func.__name__}】,runtime:【{str(t2 - t1)}】s")
|
||||
return res
|
||||
|
||||
return wrapper
|
||||
@@ -22,7 +22,8 @@ def ClassCallRunTime(func):
|
||||
end_time = time.time()
|
||||
execution_time = end_time - start_time
|
||||
class_name = args[0].__class__.__name__ # 获取类名
|
||||
print(f"class name: {class_name} , run time is : {execution_time} s")
|
||||
if execution_time > 0.05:
|
||||
logging.info(f"class name: {class_name} , run time is : {execution_time} s")
|
||||
return result
|
||||
|
||||
return wrapper
|
||||
|
||||
@@ -82,7 +82,7 @@ if __name__ == '__main__':
|
||||
# url = "aida-users/89/sketchboard/female/Dress/e6724ab7-8d3f-4677-abe0-c3e42ab7af85.jpeg"
|
||||
# url = "aida-users/87/print/956614a2-7e75-4fbe-9ed0-c1831e37a2c9-4-87.png"
|
||||
# url = "aida-users/89/single_logo/123-89.png"
|
||||
url = "aida-users/89/test/123-89.png"
|
||||
url = "aida-users/89/123-89.png"
|
||||
|
||||
# url = "aida-collection-element/12148/Sketchboard/95ea577b-305b-4a62-b30a-39c0dd3ddb3f.png"
|
||||
read_type = "2"
|
||||
|
||||
Reference in New Issue
Block a user