feat: 代码梳理 移除所有敏感密钥 通过环境变量方式配置
All checks were successful
git commit AiDA python develop 分支构建部署 / scheduled_deploy (push) Has been skipped

This commit is contained in:
zcr
2025-12-30 16:49:08 +08:00
parent 1be716e414
commit 18024a2d70
167 changed files with 5283 additions and 10464 deletions

View File

@@ -1,19 +1,22 @@
import logging
import os
import time
import cv2
import numpy as np
import torch
import tritonclient.grpc as grpcclient
from minio import Minio
from pymilvus import MilvusClient
from urllib3.exceptions import ResponseError
from app.core.config import *
from app.core.config import settings, SR_MODEL_NAME, SR_TRITON_URL, MILVUS_TABLE_KEYPOINT, KEYPOINT_RESULT_TABLE_FIELD_SET
from app.schemas.pre_processing import DesignPreProcessingModel
from app.service.design_fast.utils.design_ensemble import get_seg_result, get_keypoint_result
from app.service.utils.oss_client import oss_get_image, oss_upload_image
from app.service.utils.new_oss_client import oss_get_image, oss_upload_image
logger = logging.getLogger()
minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE)
class DesignPreprocessing:
@@ -46,11 +49,12 @@ class DesignPreprocessing:
del d['keypoint_result']
return result
def read_image(self, image_list):
@staticmethod
def read_image(image_list):
for obj in image_list:
# file = self.minio_client.get_object(obj['image_url'].split("/", 1)[0], obj['image_url'].split("/", 1)[1]).data
# image = cv2.imdecode(np.frombuffer(file, np.uint8), 1)
image = oss_get_image(bucket=obj['image_url'].split("/", 1)[0], object_name=obj['image_url'].split("/", 1)[1], data_type="cv2")
image = oss_get_image(oss_client=minio_client, bucket=obj['image_url'].split("/", 1)[0], object_name=obj['image_url'].split("/", 1)[1], data_type="cv2")
if len(image.shape) == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
elif image.shape[2] == 4: # 如果是四通道 mask
@@ -59,7 +63,8 @@ class DesignPreprocessing:
return image_list
# @ RunTime
def bounding_box(self, image_list):
@staticmethod
def bounding_box(image_list):
for item in image_list:
image = item['image_obj']
height, width = image.shape[:2]
@@ -77,11 +82,6 @@ class DesignPreprocessing:
x_max = max(x_max, x + w)
y_max = max(y_max, y + h)
if IF_DEBUG_SHOW:
image_with_big_rect = cv2.rectangle(image.copy(), (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)
cv2.imshow("bounding_box image", image_with_big_rect)
cv2.waitKey(0)
# 根据大矩形的坐标来裁剪原始图像
if len(contours) > 0:
cropped_image = image[y_min:y_max, x_min:x_max]
@@ -107,7 +107,8 @@ class DesignPreprocessing:
item['obj'] = padded_image
return image_list
def super_resolution(self, image_list):
@staticmethod
def super_resolution(image_list):
for item in image_list:
# 判断 两边是否同时都小于512 因为此处做四倍超分
if item['obj'].shape[0] <= 512 and item['obj'].shape[1] <= 512:
@@ -136,7 +137,7 @@ class DesignPreprocessing:
# self.minio_client.put_object(item['image_url'].split("/", 1)[0], item['image_url'].split("/", 1)[1], io.BytesIO(image_bytes), len(image_bytes), content_type="image/jpeg", )
bucket_name = item['image_url'].split("/", 1)[0]
object_name = item['image_url'].split("/", 1)[1]
oss_upload_image(bucket=bucket_name, object_name=object_name, image_bytes=image_bytes)
oss_upload_image(oss_client=minio_client, bucket=bucket_name, object_name=object_name, image_bytes=image_bytes)
logging.info(f"Object '{item['image_url'].split('/', 1)[1]}' overwritten successfully.")
except ResponseError as err:
logging.warning(f"Error: {err}")
@@ -144,7 +145,6 @@ class DesignPreprocessing:
# @ RunTime
def infer_image(self, image_list):
seg_result = None
for sketch in image_list:
# 小写
image_category = sketch['image_category'].lower()
@@ -156,36 +156,17 @@ class DesignPreprocessing:
_, seg_cache = self.load_seg_result(sketch['image_id'])
if not _:
# 推理获得seg 结果
seg_result = get_seg_result(sketch["image_id"], sketch['obj'])[0]
seg_result = get_seg_result(sketch['obj'])[0]
self.save_seg_result(seg_result, sketch['image_id'])
logger.info(f"{sketch['image_id']} image size is :{sketch['obj'].shape} , seg cache size is :{seg_result.shape}")
else:
logger.info(f"{sketch['image_id']} image size is :{sketch['obj'].shape} , seg cache size is :{seg_cache.shape}")
if IF_DEBUG_SHOW:
debug_show_image = sketch['obj'].copy()
points_list = []
point_size = 1
point_color = (0, 0, 255) # BGR
thickness = 4 # 可以为 0 、4、8
for i in sketch['keypoint_result'].values():
points_list.append((int(i[1]), int(i[0])))
for point in points_list:
cv2.circle(debug_show_image, point, point_size, point_color, thickness)
cv2.imshow("seg_result", seg_result)
cv2.imshow("", debug_show_image)
cv2.waitKey(0)
# # 关键点在上部则推理seg
# if sketch["site"] == "up":
# # 判断seg缓存是否存在,是否与当前图片shape一致
# seg_result = self.search_seg_result(sketch["image_id"], sketch["obj"].shape)
# if seg_result is False:
# # 推理seg + 保存
# seg_result = get_seg_result(sketch['image_id'], sketch['obj'])
return image_list
# @ RunTime
def composing_image(self, image_list):
@staticmethod
def composing_image(image_list):
for image in image_list:
''' 比例相同 整合上下装代码'''
image_width = image['obj'].shape[1]
@@ -194,21 +175,18 @@ class DesignPreprocessing:
if waist_width / scale >= image_width:
add_width = int((waist_width / scale - image_width) / 2)
ret = cv2.copyMakeBorder(image['obj'], 0, 0, add_width, add_width, cv2.BORDER_CONSTANT, value=(256, 256, 256))
if IF_DEBUG_SHOW:
cv2.imshow("composing_image", ret)
cv2.waitKey(0)
image_bytes = cv2.imencode(".jpg", ret)[1].tobytes()
# image['show_image_url'] = f"{image['image_url'].split('/', 1)[0]}/{self.minio_client.put_object(image['image_url'].split('/', 1)[0], image['image_url'].split('/', 1)[1].replace('.', '-show.'), io.BytesIO(image_bytes), len(image_bytes), content_type='image/jpeg').object_name}"
bucket_name = image['image_url'].split('/', 1)[0]
object_name = image['image_url'].split('/', 1)[1].replace('.', '-show.')
oss_upload_image(bucket=bucket_name, object_name=object_name, image_bytes=image_bytes)
oss_upload_image(oss_client=minio_client, bucket=bucket_name, object_name=object_name, image_bytes=image_bytes)
image['show_image_url'] = f"{bucket_name}/{object_name}"
else:
image_bytes = cv2.imencode(".jpg", image['obj'])[1].tobytes()
# image['show_image_url'] = f"{image['image_url'].split('/', 1)[0]}/{self.minio_client.put_object(image['image_url'].split('/', 1)[0], image['image_url'].split('/', 1)[1].replace('.', '-show.'), io.BytesIO(image_bytes), len(image_bytes), content_type='image/jpeg').object_name}"
bucket_name = image['image_url'].split('/', 1)[0]
object_name = image['image_url'].split('/', 1)[1].replace('.', '-show.')
oss_upload_image(bucket=bucket_name, object_name=object_name, image_bytes=image_bytes)
oss_upload_image(oss_client=minio_client, bucket=bucket_name, object_name=object_name, image_bytes=image_bytes)
image['show_image_url'] = f"{bucket_name}/{object_name}"
# if image['site'] == 'down':
@@ -261,7 +239,7 @@ class DesignPreprocessing:
@staticmethod
def load_seg_result(image_id):
file_path = f"{SEG_CACHE_PATH}{image_id}.npy"
file_path = f"{settings.SEG_CACHE_PATH}{image_id}.npy"
try:
seg_result = np.load(file_path)
return True, seg_result
@@ -274,7 +252,7 @@ class DesignPreprocessing:
@staticmethod
def save_seg_result(seg_result, image_id):
file_path = f"{SEG_CACHE_PATH}{image_id}.npy"
file_path = f"{settings.SEG_CACHE_PATH}{image_id}.npy"
try:
np.save(file_path, seg_result)
logging.debug(f"保存成功,{os.path.abspath(file_path)}")
@@ -283,7 +261,7 @@ class DesignPreprocessing:
def keypoint_cache(self, sketch):
try:
client = MilvusClient(uri=MILVUS_URL, token=MILVUS_TOKEN, db_name=MILVUS_ALIAS)
client = MilvusClient(uri=settings.MILVUS_URL, token=settings.MILVUS_TOKEN, db_name=settings.MILVUS_ALIAS)
keypoint_id = sketch['image_id']
res = client.query(
collection_name=MILVUS_TABLE_KEYPOINT,
@@ -307,7 +285,8 @@ class DesignPreprocessing:
return False
# @ RunTime
def infer_keypoint_result(self, sketch):
@staticmethod
def infer_keypoint_result(sketch):
    """Run keypoint inference for one sketch entry.

    Args:
        sketch: dict carrying the preprocessed image under ``'obj'`` and the
            garment position under ``'site'`` (e.g. up/down).

    Returns:
        Whatever ``get_keypoint_result`` produces for this image/site
        (the raw inference result).
    """
    # Delegate directly to the ensemble helper; no post-processing here.
    return get_keypoint_result(sketch["obj"], sketch['site'])
@@ -320,14 +299,14 @@ class DesignPreprocessing:
else:
zeros = np.zeros(4, dtype=int)
result = np.concatenate([keypoint_infer_result.flatten(), zeros])
data = [
[int(sketch['image_id'])],
[sketch['site']],
[result.tolist()]
]
# [
# [int(sketch['image_id'])],
# [sketch['site']],
# [result.tolist()]
# ]
try:
# connections.connect(alias=MILVUS_ALIAS, host=MILVUS_DB_HOST, port=MILVUS_PORT)
start_time = time.time()
time.time()
# collection = Collection(MILVUS_TABLE_KEYPOINT) # Get an existing collection.
# mr = collection.insert(data)
# logging.info(f"save keypoint time : {time.time() - start_time}")
@@ -344,11 +323,11 @@ class DesignPreprocessing:
else:
# 需要的是down 即推理出来的是down 那么查询的就是up
result = np.concatenate([search_result[:20], infer_result.flatten()])
data = [
[int(sketch['image_id'])],
["all"],
[result.tolist()]
]
# [
# [int(sketch['image_id'])],
# ["all"],
# [result.tolist()]
# ]
try:
# connections.connect(alias=MILVUS_ALIAS, host=MILVUS_DB_HOST, port=MILVUS_PORT)
# start_time = time.time()