feat: 代码梳理 移除所有敏感密钥 通过环境变量方式配置
All checks were successful
git commit AiDA python develop 分支构建部署 / scheduled_deploy (push) Has been skipped

This commit is contained in:
zcr
2025-12-30 16:49:08 +08:00
parent 1be716e414
commit 18024a2d70
167 changed files with 5283 additions and 10464 deletions

View File

@@ -7,7 +7,7 @@ import numpy as np
import torch
import tritonclient.http as httpclient
from app.core.config import *
from app.core.config import settings, DESIGN_MODEL_URL, DESIGN_MODEL_NAME
from app.service.generate_image.utils.upload_sd_image import upload_stain_png_sd, upload_face_png_sd
logger = logging.getLogger()
@@ -65,40 +65,40 @@ def get_contours(image):
# transformed_img = image.astype(np.float32)
# # 输入集
# inputs = [
# httpclient.InferInput(SEGMENTATION['input'], transformed_img.shape, datatype="FP32")
# httpclient.InferInput(DESIGN_MODEL_NAME, transformed_img.shape, datatype="FP32")
# ]
# inputs[0].set_data_from_numpy(transformed_img, binary_data=True)
# # 输出集
# outputs = [
# httpclient.InferRequestedOutput(SEGMENTATION['output'], binary_data=True),
# httpclient.InferRequestedOutput("seg_input__0", binary_data=True),
# ]
# results = client.infer(model_name=SEGMENTATION['name'], inputs=inputs, outputs=outputs)
# # 推理
# # 取结果
# inference_output1 = torch.from_numpy(results.as_numpy(SEGMENTATION['output']))
# inference_output1 = torch.from_numpy(results.as_numpy("seg_input__0"))
# seg_result = seg_postprocess(inference_output1, ori_shape)
# return seg_result
def seg_infer_image(image_obj):
    """Run KNet segmentation inference on an image via Triton's HTTP client.

    Reconstructed from a garbled diff rendering: the visible text interleaved
    the pre- and post-commit lines of this function; this body keeps only the
    post-commit (added) versions, which is the only consistent reading.

    Parameters
    ----------
    image_obj : object
        Raw image accepted by ``seg_preprocess`` (presumably a decoded image
        array — TODO confirm against seg_preprocess).

    Returns
    -------
    object
        Whatever ``seg_postprocess`` produces from the raw model output
        (semantics defined elsewhere in this module).
    """
    # seg_preprocess still returns (image, ori_shape); ori_shape is no longer
    # forwarded to seg_postprocess after this commit, so it is unused here.
    image, ori_shape = seg_preprocess(image_obj)
    # Endpoint and model name now come from environment-driven config
    # (DESIGN_MODEL_URL / DESIGN_MODEL_NAME) instead of the old SEGMENTATION dict.
    client = httpclient.InferenceServerClient(url=DESIGN_MODEL_URL)
    transformed_img = image.astype(np.float32)
    # 输入集 — single FP32 input tensor named "seg_input__0".
    inputs = [
        httpclient.InferInput("seg_input__0", transformed_img.shape, datatype="FP32")
    ]
    inputs[0].set_data_from_numpy(transformed_img, binary_data=True)
    # 输出集 — single requested output tensor named "seg_output__0".
    outputs = [
        httpclient.InferRequestedOutput("seg_output__0", binary_data=True),
    ]
    start_time = time.time()
    # 推理
    results = client.infer(model_name=DESIGN_MODEL_NAME, inputs=inputs, outputs=outputs)
    print(f"KNet infer time is :{time.time() - start_time}")
    # 取结果
    inference_output1 = results.as_numpy("seg_output__0")
    seg_result = seg_postprocess(inference_output1)
    return seg_result
@@ -110,7 +110,7 @@ def seg_infer_image(image_obj):
# return seg_pred
# KNet
def seg_postprocess(output, ori_shape):
def seg_postprocess(output):
# seg_logit = F.interpolate(torch.tensor(output).float(), size=ori_shape, scale_factor=None, mode='bilinear', align_corners=False)
# seg_logit = F.softmax(seg_logit, dim=1)
# seg_pred = seg_logit.argmax(dim=1)
@@ -201,7 +201,7 @@ def stain_detection(image, user_id, category, tasks_id, spot_size=100):
# 如果有连续的纯白区域存在
if filtered_contours:
# 将纯白区域替换为灰色
if DEBUG:
if settings.DEBUG:
for cnt in filtered_contours:
x, y, w, h = cv2.boundingRect(cnt)
# 在原始图像上进行替换
@@ -216,7 +216,7 @@ def stain_detection(image, user_id, category, tasks_id, spot_size=100):
if is_pure_white:
return False, None
if DEBUG:
if settings.DEBUG:
for corner_coords in [
(0, 0),
# (0, width - spot_size),
@@ -236,7 +236,7 @@ def stain_detection(image, user_id, category, tasks_id, spot_size=100):
]:
cv2.rectangle(dst, corner_coords, (corner_coords[0] + spot_size, corner_coords[1] + spot_size), (0, 0, 255), 2)
cv2.rectangle(dst, (center_x - spot_size // 2, center_y - spot_size // 2), (center_x + spot_size // 2, center_y + spot_size // 2), (0, 255, 0), 2) # 在原始图像上绘制矩形框
image_url = upload_stain_png_sd(dst, user_id=user_id, category=f"{category}", object_name=f"{tasks_id}.png")
upload_stain_png_sd(dst, user_id=user_id, category=f"{category}", object_name=f"{tasks_id}.png")
return True, image
@@ -262,10 +262,10 @@ def generate_category_recognition(image, gender):
scores = inference_output.detach().numpy()
import pandas as pd
attr_type = pd.read_csv(CATEGORY_PATH)
attr_type = pd.read_csv(settings.CATEGORY_PATH)
colattr = list(attr_type['labelName'])
task = attr_type['taskName'][0]
# attr_type['taskName'][0]
maxsc = np.max(scores[0][:5])
indexs = np.argwhere(scores == maxsc)[:, 1]
@@ -321,12 +321,13 @@ def face_detect_pic(image, user_id, category, tasks_id):
# cv2.imshow("gray", gray)
# 2、训练一组人脸
FACE_CLASSIFIER = ""
face_detector = cv2.CascadeClassifier(FACE_CLASSIFIER)
# 3、检测人脸用灰度图检测返回人脸矩形坐标(4个角)
faces_rect = face_detector.detectMultiScale(gray, 1.05, 3)
if DEBUG:
if settings.DEBUG:
dst = image.copy()
for x, y, w, h in faces_rect:
cv2.rectangle(dst, (x, y), (x + w, y + h), (0, 0, 255), 3) # 画出矩形框
@@ -336,7 +337,7 @@ def face_detect_pic(image, user_id, category, tasks_id):
dst = image.copy()
for x, y, w, h in faces_rect:
cv2.rectangle(dst, (x, y), (x + w, y + h), (0, 0, 255), 3) # 画出矩形框
image_url = upload_face_png_sd(dst, user_id=user_id, category=f"{category}", object_name=f"{tasks_id}.png")
upload_face_png_sd(dst, user_id=user_id, category=f"{category}", object_name=f"{tasks_id}.png")
return len(faces_rect)