feat: 代码梳理，移除所有敏感密钥，通过环境变量方式配置
All checks were successful
git commit AiDA python develop 分支构建部署 / scheduled_deploy (push) Has been skipped

This commit is contained in:
zcr
2025-12-30 16:49:08 +08:00
parent 1be716e414
commit 18024a2d70
167 changed files with 5283 additions and 10464 deletions

View File

@@ -8,25 +8,24 @@
@detail
"""
import logging
import time
import uuid
import cv2
import mmcv
import numpy as np
import pandas as pd
import torch
import tritonclient.http as httpclient
import cv2
import numpy as np
import tritonclient.grpc as grpcclient
import tritonclient.http as httpclient
from minio import Minio
from tritonclient.utils import np_to_triton_dtype
from app.core.config import *
from app.core.config import settings, FAST_GI_MODEL_URL, GI_MODEL_URL, DESIGN_MODEL_URL, FAST_GI_MODEL_NAME, GI_MODEL_NAME
from app.service.utils.new_oss_client import oss_upload_image
logger = logging.getLogger()
minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE)
minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE)
class AgentToolGenerateImage:
@@ -85,7 +84,8 @@ class AgentToolGenerateImage:
self.grpc_client.close()
self.triton_client.close()
def preprocess(self, img):
@staticmethod
def preprocess(img):
img = mmcv.imread(img)
img_scale = (224, 224)
img = cv2.resize(img, img_scale)
@@ -126,7 +126,7 @@ class AgentToolGenerateImage:
return category_list
attr_type = pd.read_csv(CATEGORY_PATH)
attr_type = pd.read_csv(settings.CATEGORY_PATH)
if __name__ == '__main__':
request_data = {

View File

@@ -16,16 +16,18 @@ import minio
import numpy as np
import redis
import tritonclient.grpc as grpcclient
from minio import Minio
from tritonclient.utils import np_to_triton_dtype
from app.core.config import *
from app.core.config import settings, FAST_GI_MODEL_URL, GI_MODEL_URL, FAST_GI_MODEL_NAME, GI_MODEL_NAME, GI_RABBITMQ_QUEUES
from app.schemas.generate_image import GenerateImageModel
from app.service.generate_image.utils.image_processing import remove_background, stain_detection, generate_category_recognition, autoLevels, luminance_adjust
from app.service.generate_image.utils.mq import publish_status
from app.service.generate_image.utils.upload_sd_image import upload_png_sd
from app.service.utils.oss_client import oss_get_image
from app.service.utils.new_oss_client import oss_get_image
logger = logging.getLogger()
minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE)
class GenerateImage:
@@ -36,7 +38,7 @@ class GenerateImage:
else:
self.grpc_client = grpcclient.InferenceServerClient(url=GI_MODEL_URL)
self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True)
if request_data.mode == "img2img":
# cv2 读图片是BGR PIL读图片是RGB
self.image = self.get_image(request_data.image_url)
@@ -67,8 +69,7 @@ class GenerateImage:
# image_array = np.asarray(bytearray(image_file.read()), dtype=np.uint8)
# image_cv2 = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
# image_rbg = cv2.cvtColor(image_cv2, cv2.COLOR_BGR2RGB)
image_cv2 = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="cv2")
image_cv2 = oss_get_image(oss_client=minio_client, bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="cv2")
image_rbg = cv2.cvtColor(image_cv2, cv2.COLOR_BGR2RGB)
image = cv2.resize(image_rbg, (1024, 1024))
except minio.error.S3Error:
@@ -120,7 +121,7 @@ class GenerateImage:
else: # 有污点 保存图片到本地 测试用
self.generate_data['status'] = "SUCCESS"
self.generate_data['message'] = "success"
self.generate_data['image_url'] = str(GI_SYS_IMAGE_URL)
self.generate_data['image_url'] = "aida-sys-image/generate_image/white_image.jpg"
self.redis_client.set(self.tasks_id, json.dumps(self.generate_data))
# logger.info(f"stain_detection result : {self.generate_data}")
@@ -171,12 +172,12 @@ class GenerateImage:
raise Exception(str(e))
finally:
dict_generate_data, str_generate_data = self.read_tasks_status()
if not DEBUG:
if not settings.DEBUG:
publish_status(str_generate_data, GI_RABBITMQ_QUEUES)
def infer_cancel(tasks_id):
redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True)
data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'}
generate_data = json.dumps(data)
redis_client.set(tasks_id, generate_data)
@@ -186,12 +187,12 @@ def infer_cancel(tasks_id):
if __name__ == '__main__':
rd = GenerateImageModel(
tasks_id="123-89",
prompt="Women's clothing ,dress,technical drawing style, clean line art, no shading, no texture, flat sketch, no human body, no face, centered composition, pure white background, single garmentsingle garment only, front flat view",
image_url="aida-collection-element/87/Printboard/842c09cf-7297-42d9-9e6e-9c17d4a13cb5.jpg",
mode='txt2img',
category="test",
gender="male",
version="high"
prompt="a single item of sketch of dress, 4k, white background",
image_url="aida-collection-element/89/Sketchboard/95f20cdc-e059-435c-b8b1-d04cc9e80c3d.png",
mode='img2img',
category="sketch",
gender="Female",
version="fast"
)
server = GenerateImage(rd)
print(server.get_result())

View File

@@ -15,11 +15,11 @@ import numpy as np
import redis
import tritonclient.grpc as grpcclient
from app.core.config import *
from app.core.config import settings, GMV_MODEL_URL, GMV_MODEL_NAME, GMV_RABBITMQ_QUEUES
from app.schemas.generate_image import GenerateMultiViewModel
from app.service.generate_image.utils.mq import publish_status
from app.service.generate_image.utils.upload_sd_image import upload_png_sd
from app.service.utils.oss_client import oss_get_image
from app.service.utils.new_oss_client import oss_get_image
logger = logging.getLogger()
@@ -27,7 +27,7 @@ logger = logging.getLogger()
class GenerateMultiView:
def __init__(self, request_data):
self.grpc_client = grpcclient.InferenceServerClient(url=GMV_MODEL_URL)
self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True)
self.image = self.get_image(request_data.image_url)
self.tasks_id = request_data.tasks_id
self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:]
@@ -35,7 +35,8 @@ class GenerateMultiView:
self.redis_client.set(self.tasks_id, json.dumps(self.generate_data))
self.redis_client.expire(self.tasks_id, 600)
def get_image(self, image_url):
@staticmethod
def get_image(image_url):
try:
image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL")
return image
@@ -92,12 +93,12 @@ class GenerateMultiView:
raise Exception(str(e))
finally:
dict_generate_data, str_generate_data = self.read_tasks_status()
if not DEBUG:
if not settings.DEBUG:
publish_status(str_generate_data, GMV_RABBITMQ_QUEUES)
def infer_cancel(tasks_id):
redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True)
data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'}
generate_data = json.dumps(data)
redis_client.set(tasks_id, generate_data)

View File

@@ -35,7 +35,7 @@
# # self.channel = self.connection.channel()
# # self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE)
# self.grpc_client = grpcclient.InferenceServerClient(url=GPI_MODEL_URL)
# self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
# self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True)
# self.category = "product_image"
# self.image_strength = request_data.image_strength
# self.batch_size = 1
@@ -126,7 +126,7 @@
#
#
# def infer_cancel(tasks_id):
# redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
# redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True)
# data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'}
# gen_product_data = json.dumps(data)
# redis_client.set(tasks_id, gen_product_data)
@@ -208,21 +208,23 @@ import numpy as np
import redis
import tritonclient.grpc as grpcclient
from PIL import Image
from minio import Minio
from tritonclient.utils import np_to_triton_dtype
from app.core.config import *
from app.core.config import settings, GPI_MODEL_URL, GPI_MODEL_NAME_SINGLE, GPI_MODEL_NAME_OVERALL, GPI_RABBITMQ_QUEUES
from app.schemas.generate_image import GenerateProductImageModel
from app.service.generate_image.utils.mq import publish_status
from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image
from app.service.utils.oss_client import oss_get_image
from app.service.utils.new_oss_client import oss_get_image
logger = logging.getLogger()
minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE)
class GenerateProductImage:
def __init__(self, request_data):
self.grpc_client = grpcclient.InferenceServerClient(url=GPI_MODEL_URL)
self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True)
self.category = "product_image"
self.image_strength = request_data.image_strength
self.batch_size = 1
@@ -313,12 +315,12 @@ class GenerateProductImage:
raise Exception(str(e))
finally:
dict_gen_product_data, str_gen_product_data = self.read_tasks_status()
if not DEBUG:
if not settings.DEBUG:
publish_status(str_gen_product_data, GPI_RABBITMQ_QUEUES)
def infer_cancel(tasks_id):
redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True)
data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'}
gen_product_data = json.dumps(data)
redis_client.set(tasks_id, gen_product_data)
@@ -326,7 +328,7 @@ def infer_cancel(tasks_id):
def pre_processing_image(image_url):
image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL")
image = oss_get_image(oss_client=minio_client, bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL")
# 目标图片的尺寸
target_width = 512
target_height = 768

View File

@@ -18,11 +18,11 @@ import tritonclient.grpc as grpcclient
from PIL import Image
from tritonclient.utils import np_to_triton_dtype
from app.core.config import *
from app.core.config import settings, GRI_MODEL_URL, GRI_MODEL_NAME_SINGLE, GRI_MODEL_NAME_OVERALL, GRI_RABBITMQ_QUEUES
from app.schemas.generate_image import GenerateRelightImageModel
from app.service.generate_image.utils.mq import publish_status
from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image
from app.service.utils.oss_client import oss_get_image
from app.service.utils.new_oss_client import oss_get_image
logger = logging.getLogger()
@@ -30,7 +30,7 @@ logger = logging.getLogger()
class GenerateRelightImage:
def __init__(self, request_data):
self.grpc_client = grpcclient.InferenceServerClient(url=GRI_MODEL_URL)
self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True)
self.category = "relight_image"
self.batch_size = 1
self.prompt = request_data.prompt
@@ -134,9 +134,10 @@ class GenerateRelightImage:
raise Exception(str(e))
finally:
dict_gen_product_data, str_gen_product_data = self.read_tasks_status()
if not DEBUG:
if not settings.DEBUG:
publish_status(str_gen_product_data, GRI_RABBITMQ_QUEUES)
def pre_processing_image(image_url):
image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL")
# 目标图片的尺寸
@@ -178,8 +179,9 @@ def pre_processing_image(image_url):
# image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA)
return image
def infer_cancel(tasks_id):
redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True)
data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'}
gen_product_data = json.dumps(data)
redis_client.set(tasks_id, gen_product_data)

View File

@@ -11,18 +11,16 @@ import json
import logging
import time
import cv2
import numpy as np
import redis
import tritonclient.grpc as grpcclient
from PIL import Image
from minio import Minio
from tritonclient.utils import np_to_triton_dtype
from app.core.config import *
import tritonclient.grpc as grpcclient
from app.core.config import settings, GI_RABBITMQ_QUEUES, GSL_MODEL_NAME, GSL_MODEL_URL
from app.schemas.generate_image import GenerateSingleLogoImageModel
from app.service.generate_image.utils.mq import publish_status
from app.service.generate_image.utils.upload_sd_image import upload_png_sd, upload_SDXL_image
from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image
logger = logging.getLogger()
@@ -30,7 +28,7 @@ logger = logging.getLogger()
class GenerateSingleLogoImage:
def __init__(self, request_data):
self.grpc_client = grpcclient.InferenceServerClient(url=GSL_MODEL_URL)
self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True)
self.batch_size = 1
self.category = "single_logo"
self.negative_prompts = "bad, ugly"
@@ -93,12 +91,12 @@ class GenerateSingleLogoImage:
raise Exception(str(e))
finally:
dict_generate_data, str_generate_data = self.read_tasks_status()
if not DEBUG:
if not settings.DEBUG:
publish_status(str_generate_data, GI_RABBITMQ_QUEUES)
def infer_cancel(tasks_id):
redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True)
data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'}
generate_data = json.dumps(data)
redis_client.set(tasks_id, generate_data)

View File

@@ -17,21 +17,23 @@ import numpy as np
import redis
import tritonclient.grpc as grpcclient
from PIL import Image
from minio import Minio
from tritonclient.utils import np_to_triton_dtype
from app.core.config import *
from app.core.config import settings, PS_RABBITMQ_QUEUES, PT_MODEL_URL
from app.schemas.pose_transform import PoseTransformModel
from app.service.generate_image.utils.mq import publish_status
from app.service.generate_image.utils.pose_transform_upload import upload_gif, upload_video, upload_first_image
from app.service.utils.oss_client import oss_get_image
from app.service.utils.new_oss_client import oss_get_image
logger = logging.getLogger()
minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE)
class PoseTransformService:
def __init__(self, request_data):
self.grpc_client = grpcclient.InferenceServerClient(url=PT_MODEL_URL)
self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True)
self.category = "pose_transform"
self.image_url = request_data.image_url
self.pose_num = request_data.pose_id
@@ -115,16 +117,14 @@ class PoseTransformService:
raise Exception(str(e))
finally:
dict_pose_transform_data, str_pose_transform_data = self.read_tasks_status()
if not DEBUG:
if not settings.DEBUG:
publish_status(json.dumps(str_pose_transform_data), PS_RABBITMQ_QUEUES)
logger.info(
f" [x] Sent to {PS_RABBITMQ_QUEUES} data@@@@ {json.dumps(dict_pose_transform_data, indent=4)}")
def infer_cancel(tasks_id):
redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True)
data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'}
pose_transform_data = json.dumps(data)
redis_client.set(tasks_id, pose_transform_data)
@@ -132,8 +132,7 @@ def infer_cancel(tasks_id):
def pre_processing_image(image_url):
image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:],
data_type="PIL")
image = oss_get_image(oss_client=minio_client, bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL")
# 目标图片的尺寸
target_width = 512
target_height = 768

View File

@@ -1,177 +0,0 @@
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project trinity_client
@File service_att_recognition.py
@Author :周成融
@Date 2023/7/26 12:01:05
@detail
"""
import json
import logging
import time
from io import BytesIO
import cv2
import minio
import redis
import tritonclient.grpc as grpcclient
import numpy as np
from minio import Minio
from tritonclient.utils import np_to_triton_dtype
from app.core.config import *
from app.schemas.generate_image import GenerateImageModel
from app.service.generate_image.utils.adjust_contrast import adjust_contrast
from app.service.generate_image.utils.image_processing import remove_background, stain_detection
from app.service.generate_image.utils.upload_sd_image import upload_png_sd
logger = logging.getLogger()
class GenerateImage:
    """Drive one image-generation task against a Triton inference server.

    The constructor wires up MinIO, Redis and a Triton gRPC client, seeds a
    PENDING status record in Redis (keyed by ``tasks_id``), and ``get_result``
    submits the inference request, then polls that Redis record until the task
    reaches a terminal state or a ~60 s budget is exhausted.

    NOTE(review): status updates are expected to be written by ``callback``,
    but ``infer`` submits without registering it (the ``callback=`` argument is
    commented out) — confirm how the status record actually transitions.
    """

    def __init__(self, request_data):
        """Initialize clients and register the task as PENDING in Redis.

        ``request_data`` is expected to expose ``mode``, ``image_url``,
        ``prompt``, ``tasks_id`` and ``category`` (see ``GenerateImageModel``).
        """
        if DEBUG is False:
            # NOTE(review): `pika` is never imported in this module, so this
            # branch raises NameError whenever DEBUG is False — confirm whether
            # the RabbitMQ path was intentionally abandoned.
            self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS))
            self.channel = self.connection.channel()
        # self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS))
        # self.channel = self.connection.channel()
        self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE)
        self.grpc_client = grpcclient.InferenceServerClient(url=GI_MODEL_URL)
        self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
        if request_data.mode == "img2img":
            # img2img: the reference image is fetched from object storage.
            self.image = self.get_image(request_data.image_url)
            self.prompt = request_data.prompt
        else:
            # txt2img: no source image — feed random noise as a placeholder
            # so the Triton input tensor still has the expected shape.
            self.image = np.random.randint(0, 256, (1024, 1024, 3), dtype=np.uint8)
            self.prompt = request_data.prompt
        self.tasks_id = request_data.tasks_id
        # Convention: the user id is the suffix after the last '-' in tasks_id.
        self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:]
        self.mode = request_data.mode
        self.batch_size = 1
        self.category = request_data.category
        self.index = 0
        # Seed the task-status record; it expires after 600 s so abandoned
        # tasks do not accumulate in Redis.
        self.generate_data = {'tasks_id': self.tasks_id, 'status': 'PENDING', 'message': "pending", 'data': ''}
        self.redis_client.set(self.tasks_id, json.dumps(self.generate_data))
        self.redis_client.expire(self.tasks_id, 600)

    def get_image(self, image_url):
        """Fetch ``image_url`` from MinIO and return it as a 1024x1024 BGR array.

        ``image_url`` is ``"<bucket>/<object_name>"``; on S3Error (e.g. the
        object is missing) a random-noise image is returned instead so the
        pipeline can still run.
        """
        # Get data of an object.
        # Read data from response.
        try:
            response = self.minio_client.get_object(image_url.split('/')[0], image_url[image_url.find('/') + 1:])
            image_file = BytesIO(response.data)
            image_array = np.asarray(bytearray(image_file.read()), dtype=np.uint8)
            # cv2.imdecode yields BGR channel order.
            image_cv2 = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
            image = cv2.resize(image_cv2, (1024, 1024))
        except minio.error.S3Error:
            # Object not found / storage error: degrade to noise input.
            image = np.random.randint(0, 256, (1024, 1024, 3), dtype=np.uint8)
        return image

    def callback(self, result, error):
        """Triton completion callback: post-process the result and persist
        the final task status (SUCCESS/FAILURE) to Redis.
        """
        if error:
            self.generate_data['status'] = "FAILURE"
            self.generate_data['message'] = str(error)
            self.generate_data['data'] = str(error)
            self.redis_client.set(self.tasks_id, json.dumps(self.generate_data))
        else:
            image_result = result.as_numpy("generated_image")[0]
            is_smudge = True
            if self.category == "sketch":
                # Remove the background
                remove_bg_image = remove_background(np.asarray(image_result))
                # Stain detection
                # NOTE(review): despite the name, is_smudge == True is treated
                # below as "no stain" — confirm against stain_detection().
                is_smudge, not_smudge_image = stain_detection(remove_bg_image)
                image_result = not_smudge_image
            if is_smudge:  # no stain found: upload the generated image
                image_result = adjust_contrast(image_result)
                image_url = upload_png_sd(image_result, user_id=self.user_id, category=f"{self.category}", object_name=f"{self.tasks_id}.png")
                # logger.info(f"upload image SUCCESS {image_url}")
                self.generate_data['status'] = "SUCCESS"
                self.generate_data['message'] = "success"
                self.generate_data['data'] = str(image_url)
                self.redis_client.set(self.tasks_id, json.dumps(self.generate_data))
            else:  # stain found: fall back to the stock system image URL
                self.generate_data['status'] = "SUCCESS"
                self.generate_data['message'] = "success"
                self.generate_data['data'] = str(GI_SYS_IMAGE_URL)
                self.redis_client.set(self.tasks_id, json.dumps(self.generate_data))
            # logger.info(f"stain_detection result : {self.generate_data}")

    def read_tasks_status(self):
        """Return the task status from Redis as ``(dict, raw_json_str)``."""
        status_data = self.redis_client.get(self.tasks_id)
        return json.loads(status_data), status_data

    def infer(self, inputs):
        """Submit ``inputs`` to the Triton model and return the inference context."""
        return self.grpc_client.infer(
            model_name=GI_MODEL_NAME,
            inputs=inputs,
            # callback=self.callback
        )

    def get_result(self):
        """Run inference and poll Redis until the task reaches a terminal state.

        Returns the last status dict read from Redis (may still be PENDING if
        the ~60 s polling budget runs out). Re-raises any error as a generic
        ``Exception`` carrying the original message.
        """
        try:
            # Batch every input to batch_size (currently always 1).
            prompts = [self.prompt] * self.batch_size
            modes = [self.mode] * self.batch_size
            images = [self.image.astype(np.float16)] * self.batch_size
            text_obj = np.array(prompts, dtype="object").reshape((-1, 1))
            mode_obj = np.array(modes, dtype="object").reshape((-1, 1))
            image_obj = np.array(images, dtype=np.float16).reshape((-1, 1024, 1024, 3))
            input_text = grpcclient.InferInput("prompt", text_obj.shape, np_to_triton_dtype(text_obj.dtype))
            input_image = grpcclient.InferInput("input_image", image_obj.shape, "FP16")
            input_mode = grpcclient.InferInput("mode", mode_obj.shape, np_to_triton_dtype(text_obj.dtype))
            input_text.set_data_from_numpy(text_obj)
            input_image.set_data_from_numpy(image_obj)
            input_mode.set_data_from_numpy(mode_obj)
            inputs = [input_text, input_image, input_mode]
            ctx = self.infer(inputs)
            # 600 iterations x 0.1 s sleep ≈ 60 s polling budget.
            time_out = 600
            generate_data = None
            while time_out > 0:
                generate_data, _ = self.read_tasks_status()
                # logger.info(generate_data)
                if generate_data['status'] in ["REVOKED", "FAILURE"]:
                    # Task was cancelled or failed elsewhere: abort inference.
                    ctx.cancel()
                    break
                elif generate_data['status'] == "SUCCESS":
                    break
                time_out -= 1
                time.sleep(0.1)
            # logger.info(time_out, generate_data)
            return generate_data
        except Exception as e:
            # self.generate_data['status'] = "FAILURE"
            # self.generate_data['message'] = "failure"
            # self.generate_data['data'] = str(e)
            # self.redis_client.set(self.tasks_id, json.dumps(self.generate_data))
            raise Exception(str(e))
        # finally:
        #     dict_generate_data, str_generate_data = self.read_tasks_status()
        #     if DEBUG is False:
        #         self.channel.basic_publish(exchange='', routing_key=GI_RABBITMQ_QUEUES, body=str_generate_data)
        #     logger.info(f" [x] Sent {json.dumps(dict_generate_data, indent=4)}")
def infer_cancel(tasks_id):
    """Mark the task identified by ``tasks_id`` as REVOKED in Redis.

    Writes the revocation payload under the task's key and returns it as a
    dict; the polling loop in ``GenerateImage.get_result`` picks the status
    up and cancels the in-flight inference.
    """
    client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
    payload = {
        'tasks_id': tasks_id,
        'status': 'REVOKED',
        'message': "revoked",
        'data': 'revoked',
    }
    client.set(tasks_id, json.dumps(payload))
    return payload
if __name__ == '__main__':
    # Manual smoke test: run a txt2img generation end-to-end and print the
    # final task-status dict. Requires live Redis/MinIO/Triton endpoints from
    # app.core.config, so this is for local debugging only.
    rd = GenerateImageModel(
        tasks_id="123-89",
        prompt='skeleton sitting by the side of a river looking soulful, concert poster, 4k, artistic',
        image_url="",
        mode='txt2img',
        category="test"
    )
    server = GenerateImage(rd)
    print(server.get_result())

View File

@@ -7,7 +7,7 @@ import numpy as np
import torch
import tritonclient.http as httpclient
from app.core.config import *
from app.core.config import settings, DESIGN_MODEL_URL, DESIGN_MODEL_NAME
from app.service.generate_image.utils.upload_sd_image import upload_stain_png_sd, upload_face_png_sd
logger = logging.getLogger()
@@ -65,40 +65,40 @@ def get_contours(image):
# transformed_img = image.astype(np.float32)
# # 输入集
# inputs = [
# httpclient.InferInput(SEGMENTATION['input'], transformed_img.shape, datatype="FP32")
# httpclient.InferInput(DESIGN_MODEL_NAME, transformed_img.shape, datatype="FP32")
# ]
# inputs[0].set_data_from_numpy(transformed_img, binary_data=True)
# # 输出集
# outputs = [
# httpclient.InferRequestedOutput(SEGMENTATION['output'], binary_data=True),
# httpclient.InferRequestedOutput("seg_input__0", binary_data=True),
# ]
# results = client.infer(model_name=SEGMENTATION['name'], inputs=inputs, outputs=outputs)
# # 推理
# # 取结果
# inference_output1 = torch.from_numpy(results.as_numpy(SEGMENTATION['output']))
# inference_output1 = torch.from_numpy(results.as_numpy("seg_input__0"))
# seg_result = seg_postprocess(inference_output1, ori_shape)
# return seg_result
def seg_infer_image(image_obj):
image, ori_shape = seg_preprocess(image_obj)
client = httpclient.InferenceServerClient(url=f"{DESIGN_MODEL_URL}")
client = httpclient.InferenceServerClient(url=DESIGN_MODEL_URL)
transformed_img = image.astype(np.float32)
# 输入集
inputs = [
httpclient.InferInput(SEGMENTATION['input'], transformed_img.shape, datatype="FP32")
httpclient.InferInput("seg_input__0", transformed_img.shape, datatype="FP32")
]
inputs[0].set_data_from_numpy(transformed_img, binary_data=True)
# 输出集
outputs = [
httpclient.InferRequestedOutput(SEGMENTATION['output'], binary_data=True),
httpclient.InferRequestedOutput("seg_output__0", binary_data=True),
]
start_time = time.time()
results = client.infer(model_name=SEGMENTATION['new_model_name'], inputs=inputs, outputs=outputs)
results = client.infer(model_name=DESIGN_MODEL_NAME, inputs=inputs, outputs=outputs)
print(f"KNet infer time is :{time.time() - start_time}")
# 推理
# 取结果
inference_output1 = results.as_numpy(SEGMENTATION['output'])
seg_result = seg_postprocess(inference_output1, ori_shape)
inference_output1 = results.as_numpy("seg_output__0")
seg_result = seg_postprocess(inference_output1)
return seg_result
@@ -110,7 +110,7 @@ def seg_infer_image(image_obj):
# return seg_pred
# KNet
def seg_postprocess(output, ori_shape):
def seg_postprocess(output):
# seg_logit = F.interpolate(torch.tensor(output).float(), size=ori_shape, scale_factor=None, mode='bilinear', align_corners=False)
# seg_logit = F.softmax(seg_logit, dim=1)
# seg_pred = seg_logit.argmax(dim=1)
@@ -201,7 +201,7 @@ def stain_detection(image, user_id, category, tasks_id, spot_size=100):
# 如果有连续的纯白区域存在
if filtered_contours:
# 将纯白区域替换为灰色
if DEBUG:
if settings.DEBUG:
for cnt in filtered_contours:
x, y, w, h = cv2.boundingRect(cnt)
# 在原始图像上进行替换
@@ -216,7 +216,7 @@ def stain_detection(image, user_id, category, tasks_id, spot_size=100):
if is_pure_white:
return False, None
if DEBUG:
if settings.DEBUG:
for corner_coords in [
(0, 0),
# (0, width - spot_size),
@@ -236,7 +236,7 @@ def stain_detection(image, user_id, category, tasks_id, spot_size=100):
]:
cv2.rectangle(dst, corner_coords, (corner_coords[0] + spot_size, corner_coords[1] + spot_size), (0, 0, 255), 2)
cv2.rectangle(dst, (center_x - spot_size // 2, center_y - spot_size // 2), (center_x + spot_size // 2, center_y + spot_size // 2), (0, 255, 0), 2) # 在原始图像上绘制矩形框
image_url = upload_stain_png_sd(dst, user_id=user_id, category=f"{category}", object_name=f"{tasks_id}.png")
upload_stain_png_sd(dst, user_id=user_id, category=f"{category}", object_name=f"{tasks_id}.png")
return True, image
@@ -262,10 +262,10 @@ def generate_category_recognition(image, gender):
scores = inference_output.detach().numpy()
import pandas as pd
attr_type = pd.read_csv(CATEGORY_PATH)
attr_type = pd.read_csv(settings.CATEGORY_PATH)
colattr = list(attr_type['labelName'])
task = attr_type['taskName'][0]
# attr_type['taskName'][0]
maxsc = np.max(scores[0][:5])
indexs = np.argwhere(scores == maxsc)[:, 1]
@@ -321,12 +321,13 @@ def face_detect_pic(image, user_id, category, tasks_id):
# cv2.imshow("gray", gray)
# 2、训练一组人脸
FACE_CLASSIFIER = ""
face_detector = cv2.CascadeClassifier(FACE_CLASSIFIER)
# 3、检测人脸用灰度图检测返回人脸矩形坐标(4个角)
faces_rect = face_detector.detectMultiScale(gray, 1.05, 3)
if DEBUG:
if settings.DEBUG:
dst = image.copy()
for x, y, w, h in faces_rect:
cv2.rectangle(dst, (x, y), (x + w, y + h), (0, 0, 255), 3) # 画出矩形框
@@ -336,7 +337,7 @@ def face_detect_pic(image, user_id, category, tasks_id):
dst = image.copy()
for x, y, w, h in faces_rect:
cv2.rectangle(dst, (x, y), (x + w, y + h), (0, 0, 255), 3) # 画出矩形框
image_url = upload_face_png_sd(dst, user_id=user_id, category=f"{category}", object_name=f"{tasks_id}.png")
upload_face_png_sd(dst, user_id=user_id, category=f"{category}", object_name=f"{tasks_id}.png")
return len(faces_rect)

View File

@@ -3,7 +3,7 @@ import json
import pika
import logging
from app.core.config import RABBITMQ_PARAMS
from app.core.rabbit_mq_config import RABBITMQ_PARAMS
logger = logging.getLogger(__name__)

View File

@@ -3,19 +3,13 @@ import logging
import os.path
import numpy as np
# import boto3
from minio import Minio
from moviepy.video.io.ImageSequenceClip import ImageSequenceClip
from app.core.config import *
from app.core.config import settings
from app.service.utils.new_oss_client import oss_upload_image
# minio 配置
MINIO_URL = "www.minio-api.aida.com.hk"
MINIO_ACCESS = '<REDACTED-ACCESS-KEY>'   # NOTE(review): real key was committed here — rotate it; history still contains it
MINIO_SECRET = '<REDACTED-SECRET-KEY>'   # NOTE(review): real secret was committed here — rotate it; history still contains it
MINIO_SECURE = True
minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE)
minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE)
def upload_first_image(image, user_id, category, file_name):
@@ -25,7 +19,7 @@ def upload_first_image(image, user_id, category, file_name):
image_data.seek(0)
image_bytes = image_data.read()
object_name = f'{user_id}/{category}/{file_name}'
req = oss_upload_image(oss_client=minio_client, bucket=GI_MINIO_BUCKET, object_name=object_name, image_bytes=image_bytes)
oss_upload_image(oss_client=minio_client, bucket="aida-users", object_name=object_name, image_bytes=image_bytes)
image_url = f"aida-users/{object_name}"
return image_url
except Exception as e:
@@ -35,7 +29,7 @@ def upload_first_image(image, user_id, category, file_name):
def upload_gif(gif_buffer, user_id, category, file_name):
try:
object_name = f'{user_id}/{category}/{file_name}'
req = minio_client.put_object(
minio_client.put_object(
"aida-users",
object_name,
gif_buffer,
@@ -62,8 +56,8 @@ def upload_video(frames, user_id, category, file_name):
logging.warning(f"upload_video runtime exception : {e}")
def ndarray_to_video(images, output_path, frame_size=(512, 768), fps=9):
save_path = os.path.join(POSE_TRANSFORM_VIDEO_PATH, output_path)
def ndarray_to_video(images, output_path, fps=9):
save_path = os.path.join("../pose_transform_video/", output_path)
clip = ImageSequenceClip([frame for frame in images], fps=fps)
clip.write_videofile(save_path, codec='libx264')

View File

@@ -9,16 +9,13 @@
"""
import io
import logging
# import boto3
import cv2
from PIL import Image
from minio import Minio
from app.core.config import *
from app.service.utils.oss_client import oss_upload_image
from app.core.config import settings
from app.service.utils.new_oss_client import oss_upload_image
minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE)
minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE)
# s3 = boto3.client('s3', aws_access_key_id=S3_ACCESS_KEY, aws_secret_access_key=S3_AWS_SECRET_ACCESS_KEY, region_name=S3_REGION_NAME)
@@ -52,7 +49,7 @@ def upload_SDXL_image(image, user_id, category, file_name):
# content_type='image/jpeg'
# )
object_name = f'{user_id}/{category}/{file_name}'
req = oss_upload_image(bucket=GI_MINIO_BUCKET, object_name=object_name, image_bytes=image_bytes)
oss_upload_image(oss_client=minio_client, bucket="aida-users", object_name=object_name, image_bytes=image_bytes)
image_url = f"aida-users/{object_name}"
return image_url
except Exception as e:
@@ -63,7 +60,7 @@ def upload_png_sd(image, user_id, category, file_name):
try:
_, img_byte_array = cv2.imencode('.jpg', image)
object_name = f'{user_id}/{category}/{file_name}'
req = oss_upload_image(bucket=GI_MINIO_BUCKET, object_name=object_name, image_bytes=img_byte_array)
oss_upload_image(oss_client=minio_client, bucket="aida-users", object_name=object_name, image_bytes=img_byte_array)
image_url = f"aida-users/{object_name}"
return image_url
except Exception as e: