From d92c59383b772e8e7c4ffd11721aa5bb24003b22 Mon Sep 17 00:00:00 2001
From: zhouchengrong
Date: Fri, 8 Nov 2024 14:05:09 +0800
Subject: [PATCH] =?UTF-8?q?feat=20=20=20=20design=E7=9B=B8=E5=85=B3?=
 =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E8=BF=81=E7=A7=BB4090=E6=B5=8B=E8=AF=95=20fi?=
 =?UTF-8?q?x?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 app/core/config.py                                    | 6 +-----
 app/service/attribute/service_att_recognition.py      | 2 +-
 app/service/attribute/service_category_recognition.py | 2 +-
 app/service/generate_image/utils/image_processing.py  | 4 ++--
 4 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/app/core/config.py b/app/core/config.py
index 35c12b7..5909a3a 100644
--- a/app/core/config.py
+++ b/app/core/config.py
@@ -93,9 +93,6 @@ OPENAI_MODEL_LIST = {"gpt-3.5-turbo-0613",
                      "gpt-4-0613",
                      "gpt-4-32k-0613", }
 
-# attribute service config
-ATT_TRITON_URL = "10.1.1.240:10000"
-
 # SR service config
 SR_MODEL_NAME = "super_resolution"
 SR_TRITON_URL = "10.1.1.240:10031"
@@ -132,7 +129,6 @@ GRI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_relight'
 GRI_MODEL_URL = '10.1.1.240:10051'
 
 # SEG service config
-SEG_MODEL_URL = '10.1.1.240:10000'
 SEGMENTATION = {
     "new_model_name": "seg_knet",
     "name": "seg_ocrnet_hr18",
@@ -141,7 +137,7 @@ SEGMENTATION = {
 }
 
 # DESIGN config
-DESIGN_MODEL_URL = '10.1.1.240:10000'
+DESIGN_MODEL_URL = '10.1.1.243:10000'
 AIDA_CLOTHING = "aida-clothing"
 KEYPOINT_RESULT_TABLE_FIELD_SET = ('neckline_left', 'neckline_right', 'shoulder_left', 'shoulder_right', 'armpit_left', 'armpit_right',
                                    'cuff_left_in', 'cuff_left_out', 'cuff_right_in', 'cuff_right_out', 'waistband_left', 'waistband_right')
diff --git a/app/service/attribute/service_att_recognition.py b/app/service/attribute/service_att_recognition.py
index 1251891..f93146e 100644
--- a/app/service/attribute/service_att_recognition.py
+++ b/app/service/attribute/service_att_recognition.py
@@ -28,7 +28,7 @@ class AttributeRecognition:
             }
         )
         self.const = const
-        self.triton_client = httpclient.InferenceServerClient(url=f"{ATT_TRITON_URL}")
+        self.triton_client = httpclient.InferenceServerClient(url=f"{DESIGN_MODEL_URL}")
 
     def get_result(self):
         for sketch in self.request_data:
diff --git a/app/service/attribute/service_category_recognition.py b/app/service/attribute/service_category_recognition.py
index f917af2..7c277c9 100644
--- a/app/service/attribute/service_category_recognition.py
+++ b/app/service/attribute/service_category_recognition.py
@@ -26,7 +26,7 @@ class CategoryRecognition:
         self.attr_type = pd.read_csv(CATEGORY_PATH)
         # self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE)
         self.request_data = []
-        self.triton_client = httpclient.InferenceServerClient(url=ATT_TRITON_URL)
+        self.triton_client = httpclient.InferenceServerClient(url=DESIGN_MODEL_URL)
         for sketch in request_data:
             self.request_data.append(
                 {
diff --git a/app/service/generate_image/utils/image_processing.py b/app/service/generate_image/utils/image_processing.py
index af36188..02d8bee 100644
--- a/app/service/generate_image/utils/image_processing.py
+++ b/app/service/generate_image/utils/image_processing.py
@@ -81,7 +81,7 @@ def get_contours(image):
 
 def seg_infer_image(image_obj):
     image, ori_shape = seg_preprocess(image_obj)
-    client = httpclient.InferenceServerClient(url=f"{SEG_MODEL_URL}")
+    client = httpclient.InferenceServerClient(url=f"{DESIGN_MODEL_URL}")
     transformed_img = image.astype(np.float32)
     # 输入集
     inputs = [
@@ -250,7 +250,7 @@ def generate_category_recognition(image, gender):
         return preprocessed_img
 
     preprocessed_img = preprocess(image)
-    triton_client = httpclient.InferenceServerClient(url=ATT_TRITON_URL)
+    triton_client = httpclient.InferenceServerClient(url=DESIGN_MODEL_URL)
     inputs = [
         httpclient.InferInput("input__0", preprocessed_img.shape, datatype="FP32")