From c3404e5c96f973804dd9c7343cc58acf357695af Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Thu, 13 Feb 2025 17:35:00 +0800 Subject: [PATCH 001/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20dev=20=E4=BB=A3=E7=A0=81=E5=90=8C=E6=AD=A5=20do?= =?UTF-8?q?cs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20ref?= =?UTF-8?q?actor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 代码回溯 --- app/service/design_fast/pipeline/print_painting.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/app/service/design_fast/pipeline/print_painting.py b/app/service/design_fast/pipeline/print_painting.py index 2f97474..42ca588 100644 --- a/app/service/design_fast/pipeline/print_painting.py +++ b/app/service/design_fast/pipeline/print_painting.py @@ -465,8 +465,11 @@ class PrintPainting: angle: 旋转的角度 crop: 是否需要进行裁剪,布尔向量 """ + if not isinstance(crop, bool): + raise ValueError("The 'crop' parameter must be a boolean.") + crop_image = lambda img, x0, y0, w, h: img[y0:y0 + h, x0:x0 + w] - w, h = img.shape[:2] + h, w = img.shape[:2] # 旋转角度的周期是360° angle %= 360 # 计算仿射变换矩阵 @@ -478,7 +481,7 @@ class PrintPainting: if crop: # 裁剪角度的等效周期是180° angle_crop = angle % 180 - if angle > 90: + if angle_crop > 90: angle_crop = 180 - angle_crop # 转化角度为弧度 theta = angle_crop * np.pi / 180 From c9f63a1f12fb9078863a2384c0c43b3abb2afe2e Mon Sep 17 00:00:00 2001 From: xupei Date: Thu, 20 Feb 2025 17:14:57 +0800 Subject: [PATCH 002/101] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E8=AF=AD=E8=A8=80?= =?UTF-8?q?=E5=88=A4=E6=96=AD=E8=B0=83=E7=94=A8=E7=9A=84=E6=96=B9=E6=B3=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/chat_robot/script/service/CallQWen.py | 14 +++++++++++++- 1 file changed, 13 
insertions(+), 1 deletion(-) diff --git a/app/service/chat_robot/script/service/CallQWen.py b/app/service/chat_robot/script/service/CallQWen.py index 780d206..9e9f1a5 100644 --- a/app/service/chat_robot/script/service/CallQWen.py +++ b/app/service/chat_robot/script/service/CallQWen.py @@ -200,6 +200,18 @@ def get_response(messages): return response +def get_assistant_response(messages): + response = Generation.call( + model='qwen-max', + api_key=QWEN_API_KEY, + messages=messages, + # seed=random.randint(1, 10000), # 设置随机数种子seed,如果没有设置,则随机数种子默认为1234 + result_format='message', # 将输出设置为message形式 + enable_search='false' + ) + return response + + def call_with_messages(message, gender): global tool_info user_input = message @@ -329,7 +341,7 @@ def get_language(message: str) -> str: } ] - first_response = get_response(messages) + first_response = get_assistant_response(messages) assistant_output = first_response.output.choices[0].message.content logging.info(f"大模型输出信息:{first_response}\n判断用户输入的语言为:{assistant_output}") # print(f"大模型输出信息:{first_response}\n判断用户输入的语言为:{assistant_output}") From e5a42133339e47df7d77cdec8a8339df09861140 Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Fri, 21 Feb 2025 10:00:40 +0800 Subject: [PATCH 003/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):=20=E6=9C=BA=E5=99=A8?= =?UTF-8?q?=E4=BA=BA=E6=95=B0=E6=8D=AE=E5=BA=93=E6=90=9C=E7=B4=A2=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=E6=B5=8B=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 代码回溯 --- app/service/chat_robot/script/service/CallQWen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/service/chat_robot/script/service/CallQWen.py 
b/app/service/chat_robot/script/service/CallQWen.py index 9e9f1a5..d2f28a0 100644 --- a/app/service/chat_robot/script/service/CallQWen.py +++ b/app/service/chat_robot/script/service/CallQWen.py @@ -283,7 +283,7 @@ def call_with_messages(message, gender): elif assistant_output.tool_calls[0]['function']['name'] == 'get_image_from_vector_db': content = json.loads(assistant_output.tool_calls[0]['function']['arguments']) tool_info = {"name": "get_image_from_vector_db", "role": "tool", - 'content': get_image_from_vector_db(gender, content['parameters']['content'])} + 'content': get_image_from_vector_db(gender, content['parameters']['content'] if "parameters" in content.keys() else content['content'])} flag = False result_content = tool_info['content'] response_type = "image" From c5d1eabcc7d4bafb45822d0f7f005800dc9f7a7b Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Tue, 25 Feb 2025 09:45:05 +0800 Subject: [PATCH 004/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20design=20print=5Fpainting.py=20?= =?UTF-8?q?=E6=96=B0=E5=A2=9E=E9=80=89=E5=8C=BA=E5=A1=AB=E5=85=85=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs?= =?UTF-8?q?=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refac?= =?UTF-8?q?tor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../design_fast/pipeline/print_painting.py | 41 ++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/app/service/design_fast/pipeline/print_painting.py b/app/service/design_fast/pipeline/print_painting.py index 42ca588..878bcb4 100644 --- a/app/service/design_fast/pipeline/print_painting.py +++ b/app/service/design_fast/pipeline/print_painting.py @@ -15,6 +15,7 @@ class PrintPainting: single_print = result['print']['single'] overall_print = result['print']['overall'] element_print = 
result['print']['element'] + partial_path = result['print']['partial'] result['single_image'] = None result['print_image'] = None # TODO 给result['pattern_image'] resize 到resize_scale的大小 @@ -262,6 +263,45 @@ class PrintPainting: temp_fg = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2) tmp2 = (result['final_image'] * (temp_fg / 255)).astype(np.uint8) result['single_image'] = cv2.add(tmp1, tmp2) + + if partial_path: + print_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8) + mask_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8) + image, image_mode = self.read_image(partial_path) + if image_mode == "RGBA": + new_size = (result['pattern_image'].shape[1], result['pattern_image'].shape[0]) + + mask = image.split()[3] + resized_source = image.resize(new_size) + resized_source_mask = mask.resize(new_size) + + # rotated_resized_source = resized_source.rotate(-partial_print['print_angle_list'][i]) + # rotated_resized_source_mask = resized_source_mask.rotate(-partial_print['print_angle_list'][i]) + + source_image_pil = Image.fromarray(cv2.cvtColor(print_background, cv2.COLOR_BGR2RGB)) + source_image_pil_mask = Image.fromarray(cv2.cvtColor(mask_background, cv2.COLOR_BGR2RGB)) + + source_image_pil.paste(resized_source, (0, 0), resized_source) + source_image_pil_mask.paste(resized_source_mask, (0, 0), resized_source_mask) + + print_background = cv2.cvtColor(np.array(source_image_pil), cv2.COLOR_RGBA2BGR) + mask_background = cv2.cvtColor(np.array(source_image_pil_mask), cv2.COLOR_RGBA2BGR) + ret, mask_background = cv2.threshold(mask_background, 124, 255, cv2.THRESH_BINARY) + print_mask = cv2.bitwise_and(result['mask'], cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY)) + img_fg = cv2.bitwise_or(print_background, print_background, mask=print_mask) + # TODO element 丢失信息 + three_channel_image = cv2.merge([cv2.bitwise_not(print_mask), 
cv2.bitwise_not(print_mask), cv2.bitwise_not(print_mask)]) + img_bg = cv2.bitwise_and(result['final_image'], three_channel_image) + # mask_mo = np.expand_dims(print_mask, axis=2).repeat(3, axis=2) + # gray_mo = np.expand_dims(result['gray'], axis=2).repeat(3, axis=2) + # img_fg = (img_fg * (mask_mo / 255) * (gray_mo / 255)).astype(np.uint8) + result['final_image'] = cv2.add(img_bg, img_fg) + canvas = np.full_like(result['final_image'], 255) + temp_bg = np.expand_dims(cv2.bitwise_not(result['mask']), axis=2).repeat(3, axis=2) + tmp1 = (canvas * (temp_bg / 255)).astype(np.uint8) + temp_fg = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2) + tmp2 = (result['final_image'] * (temp_fg / 255)).astype(np.uint8) + result['single_image'] = cv2.add(tmp1, tmp2) return result @staticmethod @@ -414,7 +454,6 @@ class PrintPainting: # y_offset = int(location[0][0]) # x_offset = int(location[0][1]) - if len(image.shape) == 2: image = image[x_offset: x_offset + image_size_h, y_offset: y_offset + image_size_w] elif len(image.shape) == 3: From 2dda3d11ae1a254f299629b93121b00e9b25b0d0 Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Tue, 25 Feb 2025 10:22:22 +0800 Subject: [PATCH 005/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20=20=E5=A2=9E=E5=BC=BAdesign=20=E9=80=89?= =?UTF-8?q?=E5=8C=BA=E6=95=B0=E6=8D=AE=E7=BB=93=E6=9E=84=E5=88=A4=E6=96=AD?= =?UTF-8?q?=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:?= =?UTF-8?q?=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/design_fast/pipeline/print_painting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/service/design_fast/pipeline/print_painting.py b/app/service/design_fast/pipeline/print_painting.py index 878bcb4..1534f9c 100644 --- 
a/app/service/design_fast/pipeline/print_painting.py +++ b/app/service/design_fast/pipeline/print_painting.py @@ -15,7 +15,7 @@ class PrintPainting: single_print = result['print']['single'] overall_print = result['print']['overall'] element_print = result['print']['element'] - partial_path = result['print']['partial'] + partial_path = result['print']['partial'] if 'partial' in result['print'] else None result['single_image'] = None result['print_image'] = None # TODO 给result['pattern_image'] resize 到resize_scale的大小 From bec89629d5aa6dddf15e3b50cc252fbad8ef08b4 Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Tue, 25 Feb 2025 17:26:00 +0800 Subject: [PATCH 006/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20=20design=20=E7=94=A8=E6=88=B7=E8=87=AA?= =?UTF-8?q?=E5=AE=9A=E4=B9=89=E8=92=99=E7=89=88=E5=88=86=E5=89=B2=20docs?= =?UTF-8?q?=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refac?= =?UTF-8?q?tor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/design_fast/item.py | 2 +- app/service/design_fast/pipeline/split.py | 72 ++++++++++++++--------- 2 files changed, 45 insertions(+), 29 deletions(-) diff --git a/app/service/design_fast/item.py b/app/service/design_fast/item.py index ec18b17..5ddfdc7 100644 --- a/app/service/design_fast/item.py +++ b/app/service/design_fast/item.py @@ -57,7 +57,7 @@ class BottomItem(BaseItem): LoadImage(minio_client), KeyPoint(), ContourDetection(), - # Segmentation(), + Segmentation(minio_client), # BackPerspective(minio_client), Color(minio_client), PrintPainting(minio_client), diff --git a/app/service/design_fast/pipeline/split.py b/app/service/design_fast/pipeline/split.py index 115f814..88e8e75 100644 --- a/app/service/design_fast/pipeline/split.py +++ 
b/app/service/design_fast/pipeline/split.py @@ -65,35 +65,51 @@ class Split(object): mask_image = np.zeros((height, width, 3)) mask_image[front_mask != 0] = [0, 0, 255] - if result["name"] in ('blouse', 'dress', 'outwear', 'tops'): - result_back_image = np.zeros_like(rgba_image) - back_mask = cv2.resize(back_mask, new_size) - result_back_image[back_mask != 0] = rgba_image[back_mask != 0] - result_back_image_pil = Image.fromarray(cvtColor(result_back_image, COLOR_BGR2RGBA)) - result['back_image'], result["back_image_url"], _ = upload_png_mask(self.minio_client, result_back_image_pil, f'{generate_uuid()}', mask=None) - mask_image[back_mask != 0] = [0, 255, 0] + # if result["name"] in ('blouse', 'dress', 'outwear', 'tops'): + # result_back_image = np.zeros_like(rgba_image) + # back_mask = cv2.resize(back_mask, new_size) + # result_back_image[back_mask != 0] = rgba_image[back_mask != 0] + # result_back_image_pil = Image.fromarray(cvtColor(result_back_image, COLOR_BGR2RGBA)) + # result['back_image'], result["back_image_url"], _ = upload_png_mask(self.minio_client, result_back_image_pil, f'{generate_uuid()}', mask=None) + # mask_image[back_mask != 0] = [0, 255, 0] + # + # rbga_mask = rgb_to_rgba(mask_image, front_mask + back_mask) + # mask_pil = Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) + # image_data = io.BytesIO() + # mask_pil.save(image_data, format='PNG') + # image_data.seek(0) + # image_bytes = image_data.read() + # req = oss_upload_image(oss_client=self.minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) + # result['mask_url'] = req.bucket_name + "/" + req.object_name + # else: + # rbga_mask = rgb_to_rgba(mask_image, front_mask) + # mask_pil = Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) + # image_data = io.BytesIO() + # mask_pil.save(image_data, format='PNG') + # image_data.seek(0) + # image_bytes = image_data.read() + # req = 
oss_upload_image(oss_client=self.minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) + # result['mask_url'] = req.bucket_name + "/" + req.object_name + # result['back_image'] = None + # result["back_image_url"] = None + # # result["back_mask_url"] = None + # # result['back_mask_image'] = None - rbga_mask = rgb_to_rgba(mask_image, front_mask + back_mask) - mask_pil = Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) - image_data = io.BytesIO() - mask_pil.save(image_data, format='PNG') - image_data.seek(0) - image_bytes = image_data.read() - req = oss_upload_image(oss_client=self.minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) - result['mask_url'] = req.bucket_name + "/" + req.object_name - else: - rbga_mask = rgb_to_rgba(mask_image, front_mask) - mask_pil = Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) - image_data = io.BytesIO() - mask_pil.save(image_data, format='PNG') - image_data.seek(0) - image_bytes = image_data.read() - req = oss_upload_image(oss_client=self.minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) - result['mask_url'] = req.bucket_name + "/" + req.object_name - result['back_image'] = None - result["back_image_url"] = None - # result["back_mask_url"] = None - # result['back_mask_image'] = None + result_back_image = np.zeros_like(rgba_image) + back_mask = cv2.resize(back_mask, new_size) + result_back_image[back_mask != 0] = rgba_image[back_mask != 0] + result_back_image_pil = Image.fromarray(cvtColor(result_back_image, COLOR_BGR2RGBA)) + result['back_image'], result["back_image_url"], _ = upload_png_mask(self.minio_client, result_back_image_pil, f'{generate_uuid()}', mask=None) + mask_image[back_mask != 0] = [0, 255, 0] + + rbga_mask = rgb_to_rgba(mask_image, front_mask + back_mask) + mask_pil = 
Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) + image_data = io.BytesIO() + mask_pil.save(image_data, format='PNG') + image_data.seek(0) + image_bytes = image_data.read() + req = oss_upload_image(oss_client=self.minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) + result['mask_url'] = req.bucket_name + "/" + req.object_name # 创建中间图层 result_pattern_image_rgba = rgb_to_rgba(result['pattern_image'], result['mask']) result_pattern_image_pil = Image.fromarray(cvtColor(result_pattern_image_rgba, COLOR_BGR2RGBA)) From 08f9f7ebf7a0ca80b1e069d6612d6b60c561fbc2 Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Wed, 26 Feb 2025 14:25:49 +0800 Subject: [PATCH 007/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):=20=20product=20image?= =?UTF-8?q?=20=E4=BA=BA=E8=84=B8=E8=AF=86=E5=88=AB=E9=97=AE=E9=A2=98?= =?UTF-8?q?=E6=B5=8B=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/core/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/core/config.py b/app/core/config.py index 6a4ad23..d816407 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -137,7 +137,7 @@ GEN_SINGLE_LOGO_RABBITMQ_QUEUES = os.getenv("GEN_SINGLE_LOGO_RABBITMQ_QUEUES", f GPI_RABBITMQ_QUEUES = os.getenv("GEN_PRODUCT_IMAGE_RABBITMQ_QUEUES", f"ToProductImage{RABBITMQ_ENV}") GPI_MODEL_NAME_OVERALL = 'diffusion_ensemble_all' GPI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_cnet' -GPI_MODEL_URL = '10.1.1.243:10051' +GPI_MODEL_URL = '10.1.1.243:15551' # Generate Single Logo service config GRI_RABBITMQ_QUEUES = os.getenv("GEN_RELIGHT_IMAGE_RABBITMQ_QUEUES", f"Relight{RABBITMQ_ENV}") 
From a2e78f3dd52a480f7839f2774a5a6e72a33d32ab Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Fri, 28 Feb 2025 16:26:44 +0800 Subject: [PATCH 008/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20sketch=20=E6=8E=A8=E8=8D=90=E7=AE=97?= =?UTF-8?q?=E6=B3=95=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs?= =?UTF-8?q?=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refac?= =?UTF-8?q?tor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_recommendation.py | 118 +++++++ app/api/api_route.py | 2 + app/core/config.py | 25 +- app/main.py | 12 +- app/service/recommend/scheduled_task.py | 431 ++++++++++++++++++++++++ app/service/recommend/service.py | 172 ++++++++++ 6 files changed, 755 insertions(+), 5 deletions(-) create mode 100644 app/api/api_recommendation.py create mode 100644 app/service/recommend/scheduled_task.py create mode 100644 app/service/recommend/service.py diff --git a/app/api/api_recommendation.py b/app/api/api_recommendation.py new file mode 100644 index 0000000..c533709 --- /dev/null +++ b/app/api/api_recommendation.py @@ -0,0 +1,118 @@ +import io +import logging +import sys +import time +from typing import List + +import numpy as np +from apscheduler.schedulers.background import BackgroundScheduler +from apscheduler.triggers.cron import CronTrigger +from fastapi import HTTPException, APIRouter + +from app.service.recommend.service import load_resources, matrix_data + +sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8') +logger = logging.getLogger() +router = APIRouter() + + +@router.on_event("startup") +async def startup_event(): + # 初始加载 + load_resources() + + # 配置定时任务 + scheduler = BackgroundScheduler() + scheduler.add_job( + load_resources, + trigger=CronTrigger(hour=0, minute=30), + name="每日资源刷新" + ) + scheduler.start() + 
logger.info("定时任务已启动") + + +@router.get("/recommend/{user_id}/{category}/{num_recommendations}", response_model=List[str]) +async def get_recommendations(user_id: int, category: str, num_recommendations: int = 10): + """ + :param user_id: 4 + :param category: female_skirt + :param num_recommendations: 1 + :return: + [ + "aida-sys-image/images/female/skirt/903000017.jpg" + ] + """ + try: + start_time = time.time() + cache_key = (user_id, category) + + # 检查缓存 + if cache_key in matrix_data["cached_scores"]: + processed_inter, processed_feat = matrix_data["cached_scores"][cache_key] + valid_sketch_idxs_inter = matrix_data["cached_valid_idxs"][cache_key] + else: + # 实时计算逻辑(同原代码) + user_idx_inter = matrix_data["user_index_interaction"].get(user_id) + user_idx_feature = matrix_data["user_index_feature"].get(user_id) + + category_iids = matrix_data["category_to_iids"].get(category, []) + valid_sketch_idxs_inter = [ + idx for iid, idx in matrix_data["sketch_index_interaction"].items() + if iid in category_iids + ] + + # 处理交互分数 + raw_inter_scores = [] + if user_idx_inter is not None and valid_sketch_idxs_inter: + raw_inter_scores = matrix_data["interaction_matrix"][user_idx_inter, valid_sketch_idxs_inter] + processed_inter = raw_inter_scores * 0.7 + + # 处理特征分数 + valid_sketch_idxs_feature = [ + idx for iid, idx in matrix_data["sketch_index_feature"].items() + if iid in category_iids + ] + raw_feat_scores = [] + if user_idx_feature is not None and valid_sketch_idxs_feature: + raw_feat_scores = matrix_data["feature_matrix"][user_idx_feature, valid_sketch_idxs_feature] + raw_feat_scores = (raw_feat_scores - np.min(raw_feat_scores)) / ( + np.max(raw_feat_scores) - np.min(raw_feat_scores) + 1e-8) + processed_feat = raw_feat_scores * 0.3 + else: + processed_feat = np.array([]) + + # 更新缓存 + matrix_data["cached_scores"][cache_key] = (processed_inter, processed_feat) + matrix_data["cached_valid_idxs"][cache_key] = valid_sketch_idxs_inter + + # 合并分数 + final_scores = processed_inter + 
processed_feat + valid_sketch_idxs = matrix_data["cached_valid_idxs"][cache_key] + + # 概率采样 + scores = np.array(final_scores) + + # 调整后的概率转换(带温度控制的softmax) + def calibrated_softmax(scores, temperature=1.0): + scores = scores / temperature + scale = scores - max(scores) + exps = np.exp(scale) + return exps / np.sum(exps) + + probs = calibrated_softmax(scores, 0.07) + + chosen_indices = np.random.choice( + len(valid_sketch_idxs), + size=min(num_recommendations, len(valid_sketch_idxs)), + p=probs, + replace=False + ) + recommendations = [matrix_data["iid_to_sketch"][valid_sketch_idxs[idx]] for idx in chosen_indices] + + logger.info(f"推荐生成完成,耗时: {time.time() - start_time:.2f}秒") + return recommendations + + except Exception as e: + logger.error(f"推荐失败: {str(e)}", exc_info=True) + raise HTTPException(status_code=500, detail=str(e)) diff --git a/app/api/api_route.py b/app/api/api_route.py index 973a940..3890316 100644 --- a/app/api/api_route.py +++ b/app/api/api_route.py @@ -10,6 +10,7 @@ from app.api import api_generate_image from app.api import api_image2sketch from app.api import api_prompt_generation from app.api import api_super_resolution +from app.api import api_recommendation from app.api import api_test router = APIRouter() @@ -26,3 +27,4 @@ router.include_router(api_image2sketch.router, tags=['api_image2sketch'], prefix router.include_router(api_brighten.router, tags=['api_brighten'], prefix="/api") router.include_router(api_query_image.router, tags=['api_query_image'], prefix="/api") router.include_router(api_brand_dna.router, tags=['api_brand_dna'], prefix="/api") +router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") diff --git a/app/core/config.py b/app/core/config.py index d816407..df4702b 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -25,10 +25,13 @@ if DEBUG: LOGS_PATH = "logs/" CATEGORY_PATH = "service/attribute/config/descriptor/category/category_dis.csv" SEG_CACHE_PATH = "../seg_cache/" + 
RECOMMEND_PATH_PREFIX = "service/recommend/" else: LOGS_PATH = "app/logs/" CATEGORY_PATH = "app/service/attribute/config/descriptor/category/category_dis.csv" SEG_CACHE_PATH = "/seg_cache/" + RECOMMEND_PATH_PREFIX = "app/service/recommend/" + # RABBITMQ_ENV = "" # 生产环境 RABBITMQ_ENV = "-dev" # 开发环境 @@ -36,7 +39,6 @@ RABBITMQ_ENV = "-dev" # 开发环境 JAVA_STREAM_API_URL = os.getenv("JAVA_STREAM_API_URL", "https://api.aida.com.hk/api/third/party/receiveDesignResults") - settings = Settings() # minio 配置 @@ -114,7 +116,6 @@ GMV_MODEL_NAME = 'multi_view' GMV_RABBITMQ_QUEUES = os.getenv("GMV_RABBITMQ_QUEUES", f"GenerateMultiView{RABBITMQ_ENV}") - GI_MINIO_BUCKET = "aida-users" GI_RABBITMQ_QUEUES = os.getenv("GI_RABBITMQ_QUEUES", f"GenerateImage{RABBITMQ_ENV}") GI_SYS_IMAGE_URL = "aida-sys-image/generate_image/white_image.jpg" @@ -191,3 +192,23 @@ PRIORITY_DICT = { } QWEN_API_KEY = "sk-a6bdf594e1f54a4aa3e9d4d48f8c661f" + +DB_CONFIG = { + "host": "18.167.251.121", + "port": 3306, + "user": "root", + "password": "QWa998345", + "database": "aida", + "charset": "utf8mb4" +} + +TABLE_CATEGORIES = { + "female_dress": "female/dress", + "female_outwear": "female/outwear", + "female_trousers": "female/trousers", + "female_skirt": "female/skirt", + "female_blouse": "female/blouse", + "male_tops": "male/tops", + "male_bottoms": "male/bottoms", + "male_outwear": "male/outwear" +} diff --git a/app/main.py b/app/main.py index 95c666a..cbdce4a 100644 --- a/app/main.py +++ b/app/main.py @@ -1,15 +1,17 @@ import logging.config -from http.client import HTTPException -from fastapi.responses import JSONResponse -from fastapi import FastAPI, HTTPException, Request import uvicorn +from apscheduler.schedulers.background import BackgroundScheduler +from apscheduler.triggers.cron import CronTrigger from fastapi import FastAPI +from fastapi import HTTPException, Request +from fastapi.responses import JSONResponse from app.api.api_route import router from app.core.config import settings from 
app.core.record_api_count import count_api_calls from app.schemas.response_template import ResponseModel +from app.service.recommend.service import load_resources from logging_env import LOGGER_CONFIG_DICT logging.config.dictConfig(LOGGER_CONFIG_DICT) @@ -17,6 +19,8 @@ logging.getLogger("pika").setLevel(logging.WARNING) from starlette.middleware.cors import CORSMiddleware +logger = logging.getLogger(__name__) + def get_application() -> FastAPI: application = FastAPI( @@ -51,5 +55,7 @@ async def http_exception_handler(request: Request, exc: HTTPException): ) + + if __name__ == '__main__': uvicorn.run(app, host="0.0.0.0", port=8000) diff --git a/app/service/recommend/scheduled_task.py b/app/service/recommend/scheduled_task.py new file mode 100644 index 0000000..ec1e4aa --- /dev/null +++ b/app/service/recommend/scheduled_task.py @@ -0,0 +1,431 @@ +import pymysql +import numpy as np +from apscheduler.schedulers.blocking import BlockingScheduler +import os +import logging +from collections import defaultdict +import torch +from torchvision import models, transforms +from minio import Minio +from PIL import Image +import io +import seaborn as sns +import matplotlib.pyplot as plt +from scipy.sparse import csr_matrix +import matplotlib.font_manager as fm +from scipy import sparse + +from app.core.config import DB_CONFIG, TABLE_CATEGORIES, RECOMMEND_PATH_PREFIX + +# 自动选择可用字体 +try: + # 尝试加载常见中文字体 + font_path = fm.findfont(fm.FontProperties(family=['Microsoft YaHei', 'SimHei', 'WenQuanYi Zen Hei'])) + plt.rcParams['font.sans-serif'] = fm.FontProperties(fname=font_path).get_name() +except: + # 回退到英文字体 + plt.rcParams['font.sans-serif'] = ['DejaVu Sans'] +plt.rcParams['axes.unicode_minus'] = False + +# 检查系统中可用的字体并选择支持中文的字体 +font_path = fm.findfont(fm.FontProperties(family='Microsoft YaHei')) # 或其他支持中文的字体 +plt.rcParams['font.sans-serif'] = ['Microsoft YaHei'] # 设置为 Microsoft YaHei +plt.rcParams['axes.unicode_minus'] = False # 解决负号显示问题 + +# 配置日志记录 +logging.basicConfig( + 
level=logging.INFO, + format='%(asctime)s - %(levelname)s - %(message)s', + filename='scheduler.log' +) + +# MinIO 配置信息 +minio_client = Minio( + "www.minio.aida.com.hk:12024", # MinIO Endpoint + access_key="admin", # Access Key + secret_key="Aidlab123123!", # Secret Key + secure=True # 使用https +) + +# 预加载系统sketch特征向量 +SYSTEM_FEATURES = np.load(f'{RECOMMEND_PATH_PREFIX}sketch_feature_dict.npy', allow_pickle=True).item() + + +# 保存sketch_to_iid到文件 +def save_sketch_to_iid(): + """保存sketch到iid的映射""" + sketch_to_iid = {sketch_path: iid for iid, sketch_path in enumerate(SYSTEM_FEATURES.keys(), start=1)} + np.save('sketch_to_iid.npy', sketch_to_iid) + print("sketch_to_iid 已保存") + + +# 从文件加载sketch_to_iid +def load_sketch_to_iid(): + """加载保存的sketch到iid的映射""" + if os.path.exists('sketch_to_iid.npy'): + sketch_to_iid = np.load('sketch_to_iid.npy', allow_pickle=True).item() + print("sketch_to_iid 已加载") + return sketch_to_iid + else: + # 如果文件不存在,则生成并保存 + print("sketch_to_iid 文件不存在,正在生成并保存...") + save_sketch_to_iid() + return np.load('sketch_to_iid.npy', allow_pickle=True).item() + + +# 使用load_sketch_to_iid来获取映射 +sketch_to_iid = load_sketch_to_iid() + +# 在代码中其他地方使用sketch_to_iid +# print(f"Total sketches: {len(sketch_to_iid)}") + +# 定义图像预处理(与ResNet训练时的预处理一致) +transform = transforms.Compose([ + transforms.Resize((224, 224)), # ResNet 要求 224x224 的输入 + transforms.ToTensor(), # 转换为 Tensor + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), # 标准化 +]) + +# 加载预训练的 ResNet 模型 (ResNet50) +resnet_model = models.resnet50(pretrained=True) +modules = list(resnet_model.children())[:-1] # 移除最后的全连接层 +resnet_model = torch.nn.Sequential(*modules) +resnet_model.eval() # 设置为评估模式 + + +# 从 MinIO 获取图片并进行预处理 +def get_sketch_image_from_minio(sketch_path): + """ + 从 MinIO 获取 sketch 图像并预处理 + """ + # 分割路径,获取桶名和文件路径 + path_parts = sketch_path.split('/', 1) # 根据第一个斜杠分割,得到桶名和路径 + bucket_name = path_parts[0] # 桶名 + file_name = path_parts[1] # 文件路径(从第二部分开始) + + try: + # 获取文件 + obj = 
minio_client.get_object(bucket_name, file_name) + img_data = obj.read() # 读取图像数据 + img = Image.open(io.BytesIO(img_data)) # 将数据转为图像对象 + img = transform(img) # 对图像进行预处理 + return img.unsqueeze(0) # 扩展维度以适应批量处理 + except Exception as e: + print(f"Error fetching image for {sketch_path}: {e}") + return None + + +def extract_feature_vector_from_resnet(sketch_path): + """ + 提取 sketch 图像的特征向量 + """ + # 从 MinIO 获取图像并预处理 + img_tensor = get_sketch_image_from_minio(sketch_path) + if img_tensor is None: + return np.zeros(2048) # 如果获取失败,返回零向量 + + with torch.no_grad(): # 在不需要计算梯度的情况下进行推断 + feature_vector = resnet_model(img_tensor) # 获取 ResNet 的输出 + return feature_vector.squeeze().cpu().numpy() # 转换为 NumPy 数组并去掉 batch 维度 + + +def update_user_matrices(): + """每天更新用户交互次数矩阵和特征向量矩阵""" + conn = None + try: + conn = pymysql.connect(**DB_CONFIG) + cursor = conn.cursor() + + # 修改后的查询语句(移除category过滤) + cursor.execute(""" + SELECT account_id, path, COUNT(*) as like_count + FROM user_preference_log_test + GROUP BY account_id, path + """) + user_data = cursor.fetchall() + logging.info(f"成功读取{len(user_data)}条用户偏好记录") + + # 计算矩阵 + interaction_matrix, raw_counts_sparse, user_index_interaction_matrix, sketch_index_interaction_matrix, iid_to_category_interaction_matrix = calculate_interaction_matrix(user_data) + # visualize_sparse_matrix(raw_counts_sparse,'交互次数矩阵', 'interaction_frequency_matrix.png') + # visualize_sparse_matrix(interaction_matrix, '交互次数得分矩阵', 'interaction_score_matrix.png') + # plot_interaction_count_matrix(raw_counts_sparse) + # feature_matrix, iid_to_category_feature_matrix, user_index_feature_matrix, sketch_index_feature_matrix = calculate_feature_matrix(user_data) + feature_matrix, user_index_feature_matrix, sketch_index_feature_matrix, iid_to_category_feature_matrix = calculate_feature_matrix(user_data) + # visualize_sparse_matrix(feature_matrix, '系统sketch与用户category平均特征向量关联度矩阵', 'correlation_matrix.png') + # 存储矩阵 + np.save(f"{RECOMMEND_PATH_PREFIX}interaction_matrix.npy", 
interaction_matrix) + np.save(f"{RECOMMEND_PATH_PREFIX}feature_matrix.npy", feature_matrix) + # + np.save(f"{RECOMMEND_PATH_PREFIX}iid_to_category_interaction_matrix.npy", iid_to_category_interaction_matrix) + np.save(f"{RECOMMEND_PATH_PREFIX}user_index_interaction_matrix.npy", user_index_interaction_matrix) + # + np.save(f"{RECOMMEND_PATH_PREFIX}iid_to_category_feature_matrix.npy", iid_to_category_feature_matrix) + np.save(f"{RECOMMEND_PATH_PREFIX}user_index_feature_matrix.npy", user_index_feature_matrix) + # + np.save(f"{RECOMMEND_PATH_PREFIX}sketch_index_interaction_matrix.npy", sketch_index_interaction_matrix) + np.save(f"{RECOMMEND_PATH_PREFIX}sketch_index_feature_matrix.npy", sketch_index_feature_matrix) + # logging.info("矩阵更新完成") + + except Exception as e: + logging.error(f"定时任务执行失败: {str(e)}", exc_info=True) + finally: + if conn: + conn.close() + + +def plot_interaction_count_matrix(interaction_count_matrix): + """绘制交互次数矩阵的分布图(热图),不隐藏零值""" + try: + if not isinstance(interaction_count_matrix, csr_matrix): + sparse_matrix = csr_matrix(interaction_count_matrix) + else: + sparse_matrix = interaction_count_matrix + + # 转换为密集矩阵 + try: + dense_matrix = sparse_matrix.toarray() + except MemoryError: + logging.error("内存不足,无法转换为密集矩阵") + return + + # 自动检测可用中文字体 + try: + font_path = fm.findfont(fm.FontProperties(family='sans-serif', style='normal')) + plt.rcParams['font.sans-serif'] = fm.FontProperties(fname=font_path).get_name() + except: + plt.rcParams['font.sans-serif'] = ['DejaVu Sans'] # 回退到英文字体 + plt.rcParams['axes.unicode_minus'] = False + + # 处理大矩阵的显示,限制显示范围 + if dense_matrix.shape[0] > 1000 or dense_matrix.shape[1] > 1000: + dense_matrix = dense_matrix[:1000, :1000] # 只绘制前1000行列 + + plt.figure(figsize=(15, 10)) + + # 使用 `cmap` 来设置颜色,零值可以使用特定颜色,调整 `vmin` 和 `vmax` 让热图更具对比 + sns.heatmap( + dense_matrix, + cmap="Blues", # 可以选择不同的颜色映射,"Blues" 或 "YlGnBu" + annot=False, # 关闭标注 + cbar_kws={"label": "Interaction Count"}, # 添加颜色条标签 + linewidths=0.5, + vmin=0, # 
设置最小值,确保零值明显 + vmax=np.max(dense_matrix) # 设置最大值,保持颜色映射的合理性 + ) + + plt.title('User-Sketch Interaction Matrix (With Zero Entries)') + plt.xlabel('Sketch Index') + plt.ylabel('User Index') + plt.savefig('interaction_heatmap_with_zeros.png', dpi=150, bbox_inches='tight') + plt.close() + + logging.info("热图已保存为 interaction_heatmap_with_zeros.png") + + except Exception as e: + logging.error(f"绘图失败: {str(e)}", exc_info=True) + +def visualize_sparse_matrix(matrix, title='Non-zero Interactions (Scatter Plot)', filename="scatter_figure_interaction.png"): + if not sparse.issparse(matrix): + # 转换为稀疏矩阵 + matrix = sparse.csr_matrix(matrix) + + # 获取非零元素的坐标和值 + rows, cols = matrix.nonzero() + values = matrix.data + + # 绘制散点图 + plt.figure(figsize=(24, 20)) + plt.scatter(cols, rows, c=values, cmap='coolwarm', alpha=0.7, s=1) + plt.colorbar(label='Interaction Count') + plt.title(title) + plt.xlabel('Item Index') + plt.ylabel('Item Index') + plt.savefig(filename) + +def calculate_interaction_matrix(user_data): + """基于新表结构的交互次数矩阵计算(仅系统sketch)""" + # 获取所有用户ID + all_users = set() + for account_id, path, like_count in user_data: + category = get_category_from_path(path) + if category not in TABLE_CATEGORIES.keys(): + continue + all_users.add(account_id) + + # 获取所有系统sketch的iid + system_sketch_iids = {sketch_to_iid[path] for path in SYSTEM_FEATURES.keys() if path in sketch_to_iid} + + # 创建映射关系 + user_index = {uid: idx for idx, uid in enumerate(sorted(all_users))} + sketch_index = {iid: idx for idx, iid in enumerate(sorted(system_sketch_iids))} + + # 初始化双矩阵:归一化矩阵(密集)和原始计数矩阵(稀疏) + interaction_matrix = np.zeros((len(all_users), len(sketch_index))) # 归一化矩阵 + data, rows, cols = [], [], [] # 用于构建稀疏矩阵的COO格式数据 + + # 预计算用户最大交互次数 + user_max_likes = defaultdict(int) + for account_id, path, like_count in user_data: + if sketch_to_iid.get(path) in system_sketch_iids: + user_max_likes[account_id] = max(user_max_likes[account_id], like_count) + + # 填充矩阵 + for account_id, path, like_count in user_data: + 
sketch_iid = sketch_to_iid.get(path) + if sketch_iid not in system_sketch_iids: + continue + + user_idx = user_index[account_id] + sketch_idx = sketch_index[sketch_iid] + + # 填充稀疏矩阵数据 + data.append(like_count) + rows.append(user_idx) + cols.append(sketch_idx) + + # 归一化计算 + max_like = user_max_likes.get(account_id, 1) + interaction_matrix[user_idx, sketch_idx] = np.log1p(1 + like_count) / np.log1p(1 + max_like) + + # 构建稀疏矩阵(CSR格式适合快速行操作) + interaction_count_matrix = csr_matrix((data, (rows, cols)), shape=(len(all_users), len(sketch_index))) + + return interaction_matrix, interaction_count_matrix, user_index, sketch_index, {iid: get_category_from_path(path) for path, iid in sketch_to_iid.items()} + + +def calculate_feature_matrix(user_data): + """基于新表结构的特征矩阵计算,返回用户与系统草图的相似度矩阵(加权平均)""" + + # 用户特征数据存储结构:{(account_id, category): {sketch_iid: [(feature_vector, weight)]}} + user_feature_weights = defaultdict(lambda: defaultdict(list)) + + # 初始化所有用户和系统草图集合 + all_users = set() + all_system_sketches = set(SYSTEM_FEATURES.keys()) + + # ==== 第一遍遍历:收集特征向量和权重 ==== + for account_id, path, like_count in user_data: + category = get_category_from_path(path) + if category not in TABLE_CATEGORIES.keys(): + continue + + sketch_iid = sketch_to_iid.get(path) + if not sketch_iid: + continue + + # 记录用户 + all_users.add(account_id) + + # 提取特征并记录权重(like_count) + if path in SYSTEM_FEATURES: # 系统草图 + feature = SYSTEM_FEATURES[path] + weight = like_count # 使用like_count作为权重 + user_feature_weights[(account_id, category)][sketch_iid].append((feature, weight)) + else: # 用户草图 + feature = extract_feature_vector_from_resnet(path) + weight = like_count + user_feature_weights[(account_id, category)][sketch_iid].append((feature, weight)) + + # ==== 第二遍遍历:收集所有系统草图iid ==== + system_sketch_iids = set() + for sketch_path in SYSTEM_FEATURES: + if iid := sketch_to_iid.get(sketch_path): + system_sketch_iids.add(iid) + + # ==== 创建索引映射 ==== + user_list = sorted(all_users) + sketch_list = sorted(system_sketch_iids) 
+ + user_index = {uid: idx for idx, uid in enumerate(user_list)} + sketch_index = {iid: idx for idx, iid in enumerate(sketch_list)} + + # ==== 初始化特征矩阵 ==== + feature_matrix = np.zeros((len(user_list), len(sketch_list))) + + # ==== 预计算加权平均特征向量 ==== + user_avg_features = {} + for (account_id, category), sketches in user_feature_weights.items(): + try: + # 展平所有特征向量和权重 + all_features_weights = [(vec, weight) for vec_list in sketches.values() for vec, weight in vec_list] + + if len(all_features_weights) == 0: + continue + + # 计算总权重 + total_weight = sum(weight for _, weight in all_features_weights) + if total_weight <= 0: # 防止除零错误 + total_weight = 1.0 + + # 加权平均计算 + weighted_sum = np.zeros_like(all_features_weights[0][0]) # 获取特征向量维度 + for vec, weight in all_features_weights: + weighted_sum += vec * weight + + avg_vec = weighted_sum / total_weight + user_avg_features[(account_id, category)] = avg_vec + + except Exception as e: + logging.warning(f"用户({account_id},{category})加权特征计算失败: {str(e)}") + continue + + # ==== 计算相似度并填充矩阵 ==== + for sketch_path, sys_vector in SYSTEM_FEATURES.items(): + sketch_iid = sketch_to_iid.get(sketch_path) + + system_sketch_category = get_category_from_path(sketch_path) + if not sketch_iid or sketch_iid not in sketch_index: + continue + + sketch_col = sketch_index[sketch_iid] + + # 遍历所有用户 + for account_id in all_users: + user_row = user_index.get(account_id) + if user_row is None: + continue + + # 获取用户加权平均特征向量 + try: + # 直接通过复合键获取用户特征向量 + user_vec = user_avg_features[(account_id, system_sketch_category)] + except KeyError: + # 该用户在此类别下无特征数据 + continue + + # 计算余弦相似度 + cos_sim = cosine_similarity(user_vec, sys_vector) + feature_matrix[user_row, sketch_col] = cos_sim + + return feature_matrix, user_index, sketch_index, {iid: get_category_from_path(path) for path, iid in sketch_to_iid.items()} + + +def get_category_from_path(path): + """从path字段解析类别""" + try: + parts = path.split('/') + if len(parts) >= 2: + return f"{parts[2]}_{parts[3]}" + return 
"unknown" + except: + return "unknown" + + +def cosine_similarity(vec1, vec2): + """计算余弦相似度(增加零值处理)""" + norm = np.linalg.norm(vec1) * np.linalg.norm(vec2) + return np.dot(vec1, vec2) / (norm + 1e-10) if norm != 0 else 0.0 + + +if __name__ == "__main__": + try: + update_user_matrices() + # scheduler = BlockingScheduler() + # scheduler.add_job(update_user_matrices, 'cron', hour=12, timezone='Asia/Shanghai') + # logging.info("定时任务已启动,每天12:00执行") + # scheduler.start() + except KeyboardInterrupt: + logging.info("定时任务已停止") + except Exception as e: + logging.error(f"调度器启动失败: {str(e)}", exc_info=True) diff --git a/app/service/recommend/service.py b/app/service/recommend/service.py new file mode 100644 index 0000000..bbdc6c3 --- /dev/null +++ b/app/service/recommend/service.py @@ -0,0 +1,172 @@ +# 预加载资源 +import logging +import time +from collections import defaultdict + +import numpy as np + +from app.core.config import DB_CONFIG, RECOMMEND_PATH_PREFIX + +logger = logging.getLogger() +import pymysql + +matrix_data = { + "interaction_matrix": None, + "feature_matrix": None, + "user_index_interaction": None, + "sketch_index_interaction": None, + "user_index_feature": None, + "sketch_index_feature": None, + "iid_to_sketch": None, + "category_to_iids": None, + "cached_scores": {}, + "cached_valid_idxs": {}, + "category_sketch_idxs_inter": None, + "category_sketch_idxs_feature": None, + "user_inter_full": dict(), + "user_feat_full": dict(), +} + + +def load_resources(): + """加载所有矩阵和映射关系,并触发预缓存""" + try: + start_time = time.time() + + # 清空缓存 + matrix_data["cached_scores"].clear() + matrix_data["cached_valid_idxs"].clear() + + # 加载数据 + sketch_to_iid = np.load(f'{RECOMMEND_PATH_PREFIX}sketch_to_iid.npy', allow_pickle=True).item() + matrix_data["iid_to_sketch"] = {v: k for k, v in sketch_to_iid.items()} + + matrix_data["interaction_matrix"] = np.load(f"{RECOMMEND_PATH_PREFIX}interaction_matrix.npy", allow_pickle=True) + matrix_data["user_index_interaction"] = 
np.load(f"{RECOMMEND_PATH_PREFIX}user_index_interaction_matrix.npy", allow_pickle=True).item() + matrix_data["sketch_index_interaction"] = np.load(f"{RECOMMEND_PATH_PREFIX}sketch_index_interaction_matrix.npy", + allow_pickle=True).item() + + matrix_data["feature_matrix"] = np.load(f"{RECOMMEND_PATH_PREFIX}feature_matrix.npy", allow_pickle=True) + matrix_data["user_index_feature"] = np.load(f"{RECOMMEND_PATH_PREFIX}user_index_feature_matrix.npy", allow_pickle=True).item() + matrix_data["sketch_index_feature"] = np.load(f"{RECOMMEND_PATH_PREFIX}sketch_index_feature_matrix.npy", allow_pickle=True).item() + + category_to_iid_map = np.load(f"{RECOMMEND_PATH_PREFIX}iid_to_category_interaction_matrix.npy", allow_pickle=True).item() + matrix_data["category_to_iids"] = defaultdict(list) + for iid, cat in category_to_iid_map.items(): + matrix_data["category_to_iids"][cat].append(iid) + + logger.info(f"资源加载完成,耗时: {time.time() - start_time:.2f}秒") + + # 触发预缓存 + precache_user_category() + + except Exception as e: + logger.error(f"资源加载失败: {str(e)}") + raise RuntimeError("初始化失败") + + +def precache_user_category(): + """预缓存用户-分类组合数据""" + if not all([ + matrix_data["interaction_matrix"] is not None, + matrix_data["feature_matrix"] is not None, + matrix_data["user_index_interaction"] is not None + ]): + logger.warning("资源未加载完成,跳过预缓存") + return + + start_time = time.time() + user_categories = get_all_user_categories() + + precached_count = 0 + for user_id, categories in user_categories.items(): + for category in categories: + cache_key = (user_id, category) + if cache_key in matrix_data["cached_scores"]: + continue + + try: + # 获取用户索引 + user_idx_inter = matrix_data["user_index_interaction"].get(user_id) + user_idx_feature = matrix_data["user_index_feature"].get(user_id) + + # 获取类别对应的iid列表 + category_iids = matrix_data["category_to_iids"].get(category, []) + + # 过滤有效草图索引 + valid_sketch_idxs_inter = [ + idx for iid, idx in matrix_data["sketch_index_interaction"].items() + if iid in 
category_iids + ] + + # 处理交互分数 + if user_idx_inter is not None and valid_sketch_idxs_inter: + raw_inter_scores = matrix_data["interaction_matrix"][user_idx_inter, valid_sketch_idxs_inter] + processed_inter = raw_inter_scores * 0.7 + else: + processed_inter = np.array([]) + + # 处理特征分数 + valid_sketch_idxs_feature = [ + idx for iid, idx in matrix_data["sketch_index_feature"].items() + if iid in category_iids + ] + + if user_idx_feature is not None and valid_sketch_idxs_feature: + raw_feat_scores = matrix_data["feature_matrix"][user_idx_feature, valid_sketch_idxs_feature] + raw_feat_scores = (raw_feat_scores - np.min(raw_feat_scores)) / ( + np.max(raw_feat_scores) - np.min(raw_feat_scores) + 1e-8) + processed_feat = raw_feat_scores * 0.3 + else: + processed_feat = np.array([]) + + # 缓存结果 + if len(processed_inter) == len(processed_feat): + matrix_data["cached_scores"][cache_key] = (processed_inter, processed_feat) + matrix_data["cached_valid_idxs"][cache_key] = valid_sketch_idxs_inter + precached_count += 1 + + except Exception as e: + logger.error(f"预缓存失败 (user={user_id}, category={category}): {str(e)}") + + logger.info(f"预缓存完成,共缓存 {precached_count} 个组合,耗时: {time.time() - start_time:.2f}秒") + + +def get_all_user_categories(): + """获取所有用户及其对应的分类""" + conn = None + try: + conn = pymysql.connect(**DB_CONFIG) + cursor = conn.cursor() + + query = """ + SELECT DISTINCT account_id, path + FROM user_preference_log_prediction + """ + cursor.execute(query) + results = cursor.fetchall() + + user_categories = defaultdict(set) + for account_id, path in results: + category = get_category_from_path(path) + user_categories[account_id].add(category) + + return dict(user_categories) + + except Exception as e: + logger.error(f"数据库查询失败: {str(e)}") + return {} + finally: + if conn: + conn.close() + + +def get_category_from_path(path: str) -> str: + """从路径解析类别""" + try: + parts = path.split('/') + if len(parts) >= 4: + return f"{parts[2]}_{parts[3]}" + return "unknown" + except: + return 
"unknown" From a79387dec2277110e76b483aa492008431f3477f Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Fri, 28 Feb 2025 17:31:55 +0800 Subject: [PATCH 009/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20sketch=20=E6=8E=A8=E8=8D=90=E7=AE=97?= =?UTF-8?q?=E6=B3=95=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs?= =?UTF-8?q?=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refac?= =?UTF-8?q?tor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- requirements.txt | Bin 1860 -> 1902 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/requirements.txt b/requirements.txt index 73507145f0a1adf6986bae737597a22a911f640e..8f37179d818b8e83fd8539afc7ea502a74609355 100644 GIT binary patch delta 46 zcmX@Y_l|GFAtuE{h609ShGd2ehE#?WhEj$cAYH^@3xviDdJKjPhMRXXb+Z5fCshkB delta 12 TcmaFIcZ6@lA*Ri5n3`AsCkq8? 
From 903c1ee6a4929453b42f89a7d95d6e26fe74da7c Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Wed, 5 Mar 2025 11:19:35 +0800 Subject: [PATCH 010/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=E6=A8=A1=E7=89=B9=E8=85=BF=E9=83=A8?= =?UTF-8?q?=E7=BC=96=E8=BE=91=E5=8A=9F=E8=83=BD=20fix=EF=BC=88=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_mannequins_edit.py | 40 ++++++++++ app/api/api_route.py | 4 +- app/schemas/mannequin_edit.py | 8 ++ app/service/mannequins_edit/service.py | 101 +++++++++++++++++++++++++ 4 files changed, 152 insertions(+), 1 deletion(-) create mode 100644 app/api/api_mannequins_edit.py create mode 100644 app/schemas/mannequin_edit.py create mode 100644 app/service/mannequins_edit/service.py diff --git a/app/api/api_mannequins_edit.py b/app/api/api_mannequins_edit.py new file mode 100644 index 0000000..5cfaf3d --- /dev/null +++ b/app/api/api_mannequins_edit.py @@ -0,0 +1,40 @@ +import json +import logging + +from fastapi import APIRouter, HTTPException + +from app.schemas.mannequin_edit import MannequinModel +from app.schemas.response_template import ResponseModel +from app.service.mannequins_edit.service import MannequinEditService + +router = APIRouter() +logger = logging.getLogger() + + +@router.post("/mannequins_edit") +def mannequins_edit(request_data: MannequinModel): + """ + 模特腿长调整 + 创建一个具有以下参数的请求体: + - **mannequins**: mannequins url等信息 + - **scale**: 大腿小腿比例 + - **bucket_name**: bucket name + - **mannequin_name**: 模特名称 + + 示例参数: + - **{ + "mannequins": "aida-sys-image/models/male/dc36ce58-46c3-4b6f-8787-5ca7d6fc26e6.png", + "scale": [0.75, 0.75], + "bucket_name": "test", + "mannequin_name": "mannequin_name" + 
}** + """ + try: + logger.info(f"mannequins_edit request item is : @@@@@@:{json.dumps(request_data.dict())}") + service = MannequinEditService(request_data) + data = service() + logger.info(f"mannequins_edit response @@@@@@:{json.dumps(data)}") + except Exception as e: + logger.warning(f"mannequins_edit Run Exception @@@@@@:{e}") + raise HTTPException(status_code=404, detail=str(e)) + return ResponseModel(data=data) diff --git a/app/api/api_route.py b/app/api/api_route.py index 3890316..33e238e 100644 --- a/app/api/api_route.py +++ b/app/api/api_route.py @@ -8,9 +8,10 @@ from app.api import api_design from app.api import api_design_pre_processing from app.api import api_generate_image from app.api import api_image2sketch +from app.api import api_mannequins_edit from app.api import api_prompt_generation -from app.api import api_super_resolution from app.api import api_recommendation +from app.api import api_super_resolution from app.api import api_test router = APIRouter() @@ -28,3 +29,4 @@ router.include_router(api_brighten.router, tags=['api_brighten'], prefix="/api") router.include_router(api_query_image.router, tags=['api_query_image'], prefix="/api") router.include_router(api_brand_dna.router, tags=['api_brand_dna'], prefix="/api") router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") +router.include_router(api_mannequins_edit.router, tags=['api_mannequins_edit'], prefix="/api") diff --git a/app/schemas/mannequin_edit.py b/app/schemas/mannequin_edit.py new file mode 100644 index 0000000..c5514d6 --- /dev/null +++ b/app/schemas/mannequin_edit.py @@ -0,0 +1,8 @@ +from pydantic import BaseModel + + +class MannequinModel(BaseModel): + mannequins: str + scale: list[float, float] + bucket_name: str + mannequin_name: str diff --git a/app/service/mannequins_edit/service.py b/app/service/mannequins_edit/service.py new file mode 100644 index 0000000..bbb6cc5 --- /dev/null +++ b/app/service/mannequins_edit/service.py @@ -0,0 +1,101 
@@ +import cv2 +import mediapipe as mp +import numpy as np +from minio import Minio + +from app.core.config import MINIO_URL, MINIO_ACCESS, MINIO_SECRET, MINIO_SECURE +from app.schemas.mannequin_edit import MannequinModel +from app.service.utils.new_oss_client import oss_get_image, oss_upload_image + +minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) + + +class MannequinEditService(): + def __init__(self, request_data): + self.scale = request_data.scale + self.image = oss_get_image(oss_client=minio_client, bucket=request_data.mannequins.split('/')[0], object_name=request_data.mannequins[request_data.mannequins.find('/') + 1:], data_type="cv2") + self.mannequin_name = request_data.mannequin_name + self.bucket_name = request_data.bucket_name + if self.image.shape[2] == 4: + self.bgr = self.image[:, :, :3] + self.alpha = self.image[:, :, 3] + self.bgr = cv2.bitwise_and(self.bgr, self.bgr, mask=cv2.normalize(self.alpha, None, 0, 1, cv2.NORM_MINMAX)) + self.h, self.w, _ = self.bgr.shape + else: + self.bgr = self.image + self.h, self.w, _ = self.bgr.shape + self.alpha = None + + def __call__(self, *args, **kwargs): + leg_top, leg_bottom = self.attitude_detection() + if leg_top and leg_bottom: + new_mannequin = self.resize_leg(leg_top, leg_bottom) + _, encoded_image = cv2.imencode('.png', new_mannequin) + image_bytes = encoded_image.tobytes() + req = oss_upload_image(oss_client=minio_client, bucket=self.bucket_name, object_name=f"{self.mannequin_name}.png", image_bytes=image_bytes) + return req.bucket_name + "/" + req.object_name + else: + return "No leg detected" + + def attitude_detection(self): + mp_pose = mp.solutions.pose + pose = mp_pose.Pose() + + # 将 BGR 图像转换为 RGB 格式 + image_rgb = cv2.cvtColor(self.bgr, cv2.COLOR_BGR2RGB) + leg_top, leg_bottom = None, None + # 进行姿态检测 + results = pose.process(image_rgb) + if results.pose_landmarks: + # 获取腿部关键点 + landmarks = results.pose_landmarks.landmark + + # 找到腿部上边界和下边界 + 
leg_top = int(landmarks[mp_pose.PoseLandmark.LEFT_HIP].y * self.h) + leg_bottom = int(max(landmarks[mp_pose.PoseLandmark.LEFT_ANKLE].y, + landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE].y) * self.h) + + return leg_top, leg_bottom + + def resize_leg(self, leg_top, leg_bottom): + # 上半身 + top_part_bgr = self.bgr[:leg_top, :] + top_part_bgr_alpha = self.alpha[:leg_top, :] + + # 小腿 + part_thigh = self.bgr[leg_top:leg_bottom, :] + part_thigh_alpha = self.alpha[leg_top:leg_bottom, :] + + # 大腿 + part_calf = self.bgr[leg_bottom:, :] + part_calf_alpha = self.alpha[leg_bottom:, :] + + new_thigh_height = int((leg_bottom - leg_top) * self.scale[0]) + new_calf_height = int((self.h - leg_bottom) * self.scale[1]) + + resized_thigh = cv2.resize(part_thigh, (self.w, new_thigh_height), interpolation=cv2.INTER_LINEAR) + resized_thigh_alpha = cv2.resize(part_thigh_alpha, (self.w, new_thigh_height), interpolation=cv2.INTER_LINEAR) + resized_calf = cv2.resize(part_calf, (self.w, new_calf_height), interpolation=cv2.INTER_LINEAR) + resized_calf_alpha = cv2.resize(part_calf_alpha, (self.w, new_calf_height), interpolation=cv2.INTER_LINEAR) + + new_bgr = np.vstack((top_part_bgr, resized_thigh, resized_calf)) + new_bgr_alpha = np.vstack((top_part_bgr_alpha, resized_thigh_alpha, resized_calf_alpha)) + + if self.alpha is not None: + # 拼接 alpha 通道 + # 合并 BGR 通道和 alpha 通道 + new_image = np.dstack((new_bgr, new_bgr_alpha)) + else: + new_image = new_bgr + return new_image + + +if __name__ == '__main__': + request_data = MannequinModel( + mannequins="aida-sys-image/models/male/dc36ce58-46c3-4b6f-8787-5ca7d6fc26e6.png", + scale=[0.75, 0.75], + bucket_name="test", + mannequin_name="mannequin_name" + ) + service = MannequinEditService(request_data) + print(service()) From 8dc508d4bbc6ebfae342801e876523377b77bbe1 Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Wed, 5 Mar 2025 15:07:14 +0800 Subject: [PATCH 011/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= 
=?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):=20=E4=BA=BA=E8=84=B8?= =?UTF-8?q?=E8=AF=86=E5=88=AB=E9=97=AE=E9=A2=98=E6=B5=8B=E8=AF=95=E7=BB=93?= =?UTF-8?q?=E6=9D=9F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/core/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/core/config.py b/app/core/config.py index df4702b..5a1e2a3 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -138,7 +138,7 @@ GEN_SINGLE_LOGO_RABBITMQ_QUEUES = os.getenv("GEN_SINGLE_LOGO_RABBITMQ_QUEUES", f GPI_RABBITMQ_QUEUES = os.getenv("GEN_PRODUCT_IMAGE_RABBITMQ_QUEUES", f"ToProductImage{RABBITMQ_ENV}") GPI_MODEL_NAME_OVERALL = 'diffusion_ensemble_all' GPI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_cnet' -GPI_MODEL_URL = '10.1.1.243:15551' +GPI_MODEL_URL = '10.1.1.243:10051' # Generate Single Logo service config GRI_RABBITMQ_QUEUES = os.getenv("GEN_RELIGHT_IMAGE_RABBITMQ_QUEUES", f"Relight{RABBITMQ_ENV}") From 3655472529b014ad624f1d2b20bdfe9e6de604f6 Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Wed, 5 Mar 2025 15:08:56 +0800 Subject: [PATCH 012/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):=20=E4=BA=BA=E8=84=B8?= =?UTF-8?q?=E8=AF=86=E5=88=AB=E9=97=AE=E9=A2=98=E6=B5=8B=E8=AF=95=E7=BB=93?= =?UTF-8?q?=E6=9D=9F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- requirements.txt | Bin 1902 -> 1938 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git 
a/requirements.txt b/requirements.txt index 8f37179d818b8e83fd8539afc7ea502a74609355..909fb2168e3b601ad8fa54f6433f2598f4a30e16 100644 GIT binary patch delta 48 wcmaFIH;I448z$vkhE#?WhD?S;h5{g20OZ*Mp#g&)gCUqSVldqNnyHxu057Zx{r~^~ delta 12 TcmbQl|Bi3N8>Y Date: Mon, 10 Mar 2025 11:12:49 +0800 Subject: [PATCH 013/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20design=20=E9=80=89=E5=8C=BA=E9=A2=9C=E8=89=B2?= =?UTF-8?q?=E8=B0=83=E6=95=B4=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98?= =?UTF-8?q?=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84?= =?UTF-8?q?=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/design_fast/pipeline/color.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/app/service/design_fast/pipeline/color.py b/app/service/design_fast/pipeline/color.py index 3033bb5..d6c84e4 100644 --- a/app/service/design_fast/pipeline/color.py +++ b/app/service/design_fast/pipeline/color.py @@ -29,6 +29,24 @@ class Color: else: pattern = self.get_pattern(result['color']) resize_pattern = cv2.resize(pattern, (dim_image_w, dim_image_h), interpolation=cv2.INTER_AREA) + + if "partial_color" in result.keys() and result['partial_color'] != "": + bucket_name = result['partial_color'].split('/')[0] + object_name = result['partial_color'][result['partial_color'].find('/') + 1:] + partial_color = oss_get_image(oss_client=self.minio_client, bucket=bucket_name, object_name=object_name, data_type="cv2") + h, w = partial_color.shape[0:2] + resize_pattern = cv2.resize(resize_pattern, (w, h), interpolation=cv2.INTER_AREA) + # 分离出 png 图的 alpha 通道 + alpha_channel = partial_color[:, :, 3] + # 提取 png 图的 RGB 通道 + png_rgb = partial_color[:, :, :3] + # 创建一个与 cv 图大小相同的掩码,用于指示哪些像素需要替换 + mask = alpha_channel > 0 + # 将掩码扩展为 3 通道,以便与 cv 图进行逐元素操作 + mask_3ch = 
np.stack([mask] * 3, axis=-1) + # 根据掩码将 png 图的颜色覆盖到 cv 图上 + resize_pattern[mask_3ch] = png_rgb[mask_3ch] + resize_pattern = cv2.resize(resize_pattern, (dim_image_w, dim_image_h), interpolation=cv2.INTER_AREA) closed_mo = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2) gray_mo = np.expand_dims(result['gray'], axis=2).repeat(3, axis=2) get_image_fir = resize_pattern * (closed_mo / 255) * (gray_mo / 255) From e94d5ac6a3224de036d252e8ac089fe35a05feee Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Thu, 13 Mar 2025 12:04:14 +0800 Subject: [PATCH 014/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):=20Agent=20generate=20?= =?UTF-8?q?test?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_agent_generate_image.py | 20 ++++++ app/api/api_route.py | 3 + app/service/generate_image/agent_generate.py | 68 ++++++++++++++++++++ 3 files changed, 91 insertions(+) create mode 100644 app/api/api_agent_generate_image.py create mode 100644 app/service/generate_image/agent_generate.py diff --git a/app/api/api_agent_generate_image.py b/app/api/api_agent_generate_image.py new file mode 100644 index 0000000..d8efbbb --- /dev/null +++ b/app/api/api_agent_generate_image.py @@ -0,0 +1,20 @@ +import logging + +from fastapi import APIRouter, HTTPException + +from app.schemas.response_template import ResponseModel +from app.service.generate_image.agent_generate import GenerateImage + +router = APIRouter() +logger = logging.getLogger() + + +@router.get("/agent_generate_image") +def generate_image(prompt: str): + try: + server = GenerateImage() + data = server.get_result(prompt) + except Exception as e: + logger.warning(f"generate_image Run Exception @@@@@@:{e}") + 
raise HTTPException(status_code=404, detail=str(e)) + return ResponseModel(data=data) diff --git a/app/api/api_route.py b/app/api/api_route.py index 33e238e..61bd43f 100644 --- a/app/api/api_route.py +++ b/app/api/api_route.py @@ -12,6 +12,8 @@ from app.api import api_mannequins_edit from app.api import api_prompt_generation from app.api import api_recommendation from app.api import api_super_resolution +from app.api import api_agent_generate_image + from app.api import api_test router = APIRouter() @@ -30,3 +32,4 @@ router.include_router(api_query_image.router, tags=['api_query_image'], prefix=" router.include_router(api_brand_dna.router, tags=['api_brand_dna'], prefix="/api") router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") router.include_router(api_mannequins_edit.router, tags=['api_mannequins_edit'], prefix="/api") +router.include_router(api_agent_generate_image.router, tags=['api_agent_generate_image'], prefix="/api") diff --git a/app/service/generate_image/agent_generate.py b/app/service/generate_image/agent_generate.py new file mode 100644 index 0000000..24623dc --- /dev/null +++ b/app/service/generate_image/agent_generate.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +""" +@Project :trinity_client +@File :service_att_recognition.py +@Author :周成融 +@Date :2023/7/26 12:01:05 +@detail : +""" +import logging +from datetime import timedelta + +import cv2 +import numpy as np +import tritonclient.grpc as grpcclient +from minio import Minio +from tritonclient.utils import np_to_triton_dtype + +from app.core.config import * +from app.service.utils.oss_client import oss_upload_image + +logger = logging.getLogger() + + +class GenerateImage: + def __init__(self): + self.grpc_client = grpcclient.InferenceServerClient(url=GI_MODEL_URL) + self.image = np.random.randint(0, 256, (1024, 1024, 3), dtype=np.uint8) + self.batch_size = 1 + self.mode = 'txt2img' + self.minio_client = Minio(MINIO_URL, 
access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) + + def get_result(self, prompt): + prompts = [prompt] * self.batch_size + modes = [self.mode] * self.batch_size + images = [self.image.astype(np.float16)] * self.batch_size + + text_obj = np.array(prompts, dtype="object").reshape((-1, 1)) + mode_obj = np.array(modes, dtype="object").reshape((-1, 1)) + image_obj = np.array(images, dtype=np.float16).reshape((-1, 1024, 1024, 3)) + + input_text = grpcclient.InferInput("prompt", text_obj.shape, np_to_triton_dtype(text_obj.dtype)) + input_image = grpcclient.InferInput("input_image", image_obj.shape, np_to_triton_dtype(image_obj.dtype)) + input_mode = grpcclient.InferInput("mode", mode_obj.shape, np_to_triton_dtype(mode_obj.dtype)) + + input_text.set_data_from_numpy(text_obj) + input_image.set_data_from_numpy(image_obj) + input_mode.set_data_from_numpy(mode_obj) + + inputs = [input_text, input_image, input_mode] + result = self.grpc_client.infer(model_name=GI_MODEL_NAME, inputs=inputs) + image = result.as_numpy("generated_image") + image_result = cv2.cvtColor(np.squeeze(image.astype(np.uint8)), cv2.COLOR_RGB2BGR) + _, img_byte_array = cv2.imencode('.jpg', image_result) + object_name = f'test.jpg' + req = oss_upload_image(bucket='test', object_name=object_name, image_bytes=img_byte_array) + url = self.minio_client.get_presigned_url( + "GET", + "test", + object_name, + expires=timedelta(hours=2), + ) + return url + + +if __name__ == '__main__': + server = GenerateImage() + print(server.get_result("rabbit")) From 2e717f0145c2f64832244bd5698ee9bfa108cab4 Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Thu, 13 Mar 2025 15:01:59 +0800 Subject: [PATCH 015/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= 
=?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):=20Agent=20generate=20?= =?UTF-8?q?test?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_agent_generate_image.py | 2 +- app/api/api_route.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/app/api/api_agent_generate_image.py b/app/api/api_agent_generate_image.py index d8efbbb..9aeda19 100644 --- a/app/api/api_agent_generate_image.py +++ b/app/api/api_agent_generate_image.py @@ -17,4 +17,4 @@ def generate_image(prompt: str): except Exception as e: logger.warning(f"generate_image Run Exception @@@@@@:{e}") raise HTTPException(status_code=404, detail=str(e)) - return ResponseModel(data=data) + return data diff --git a/app/api/api_route.py b/app/api/api_route.py index 61bd43f..45f8567 100644 --- a/app/api/api_route.py +++ b/app/api/api_route.py @@ -30,6 +30,6 @@ router.include_router(api_image2sketch.router, tags=['api_image2sketch'], prefix router.include_router(api_brighten.router, tags=['api_brighten'], prefix="/api") router.include_router(api_query_image.router, tags=['api_query_image'], prefix="/api") router.include_router(api_brand_dna.router, tags=['api_brand_dna'], prefix="/api") -router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") +# router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") router.include_router(api_mannequins_edit.router, tags=['api_mannequins_edit'], prefix="/api") router.include_router(api_agent_generate_image.router, tags=['api_agent_generate_image'], prefix="/api") From 00b8e9fb02b6db39e60186e975f52f74f25ccb06 Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Thu, 13 Mar 2025 15:14:19 +0800 Subject: [PATCH 016/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= 
=?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):=20Agent=20generate=20?= =?UTF-8?q?test?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_agent_generate_image.py | 13 ++++++------ app/service/generate_image/agent_generate.py | 22 ++++++++++++-------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/app/api/api_agent_generate_image.py b/app/api/api_agent_generate_image.py index 9aeda19..e4001ff 100644 --- a/app/api/api_agent_generate_image.py +++ b/app/api/api_agent_generate_image.py @@ -1,6 +1,8 @@ +import io import logging from fastapi import APIRouter, HTTPException +from starlette.responses import StreamingResponse from app.schemas.response_template import ResponseModel from app.service.generate_image.agent_generate import GenerateImage @@ -11,10 +13,7 @@ logger = logging.getLogger() @router.get("/agent_generate_image") def generate_image(prompt: str): - try: - server = GenerateImage() - data = server.get_result(prompt) - except Exception as e: - logger.warning(f"generate_image Run Exception @@@@@@:{e}") - raise HTTPException(status_code=404, detail=str(e)) - return data + server = GenerateImage() + byte_stream = server.get_result(prompt) + # 返回流式响应 + return StreamingResponse(byte_stream, media_type="image/png") diff --git a/app/service/generate_image/agent_generate.py b/app/service/generate_image/agent_generate.py index 24623dc..58ac869 100644 --- a/app/service/generate_image/agent_generate.py +++ b/app/service/generate_image/agent_generate.py @@ -7,6 +7,7 @@ @Date :2023/7/26 12:01:05 @detail : """ +import io import logging from datetime import timedelta @@ -52,15 +53,18 @@ class GenerateImage: image = result.as_numpy("generated_image") image_result = cv2.cvtColor(np.squeeze(image.astype(np.uint8)), cv2.COLOR_RGB2BGR) _, img_byte_array = cv2.imencode('.jpg', image_result) - object_name = f'test.jpg' - req = 
oss_upload_image(bucket='test', object_name=object_name, image_bytes=img_byte_array) - url = self.minio_client.get_presigned_url( - "GET", - "test", - object_name, - expires=timedelta(hours=2), - ) - return url + byte_stream = io.BytesIO(img_byte_array) + byte_stream.seek(0) + + # object_name = f'test.jpg' + # req = oss_upload_image(bucket='test', object_name=object_name, image_bytes=img_byte_array) + # url = self.minio_client.get_presigned_url( + # "GET", + # "test", + # object_name, + # expires=timedelta(hours=2), + # ) + return byte_stream if __name__ == '__main__': From b4671a3793e8cf181d71010b567e79c8d1bdceb7 Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Mon, 17 Mar 2025 11:14:54 +0800 Subject: [PATCH 017/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20pose=20transform=20=E6=8E=A5=E5=8F=A3=20f?= =?UTF-8?q?ix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_pose_transform.py | 49 ++++++++ app/api/api_route.py | 6 +- app/schemas/pose_transform.py | 7 ++ .../generate_image/service_pose_transform.py | 117 ++++++++++++++++++ 4 files changed, 176 insertions(+), 3 deletions(-) create mode 100644 app/api/api_pose_transform.py create mode 100644 app/schemas/pose_transform.py create mode 100644 app/service/generate_image/service_pose_transform.py diff --git a/app/api/api_pose_transform.py b/app/api/api_pose_transform.py new file mode 100644 index 0000000..fe5fc5a --- /dev/null +++ b/app/api/api_pose_transform.py @@ -0,0 +1,49 @@ +import json +import logging + +from fastapi import APIRouter, BackgroundTasks, HTTPException + +from app.schemas.pose_transform import PoseTransformModel +from app.schemas.response_template import ResponseModel +from 
app.service.generate_image.service_pose_transform import PoseTransformService, infer_cancel as pose_transform_infer_cancel + +router = APIRouter() +logger = logging.getLogger() + + +@router.post("/pose_transform") +def pose_transform(request_item: PoseTransformModel, background_tasks: BackgroundTasks): + """ + 创建一个具有以下参数的请求体: + - **tasks_id**: 任务id 用于取消生成任务和获取生成结果 + - **image_url**: 被生成图片的S3或minio url地址 + - **pose_id**: 1 + + + 示例参数: + { + "tasks_id": "123-89", + "image_url": "aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png", + "pose_id": "1" + } + """ + try: + logger.info(f"pose_transform request item is : @@@@@@:{json.dumps(request_item.dict())}") + service = PoseTransformService(request_item) + background_tasks.add_task(service.get_result) + except Exception as e: + logger.warning(f"pose_transform Run Exception @@@@@@:{e}") + raise HTTPException(status_code=404, detail=str(e)) + return ResponseModel() + + +@router.get("/pose_transform_cancel/{tasks_id}") +def pose_transform_cancel(tasks_id: str): + try: + logger.info(f"pose_transform_cancel request item is : @@@@@@:{tasks_id}") + data = pose_transform_infer_cancel(tasks_id) + logger.info(f"pose_transform_cancel response @@@@@@:{data}") + except Exception as e: + logger.warning(f"pose_transform_cancel Run Exception @@@@@@:{e}") + raise HTTPException(status_code=404, detail=str(e)) + return ResponseModel(data=data['data']) diff --git a/app/api/api_route.py b/app/api/api_route.py index 45f8567..7fbbc9c 100644 --- a/app/api/api_route.py +++ b/app/api/api_route.py @@ -1,5 +1,6 @@ from fastapi import APIRouter +from app.api import api_agent_generate_image from app.api import api_attribute_retrieve, api_query_image from app.api import api_brand_dna from app.api import api_brighten @@ -9,11 +10,9 @@ from app.api import api_design_pre_processing from app.api import api_generate_image from app.api import api_image2sketch from app.api import api_mannequins_edit +from app.api import api_pose_transform from 
app.api import api_prompt_generation -from app.api import api_recommendation from app.api import api_super_resolution -from app.api import api_agent_generate_image - from app.api import api_test router = APIRouter() @@ -33,3 +32,4 @@ router.include_router(api_brand_dna.router, tags=['api_brand_dna'], prefix="/api # router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") router.include_router(api_mannequins_edit.router, tags=['api_mannequins_edit'], prefix="/api") router.include_router(api_agent_generate_image.router, tags=['api_agent_generate_image'], prefix="/api") +router.include_router(api_pose_transform.router, tags=['api_pose_transform'], prefix="/api") diff --git a/app/schemas/pose_transform.py b/app/schemas/pose_transform.py new file mode 100644 index 0000000..045d8b9 --- /dev/null +++ b/app/schemas/pose_transform.py @@ -0,0 +1,7 @@ +from pydantic import BaseModel + + +class PoseTransformModel(BaseModel): + image_url: str + tasks_id: str + pose_id: str diff --git a/app/service/generate_image/service_pose_transform.py b/app/service/generate_image/service_pose_transform.py new file mode 100644 index 0000000..f2948b3 --- /dev/null +++ b/app/service/generate_image/service_pose_transform.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +""" +@Project :trinity_client +@File :service_pose_transform.py +@Author :周成融 +@Date :2023/7/26 12:01:05 +@detail : +""" +import json +import logging + +import cv2 +import numpy as np +import redis +import tritonclient.grpc as grpcclient +from PIL import Image + +from app.core.config import * +from app.schemas.pose_transform import PoseTransformModel +from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image +from app.service.utils.oss_client import oss_get_image + +logger = logging.getLogger() + + +class PoseTransformService: + def __init__(self, request_data): + if DEBUG is False: + self.connection = 
pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) + self.channel = self.connection.channel() + self.grpc_client = grpcclient.InferenceServerClient(url=GRI_MODEL_URL) + self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + self.category = "pose_transform" + self.batch_size = 1 + self.seed = "1" + self.image_url = request_data.image_url + self.image = oss_get_image(bucket=self.image_url.split('/')[0], object_name=self.image_url[self.image_url.find('/') + 1:], data_type="cv2") + self.tasks_id = request_data.tasks_id + self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:] + self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'image_url': ''} + self.redis_client.set(self.tasks_id, json.dumps(self.gen_product_data)) + self.redis_client.expire(self.tasks_id, 600) + + def callback(self, result, error): + if error: + self.gen_product_data['status'] = "FAILURE" + self.gen_product_data['message'] = str(error) + self.redis_client.set(self.tasks_id, json.dumps(self.gen_product_data)) + else: + image = result.as_numpy("generated_inpaint_image") + image_result = Image.fromarray(np.squeeze(image.astype(np.uint8))) + image_url = upload_SDXL_image(image_result, user_id=self.user_id, category=f"{self.category}", file_name=f"{self.tasks_id}.png") + self.gen_product_data['status'] = "SUCCESS" + self.gen_product_data['message'] = "success" + self.gen_product_data['image_url'] = str(image_url) + self.redis_client.set(self.tasks_id, json.dumps(self.gen_product_data)) + + def read_tasks_status(self): + status_data = self.redis_client.get(self.tasks_id) + return json.loads(status_data), status_data + + def get_result(self): + try: + image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB) + image = cv2.resize(image, (512, 768)) + images = [image.astype(np.uint8)] * self.batch_size + + image_obj = np.array(images, dtype=np.uint8).reshape((768, 512, 3)) + + input_image = 
grpcclient.InferInput("input_image", image_obj.shape, "UINT8") + + input_image.set_data_from_numpy(image_obj) + + inputs = [input_image] + # ctx = self.grpc_client.async_infer(model_name=GRI_MODEL_NAME_OVERALL, inputs=inputs, callback=self.callback) + + # time_out = 600 + # while time_out > 0: + # gen_product_data, _ = self.read_tasks_status() + # if gen_product_data['status'] in ["REVOKED", "FAILURE", "NO_FACE"]: + # ctx.cancel() + # break + # elif gen_product_data['status'] == "SUCCESS": + # break + # time_out -= 1 + # time.sleep(0.1) + gen_product_data, _ = self.read_tasks_status() + return gen_product_data + except Exception as e: + self.gen_product_data['status'] = "FAILURE" + self.gen_product_data['message'] = str(e) + self.redis_client.set(self.tasks_id, json.dumps(self.gen_product_data)) + raise Exception(str(e)) + finally: + dict_gen_product_data, str_gen_product_data = self.read_tasks_status() + if DEBUG is False: + self.channel.basic_publish(exchange='', routing_key=GRI_RABBITMQ_QUEUES, body=str_gen_product_data) + logger.info(f" [x] Sent to: {GRI_RABBITMQ_QUEUES} data:@@@@ {json.dumps(dict_gen_product_data, indent=4)}") + + +def infer_cancel(tasks_id): + redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'} + gen_product_data = json.dumps(data) + redis_client.set(tasks_id, gen_product_data) + return data + + +if __name__ == '__main__': + rd = PoseTransformModel( + tasks_id="123-89", + image_url='aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png', + pose_id="1" + ) + server = PoseTransformService(rd) + print(server.get_result()) From 446ffbb29d9bbf0bd427bd188056314c1179d77d Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Wed, 19 Mar 2025 10:31:48 +0800 Subject: [PATCH 018/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= 
=?UTF-8?q?=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):=20=E5=90=AF=E5=8A=A8s?= =?UTF-8?q?ketch=E6=8E=A8=E8=8D=90=E6=8E=A5=E5=8F=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_route.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/api/api_route.py b/app/api/api_route.py index 7fbbc9c..9858ba6 100644 --- a/app/api/api_route.py +++ b/app/api/api_route.py @@ -1,6 +1,6 @@ from fastapi import APIRouter -from app.api import api_agent_generate_image +from app.api import api_agent_generate_image, api_recommendation from app.api import api_attribute_retrieve, api_query_image from app.api import api_brand_dna from app.api import api_brighten @@ -29,7 +29,7 @@ router.include_router(api_image2sketch.router, tags=['api_image2sketch'], prefix router.include_router(api_brighten.router, tags=['api_brighten'], prefix="/api") router.include_router(api_query_image.router, tags=['api_query_image'], prefix="/api") router.include_router(api_brand_dna.router, tags=['api_brand_dna'], prefix="/api") -# router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") +router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") router.include_router(api_mannequins_edit.router, tags=['api_mannequins_edit'], prefix="/api") router.include_router(api_agent_generate_image.router, tags=['api_agent_generate_image'], prefix="/api") router.include_router(api_pose_transform.router, tags=['api_pose_transform'], prefix="/api") From 28ef85c061f2b321b2a77c003faea75b23215c90 Mon Sep 17 00:00:00 2001 From: shahaibo <1023316923@qq.com> Date: Wed, 19 Mar 2025 15:05:29 +0800 Subject: [PATCH 019/101] =?UTF-8?q?TASK=EF=BC=9A=E7=B3=BB=E7=BB=9Fsketch?= 
=?UTF-8?q?=E6=8E=A8=E8=8D=90=E6=8E=A5=E5=8F=A3=E6=96=B0=E7=94=A8=E6=88=B7?= =?UTF-8?q?=E5=A4=84=E7=90=86=EF=BC=9B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_recommendation.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/app/api/api_recommendation.py b/app/api/api_recommendation.py index c533709..93fb251 100644 --- a/app/api/api_recommendation.py +++ b/app/api/api_recommendation.py @@ -31,6 +31,15 @@ async def startup_event(): scheduler.start() logger.info("定时任务已启动") +def get_random_recommendations(category: str, num: int) -> List[str]: + """全品类随机推荐""" + all_iids = list(matrix_data["iid_to_sketch"].keys()) + # 优先从当前品类选择 + category_iids = matrix_data["category_to_iids"].get(category, all_iids) + # 确保不超出实际数量 + sample_size = min(num, len(category_iids)) + sampled = np.random.choice(category_iids, size=sample_size, replace=False) + return [matrix_data["iid_to_sketch"][iid] for iid in sampled] @router.get("/recommend/{user_id}/{category}/{num_recommendations}", response_model=List[str]) async def get_recommendations(user_id: int, category: str, num_recommendations: int = 10): @@ -46,6 +55,14 @@ async def get_recommendations(user_id: int, category: str, num_recommendations: try: start_time = time.time() cache_key = (user_id, category) + # === 新增:用户存在性检查 === + user_exists_inter = user_id in matrix_data["user_index_interaction"] + user_exists_feat = user_id in matrix_data["user_index_feature"] + + # 任一矩阵不存在用户则返回随机推荐 + if not (user_exists_inter and user_exists_feat): + logger.info(f"用户 {user_id} 数据不完整,触发随机推荐") + return get_random_recommendations(category, num_recommendations) # 检查缓存 if cache_key in matrix_data["cached_scores"]: @@ -100,7 +117,7 @@ async def get_recommendations(user_id: int, category: str, num_recommendations: exps = np.exp(scale) return exps / np.sum(exps) - probs = calibrated_softmax(scores, 0.07) + probs = calibrated_softmax(scores, 0.09) 
chosen_indices = np.random.choice( len(valid_sketch_idxs), @@ -115,4 +132,4 @@ async def get_recommendations(user_id: int, category: str, num_recommendations: except Exception as e: logger.error(f"推荐失败: {str(e)}", exc_info=True) - raise HTTPException(status_code=500, detail=str(e)) + raise HTTPException(status_code=500, detail=str(e)) \ No newline at end of file From 6f48626005ac759a76c743026f0bc4bde8e00572 Mon Sep 17 00:00:00 2001 From: shahaibo <1023316923@qq.com> Date: Wed, 19 Mar 2025 15:23:21 +0800 Subject: [PATCH 020/101] =?UTF-8?q?TASK=EF=BC=9A=E7=B3=BB=E7=BB=9Fsketch?= =?UTF-8?q?=E6=8E=A8=E8=8D=90=E6=8E=A5=E5=8F=A3=E9=A2=84=E7=BC=93=E5=AD=98?= =?UTF-8?q?=E9=80=9F=E5=BA=A6=E4=BC=98=E5=8C=96=EF=BC=9B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/recommend/service.py | 91 ++++++++++++++++++++++---------- 1 file changed, 64 insertions(+), 27 deletions(-) diff --git a/app/service/recommend/service.py b/app/service/recommend/service.py index bbdc6c3..1ff9336 100644 --- a/app/service/recommend/service.py +++ b/app/service/recommend/service.py @@ -9,6 +9,7 @@ from app.core.config import DB_CONFIG, RECOMMEND_PATH_PREFIX logger = logging.getLogger() import pymysql +from concurrent.futures import ThreadPoolExecutor matrix_data = { "interaction_matrix": None, @@ -66,7 +67,7 @@ def load_resources(): def precache_user_category(): - """预缓存用户-分类组合数据""" + """优化后的用户分类预缓存(添加耗时统计)""" if not all([ matrix_data["interaction_matrix"] is not None, matrix_data["feature_matrix"] is not None, @@ -75,61 +76,97 @@ def precache_user_category(): logger.warning("资源未加载完成,跳过预缓存") return - start_time = time.time() + start_time = time.perf_counter() + time_stats = { + "get_all_user_categories": 0, + "process_user_category": 0, + "thread_execution": 0, + "cache_update": 0, + "total": 0, + } + + # 统计用户类别获取时间 + t1 = time.perf_counter() user_categories = get_all_user_categories() + time_stats["get_all_user_categories"] = 
time.perf_counter() - t1 precached_count = 0 - for user_id, categories in user_categories.items(): + + def process_user_category(user_id, categories): + """单用户类别缓存计算(统计耗时)""" + local_cache = {} + local_valid_idxs = {} + t_start = time.perf_counter() + for category in categories: cache_key = (user_id, category) if cache_key in matrix_data["cached_scores"]: continue try: - # 获取用户索引 user_idx_inter = matrix_data["user_index_interaction"].get(user_id) user_idx_feature = matrix_data["user_index_feature"].get(user_id) - # 获取类别对应的iid列表 + # 统计获取类别 IID 耗时 + t_iid = time.perf_counter() category_iids = matrix_data["category_to_iids"].get(category, []) + valid_sketch_idxs_inter = [matrix_data["sketch_index_interaction"][iid] + for iid in category_iids if iid in matrix_data["sketch_index_interaction"]] + valid_sketch_idxs_feature = [matrix_data["sketch_index_feature"][iid] + for iid in category_iids if iid in matrix_data["sketch_index_feature"]] + time_stats["process_user_category"] += time.perf_counter() - t_iid - # 过滤有效草图索引 - valid_sketch_idxs_inter = [ - idx for iid, idx in matrix_data["sketch_index_interaction"].items() - if iid in category_iids - ] - - # 处理交互分数 + # 统计矩阵计算耗时 + t_matrix = time.perf_counter() + processed_inter = np.zeros(len(valid_sketch_idxs_inter)) if user_idx_inter is not None and valid_sketch_idxs_inter: raw_inter_scores = matrix_data["interaction_matrix"][user_idx_inter, valid_sketch_idxs_inter] processed_inter = raw_inter_scores * 0.7 - else: - processed_inter = np.array([]) - - # 处理特征分数 - valid_sketch_idxs_feature = [ - idx for iid, idx in matrix_data["sketch_index_feature"].items() - if iid in category_iids - ] + processed_feat = np.zeros(len(valid_sketch_idxs_feature)) if user_idx_feature is not None and valid_sketch_idxs_feature: raw_feat_scores = matrix_data["feature_matrix"][user_idx_feature, valid_sketch_idxs_feature] raw_feat_scores = (raw_feat_scores - np.min(raw_feat_scores)) / ( np.max(raw_feat_scores) - np.min(raw_feat_scores) + 1e-8) 
processed_feat = raw_feat_scores * 0.3 - else: - processed_feat = np.array([]) + time_stats["process_user_category"] += time.perf_counter() - t_matrix - # 缓存结果 if len(processed_inter) == len(processed_feat): - matrix_data["cached_scores"][cache_key] = (processed_inter, processed_feat) - matrix_data["cached_valid_idxs"][cache_key] = valid_sketch_idxs_inter - precached_count += 1 + local_cache[cache_key] = (processed_inter, processed_feat) + local_valid_idxs[cache_key] = valid_sketch_idxs_inter except Exception as e: logger.error(f"预缓存失败 (user={user_id}, category={category}): {str(e)}") - logger.info(f"预缓存完成,共缓存 {precached_count} 个组合,耗时: {time.time() - start_time:.2f}秒") + return local_cache, local_valid_idxs + + # 统计线程执行时间 + t2 = time.perf_counter() + with ThreadPoolExecutor(max_workers=8) as executor: + futures = {executor.submit(process_user_category, user_id, categories): user_id for user_id, categories in user_categories.items()} + for future in futures: + try: + t_cache = time.perf_counter() + cache_part, valid_idxs_part = future.result() + matrix_data["cached_scores"].update(cache_part) + matrix_data["cached_valid_idxs"].update(valid_idxs_part) + time_stats["cache_update"] += time.perf_counter() - t_cache + precached_count += len(cache_part) + except Exception as e: + logger.error(f"线程执行错误: {str(e)}") + time_stats["thread_execution"] = time.perf_counter() - t2 + + time_stats["total"] = time.perf_counter() - start_time + + # 输出统计信息 + logger.info(f""" + 预缓存完成,共缓存 {precached_count} 组数据,耗时统计如下: + - 获取用户类别数据: {time_stats["get_all_user_categories"]:.2f}s + - 计算用户类别缓存: {time_stats["process_user_category"]:.2f}s + - 线程任务执行: {time_stats["thread_execution"]:.2f}s + - 更新缓存数据: {time_stats["cache_update"]:.2f}s + - 总耗时: {time_stats["total"]:.2f}s + """) def get_all_user_categories(): From dab155d200adb2a318f5cfc888d63f12d3edba52 Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Thu, 20 Mar 2025 10:25:33 +0800 Subject: [PATCH 021/101] 
=?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20pose=20transform=20=E9=80=BB=E8=BE=91?= =?UTF-8?q?=E4=BF=AE=E6=94=B9=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89?= =?UTF-8?q?:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:?= =?UTF-8?q?=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_pose_transform.py | 3 ++- app/core/config.py | 5 +++++ app/schemas/pose_transform.py | 1 + .../generate_image/service_pose_transform.py | 14 ++++++++++---- 4 files changed, 18 insertions(+), 5 deletions(-) diff --git a/app/api/api_pose_transform.py b/app/api/api_pose_transform.py index fe5fc5a..4b66467 100644 --- a/app/api/api_pose_transform.py +++ b/app/api/api_pose_transform.py @@ -24,7 +24,8 @@ def pose_transform(request_item: PoseTransformModel, background_tasks: Backgroun { "tasks_id": "123-89", "image_url": "aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png", - "pose_id": "1" + "pose_id": "1", + "result_type" : "gif" } """ try: diff --git a/app/core/config.py b/app/core/config.py index 5a1e2a3..662d7e2 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -146,6 +146,11 @@ GRI_MODEL_NAME_OVERALL = 'diffusion_relight_ensemble' GRI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_relight' GRI_MODEL_URL = '10.1.1.240:10051' + +# Pose Transform service config + +PS_RABBITMQ_QUEUES = os.getenv("PS_RABBITMQ_QUEUES", f"PoseTransform{RABBITMQ_ENV}") + # SEG service config SEGMENTATION = { "new_model_name": "seg_knet", diff --git a/app/schemas/pose_transform.py b/app/schemas/pose_transform.py index 045d8b9..05db63f 100644 --- a/app/schemas/pose_transform.py +++ b/app/schemas/pose_transform.py @@ -5,3 +5,4 @@ class PoseTransformModel(BaseModel): image_url: str tasks_id: str pose_id: str + result_type: str diff --git a/app/service/generate_image/service_pose_transform.py 
b/app/service/generate_image/service_pose_transform.py index f2948b3..8de243e 100644 --- a/app/service/generate_image/service_pose_transform.py +++ b/app/service/generate_image/service_pose_transform.py @@ -38,7 +38,12 @@ class PoseTransformService: self.image = oss_get_image(bucket=self.image_url.split('/')[0], object_name=self.image_url[self.image_url.find('/') + 1:], data_type="cv2") self.tasks_id = request_data.tasks_id self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:] - self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'image_url': ''} + self.result_type = request_data.result_type + if self.result_type == "gif": + self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': 'test/mannequin_name.png', 'video_url': '', 'type': self.result_type} + else: + self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': '', 'video_url': 'test/mannequin_name.png', 'type': self.result_type} + self.redis_client.set(self.tasks_id, json.dumps(self.gen_product_data)) self.redis_client.expire(self.tasks_id, 600) @@ -95,8 +100,8 @@ class PoseTransformService: finally: dict_gen_product_data, str_gen_product_data = self.read_tasks_status() if DEBUG is False: - self.channel.basic_publish(exchange='', routing_key=GRI_RABBITMQ_QUEUES, body=str_gen_product_data) - logger.info(f" [x] Sent to: {GRI_RABBITMQ_QUEUES} data:@@@@ {json.dumps(dict_gen_product_data, indent=4)}") + self.channel.basic_publish(exchange='', routing_key=PS_RABBITMQ_QUEUES, body=str_gen_product_data) + logger.info(f" [x] Sent to: {PS_RABBITMQ_QUEUES} data:@@@@ {json.dumps(dict_gen_product_data, indent=4)}") def infer_cancel(tasks_id): @@ -111,7 +116,8 @@ if __name__ == '__main__': rd = PoseTransformModel( tasks_id="123-89", image_url='aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png', - pose_id="1" + pose_id="1", + result_type="gif", ) server = 
PoseTransformService(rd) print(server.get_result()) From 157f75b0a6e9aec545aaf3c7dc72bb72b30c4fee Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Thu, 20 Mar 2025 10:34:21 +0800 Subject: [PATCH 022/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20pose=20transform=20=E9=80=BB=E8=BE=91?= =?UTF-8?q?=E4=BF=AE=E6=94=B9=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89?= =?UTF-8?q?:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:?= =?UTF-8?q?=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/generate_image/service_pose_transform.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/app/service/generate_image/service_pose_transform.py b/app/service/generate_image/service_pose_transform.py index 8de243e..f22cdfb 100644 --- a/app/service/generate_image/service_pose_transform.py +++ b/app/service/generate_image/service_pose_transform.py @@ -41,8 +41,10 @@ class PoseTransformService: self.result_type = request_data.result_type if self.result_type == "gif": self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': 'test/mannequin_name.png', 'video_url': '', 'type': self.result_type} - else: + elif self.result_type == "video": self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': '', 'video_url': 'test/mannequin_name.png', 'type': self.result_type} + elif self.result_type == "all": + self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': 'test/mannequin_name.png', 'video_url': 'test/mannequin_name.png', 'type': self.result_type} self.redis_client.set(self.tasks_id, json.dumps(self.gen_product_data)) self.redis_client.expire(self.tasks_id, 600) From 754aee71180e78ceb947de3eda847b1b12e6fe2b Mon Sep 17 
00:00:00 2001 From: zhouchengrong Date: Thu, 20 Mar 2025 10:41:36 +0800 Subject: [PATCH 023/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20pose=20transform=20=E9=80=BB=E8=BE=91?= =?UTF-8?q?=E4=BF=AE=E6=94=B9=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89?= =?UTF-8?q?:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:?= =?UTF-8?q?=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_pose_transform.py | 3 +-- app/schemas/pose_transform.py | 1 - app/service/generate_image/service_pose_transform.py | 7 +------ 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/app/api/api_pose_transform.py b/app/api/api_pose_transform.py index 4b66467..fe5fc5a 100644 --- a/app/api/api_pose_transform.py +++ b/app/api/api_pose_transform.py @@ -24,8 +24,7 @@ def pose_transform(request_item: PoseTransformModel, background_tasks: Backgroun { "tasks_id": "123-89", "image_url": "aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png", - "pose_id": "1", - "result_type" : "gif" + "pose_id": "1" } """ try: diff --git a/app/schemas/pose_transform.py b/app/schemas/pose_transform.py index 05db63f..045d8b9 100644 --- a/app/schemas/pose_transform.py +++ b/app/schemas/pose_transform.py @@ -5,4 +5,3 @@ class PoseTransformModel(BaseModel): image_url: str tasks_id: str pose_id: str - result_type: str diff --git a/app/service/generate_image/service_pose_transform.py b/app/service/generate_image/service_pose_transform.py index f22cdfb..4892d9a 100644 --- a/app/service/generate_image/service_pose_transform.py +++ b/app/service/generate_image/service_pose_transform.py @@ -39,12 +39,7 @@ class PoseTransformService: self.tasks_id = request_data.tasks_id self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:] self.result_type = request_data.result_type - if self.result_type == "gif": - 
self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': 'test/mannequin_name.png', 'video_url': '', 'type': self.result_type} - elif self.result_type == "video": - self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': '', 'video_url': 'test/mannequin_name.png', 'type': self.result_type} - elif self.result_type == "all": - self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': 'test/mannequin_name.png', 'video_url': 'test/mannequin_name.png', 'type': self.result_type} + self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': 'test/mannequin_name.png', 'video_url': 'test/mannequin_name.png', 'type': self.result_type} self.redis_client.set(self.tasks_id, json.dumps(self.gen_product_data)) self.redis_client.expire(self.tasks_id, 600) From 87a3d43950b8b27bf9b155286c1ef964eebc4898 Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Thu, 20 Mar 2025 10:58:29 +0800 Subject: [PATCH 024/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20pose=20transform=20=E9=80=BB=E8=BE=91?= =?UTF-8?q?=E4=BF=AE=E6=94=B9=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89?= =?UTF-8?q?:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:?= =?UTF-8?q?=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/generate_image/service_pose_transform.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/app/service/generate_image/service_pose_transform.py b/app/service/generate_image/service_pose_transform.py index 4892d9a..b0a5cf3 100644 --- a/app/service/generate_image/service_pose_transform.py +++ b/app/service/generate_image/service_pose_transform.py @@ -38,8 +38,7 @@ class 
PoseTransformService: self.image = oss_get_image(bucket=self.image_url.split('/')[0], object_name=self.image_url[self.image_url.find('/') + 1:], data_type="cv2") self.tasks_id = request_data.tasks_id self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:] - self.result_type = request_data.result_type - self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': 'test/mannequin_name.png', 'video_url': 'test/mannequin_name.png', 'type': self.result_type} + self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': 'test/mannequin_name.png', 'video_url': 'test/mannequin_name.png'} self.redis_client.set(self.tasks_id, json.dumps(self.gen_product_data)) self.redis_client.expire(self.tasks_id, 600) @@ -113,8 +112,7 @@ if __name__ == '__main__': rd = PoseTransformModel( tasks_id="123-89", image_url='aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png', - pose_id="1", - result_type="gif", + pose_id="1" ) server = PoseTransformService(rd) print(server.get_result()) From 53731e3894fe655bed112186d9b07212b3543051 Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Thu, 20 Mar 2025 15:50:08 +0800 Subject: [PATCH 025/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20pose=20transform=20=E9=80=BB=E8=BE=91?= =?UTF-8?q?=E4=BF=AE=E6=94=B9=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89?= =?UTF-8?q?:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:?= =?UTF-8?q?=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/generate_image/service_pose_transform.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/service/generate_image/service_pose_transform.py b/app/service/generate_image/service_pose_transform.py index b0a5cf3..6c1c1c9 100644 --- 
a/app/service/generate_image/service_pose_transform.py +++ b/app/service/generate_image/service_pose_transform.py @@ -38,7 +38,7 @@ class PoseTransformService: self.image = oss_get_image(bucket=self.image_url.split('/')[0], object_name=self.image_url[self.image_url.find('/') + 1:], data_type="cv2") self.tasks_id = request_data.tasks_id self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:] - self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': 'test/mannequin_name.png', 'video_url': 'test/mannequin_name.png'} + self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': 'test/mannequin_name.png', 'video_url': 'test/mannequin_name.png', 'image_url': 'test/mannequin_name.png'} self.redis_client.set(self.tasks_id, json.dumps(self.gen_product_data)) self.redis_client.expire(self.tasks_id, 600) From 44d63af2adac233fabc3fff7dd28efd19b40257f Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Thu, 20 Mar 2025 20:23:35 +0800 Subject: [PATCH 026/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=E6=A8=A1=E7=89=B9=E7=BC=96=E8=BE=91?= =?UTF-8?q?=E9=80=BB=E8=BE=91=E4=BF=AE=E6=94=B9=20fix=EF=BC=88=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_mannequins_edit.py | 6 +- app/schemas/mannequin_edit.py | 4 +- app/service/mannequins_edit/service.py | 77 +++++++++----------------- 3 files changed, 34 insertions(+), 53 deletions(-) diff --git a/app/api/api_mannequins_edit.py b/app/api/api_mannequins_edit.py index 5cfaf3d..9eec805 100644 --- a/app/api/api_mannequins_edit.py +++ b/app/api/api_mannequins_edit.py @@ -24,9 +24,11 @@ def 
mannequins_edit(request_data: MannequinModel): 示例参数: - **{ "mannequins": "aida-sys-image/models/male/dc36ce58-46c3-4b6f-8787-5ca7d6fc26e6.png", - "scale": [0.75, 0.75], + "scale": 0.75, "bucket_name": "test", - "mannequin_name": "mannequin_name" + "mannequin_name": "mannequin_name", + "top" : 270, + "bottom" : 432 }** """ try: diff --git a/app/schemas/mannequin_edit.py b/app/schemas/mannequin_edit.py index c5514d6..2a8f5f9 100644 --- a/app/schemas/mannequin_edit.py +++ b/app/schemas/mannequin_edit.py @@ -3,6 +3,8 @@ from pydantic import BaseModel class MannequinModel(BaseModel): mannequins: str - scale: list[float, float] + scale: float bucket_name: str mannequin_name: str + top: int + bottom: int diff --git a/app/service/mannequins_edit/service.py b/app/service/mannequins_edit/service.py index bbb6cc5..685709f 100644 --- a/app/service/mannequins_edit/service.py +++ b/app/service/mannequins_edit/service.py @@ -1,5 +1,4 @@ import cv2 -import mediapipe as mp import numpy as np from minio import Minio @@ -13,6 +12,8 @@ minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET class MannequinEditService(): def __init__(self, request_data): self.scale = request_data.scale + self.top = request_data.top + self.bottom = request_data.bottom self.image = oss_get_image(oss_client=minio_client, bucket=request_data.mannequins.split('/')[0], object_name=request_data.mannequins[request_data.mannequins.find('/') + 1:], data_type="cv2") self.mannequin_name = request_data.mannequin_name self.bucket_name = request_data.bucket_name @@ -27,59 +28,33 @@ class MannequinEditService(): self.alpha = None def __call__(self, *args, **kwargs): - leg_top, leg_bottom = self.attitude_detection() - if leg_top and leg_bottom: - new_mannequin = self.resize_leg(leg_top, leg_bottom) - _, encoded_image = cv2.imencode('.png', new_mannequin) - image_bytes = encoded_image.tobytes() - req = oss_upload_image(oss_client=minio_client, bucket=self.bucket_name, 
object_name=f"{self.mannequin_name}.png", image_bytes=image_bytes) - return req.bucket_name + "/" + req.object_name - else: - return "No leg detected" + new_mannequin = self.resize_leg(self.top, self.bottom) + _, encoded_image = cv2.imencode('.png', new_mannequin) + image_bytes = encoded_image.tobytes() + req = oss_upload_image(oss_client=minio_client, bucket=self.bucket_name, object_name=f"{self.mannequin_name}.png", image_bytes=image_bytes) + return req.bucket_name + "/" + req.object_name - def attitude_detection(self): - mp_pose = mp.solutions.pose - pose = mp_pose.Pose() + def resize_leg(self, top, bottom): + # 上部 + top_part = self.bgr[:top, :] + top_part_alpha = self.alpha[:top, :] - # 将 BGR 图像转换为 RGB 格式 - image_rgb = cv2.cvtColor(self.bgr, cv2.COLOR_BGR2RGB) - leg_top, leg_bottom = None, None - # 进行姿态检测 - results = pose.process(image_rgb) - if results.pose_landmarks: - # 获取腿部关键点 - landmarks = results.pose_landmarks.landmark + # 需要resize 部分 + part_resize = self.bgr[top:bottom, :] + part_resize_alpha = self.alpha[top:bottom, :] - # 找到腿部上边界和下边界 - leg_top = int(landmarks[mp_pose.PoseLandmark.LEFT_HIP].y * self.h) - leg_bottom = int(max(landmarks[mp_pose.PoseLandmark.LEFT_ANKLE].y, - landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE].y) * self.h) + # 下部 + part_bottom = self.bgr[bottom:, :] + part_bottom_alpha = self.alpha[bottom:, :] - return leg_top, leg_bottom + new_height = int((bottom - top) * self.scale) - def resize_leg(self, leg_top, leg_bottom): - # 上半身 - top_part_bgr = self.bgr[:leg_top, :] - top_part_bgr_alpha = self.alpha[:leg_top, :] + resized_thigh = cv2.resize(part_resize, (self.w, new_height), interpolation=cv2.INTER_LINEAR) + resized_thigh_alpha = cv2.resize(part_resize_alpha, (self.w, new_height), interpolation=cv2.INTER_LINEAR) - # 小腿 - part_thigh = self.bgr[leg_top:leg_bottom, :] - part_thigh_alpha = self.alpha[leg_top:leg_bottom, :] - - # 大腿 - part_calf = self.bgr[leg_bottom:, :] - part_calf_alpha = self.alpha[leg_bottom:, :] - - new_thigh_height = 
int((leg_bottom - leg_top) * self.scale[0]) - new_calf_height = int((self.h - leg_bottom) * self.scale[1]) - - resized_thigh = cv2.resize(part_thigh, (self.w, new_thigh_height), interpolation=cv2.INTER_LINEAR) - resized_thigh_alpha = cv2.resize(part_thigh_alpha, (self.w, new_thigh_height), interpolation=cv2.INTER_LINEAR) - resized_calf = cv2.resize(part_calf, (self.w, new_calf_height), interpolation=cv2.INTER_LINEAR) - resized_calf_alpha = cv2.resize(part_calf_alpha, (self.w, new_calf_height), interpolation=cv2.INTER_LINEAR) - - new_bgr = np.vstack((top_part_bgr, resized_thigh, resized_calf)) - new_bgr_alpha = np.vstack((top_part_bgr_alpha, resized_thigh_alpha, resized_calf_alpha)) + # 组合 + new_bgr = np.vstack((top_part, resized_thigh, part_bottom)) + new_bgr_alpha = np.vstack((top_part_alpha, resized_thigh_alpha, part_bottom_alpha)) if self.alpha is not None: # 拼接 alpha 通道 @@ -93,9 +68,11 @@ class MannequinEditService(): if __name__ == '__main__': request_data = MannequinModel( mannequins="aida-sys-image/models/male/dc36ce58-46c3-4b6f-8787-5ca7d6fc26e6.png", - scale=[0.75, 0.75], + scale=0.1, bucket_name="test", - mannequin_name="mannequin_name" + mannequin_name="mannequin_name", + top=270, + bottom=432 ) service = MannequinEditService(request_data) print(service()) From 7b01d3e5174454200c13b114c8bec86677b53edc Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Mon, 24 Mar 2025 15:16:57 +0800 Subject: [PATCH 027/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20brand=20dna=20=E6=96=87=E4=BB=B6?= =?UTF-8?q?=E8=B7=AF=E5=BE=84=E4=BF=AE=E6=94=B9=20fix=EF=BC=88=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/brand_dna/service.py | 6 +++--- 1 
file changed, 3 insertions(+), 3 deletions(-) diff --git a/app/service/brand_dna/service.py b/app/service/brand_dna/service.py index 012e682..5a525c2 100644 --- a/app/service/brand_dna/service.py +++ b/app/service/brand_dna/service.py @@ -9,7 +9,7 @@ import torch.nn.functional as F import tritonclient.http as httpclient from minio import Minio -from app.core.config import MINIO_URL, MINIO_ACCESS, MINIO_SECRET, MINIO_SECURE, DESIGN_MODEL_URL +from app.core.config import MINIO_URL, MINIO_ACCESS, MINIO_SECRET, MINIO_SECURE, DESIGN_MODEL_URL, CATEGORY_PATH from app.schemas.brand_dna import BrandDnaModel from app.service.attribute.config import local_debug_const from app.service.utils.generate_uuid import generate_uuid @@ -25,8 +25,8 @@ class BrandDna: self.sketch_bucket = "test" self.image_url = request_item.image_url self.is_brand_dna = request_item.is_brand_dna - # self.attr_type = pd.read_csv(CATEGORY_PATH) - self.attr_type = pd.read_csv(r"E:\workspace\trinity_client_aida\app\service\attribute\config\descriptor\category\category_dis.csv") + self.attr_type = pd.read_csv(CATEGORY_PATH) + # self.attr_type = pd.read_csv(r"E:\workspace\trinity_client_aida\app\service\attribute\config\descriptor\category\category_dis.csv") self.att_client = httpclient.InferenceServerClient(url=DESIGN_MODEL_URL) self.seg_client = httpclient.InferenceServerClient(url='10.1.1.243:30000') # self.const = const From 5a643af5a6b6966d721798a1b66f6496258ff262 Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Mon, 24 Mar 2025 15:24:15 +0800 Subject: [PATCH 028/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20brand=20dna=20=E6=96=87=E4=BB=B6?= =?UTF-8?q?=E8=B7=AF=E5=BE=84=E4=BF=AE=E6=94=B9=20fix=EF=BC=88=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/brand_dna/service.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/app/service/brand_dna/service.py b/app/service/brand_dna/service.py index 5a525c2..fd35c19 100644 --- a/app/service/brand_dna/service.py +++ b/app/service/brand_dna/service.py @@ -35,8 +35,8 @@ class BrandDna: # 获取结果 def get_result(self): mask, image = self.get_seg_mask() - cv2.imshow("", image) - cv2.waitKey(0) + # cv2.imshow("", image) + # cv2.waitKey(0) height, width, channels = image.shape result_dict = [] @@ -50,8 +50,8 @@ class BrandDna: outwear_img[mask == value] = image[mask == value] outwear_mask_img[mask == value] = [0, 0, 255] - cv2.imshow("", outwear_img) - cv2.waitKey(0) + # cv2.imshow("", outwear_img) + # cv2.waitKey(0) # 预处理之后的input img preprocess_img = self.category_preprocess(outwear_img) @@ -89,8 +89,8 @@ class BrandDna: tops_img[mask == value] = image[mask == value] tops_mask_img[mask == value] = [0, 0, 255] - cv2.imshow("", tops_img) - cv2.waitKey(0) + # cv2.imshow("", tops_img) + # cv2.waitKey(0) # 预处理之后的input img preprocess_img = self.category_preprocess(tops_img) @@ -129,8 +129,8 @@ class BrandDna: bottoms_img[mask == value] = image[mask == value] bottoms_mask_img[mask == value] = [0, 0, 255] - cv2.imshow("", bottoms_img) - cv2.waitKey(0) + # cv2.imshow("", bottoms_img) + # cv2.waitKey(0) # 预处理之后的input img preprocess_img = self.category_preprocess(bottoms_img) @@ -327,7 +327,7 @@ if __name__ == '__main__': # result_url = service.get_result() # print(result_url) request_item = BrandDnaModel( - image_url="aida-users/60/product_image/07cb5d5d-5022-44cc-b0d3-cc986cfebad1-2-60.png", + image_url="aida-results/result_00006a48-e315-11ee-b7c8-b48351119060.png", is_brand_dna=True ) service = BrandDna(request_item) From 99f2e66088a3211dadf0c8e791f6d18491723e0c Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Mon, 24 Mar 2025 15:24:54 +0800 Subject: [PATCH 
029/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20brand=20dna=20=E6=96=87=E4=BB=B6?= =?UTF-8?q?=E8=B7=AF=E5=BE=84=E4=BF=AE=E6=94=B9=20fix=EF=BC=88=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_brand_dna.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/api/api_brand_dna.py b/app/api/api_brand_dna.py index 6b19416..a250ead 100644 --- a/app/api/api_brand_dna.py +++ b/app/api/api_brand_dna.py @@ -20,8 +20,8 @@ def image2sketch(request_item: BrandDnaModel): 示例参数: { - "image_url": "test/image2sketch/real_Dress_3200fecdc83d0c556c2bd96aedbd7fbf.jpg_Img.jpg", - "is_brand_dna": False + "image_url": "aida-results/result_00006a48-e315-11ee-b7c8-b48351119060.png", + "is_brand_dna": false } """ try: From b69aadbcfbd41585bf75c09bb53564df361e1e59 Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Mon, 24 Mar 2025 15:39:35 +0800 Subject: [PATCH 030/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20brand=20dna=20=E6=96=87=E4=BB=B6?= =?UTF-8?q?=E8=B7=AF=E5=BE=84=E4=BF=AE=E6=94=B9=20fix=EF=BC=88=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/brand_dna/service.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/app/service/brand_dna/service.py b/app/service/brand_dna/service.py index fd35c19..393d75a 100644 --- a/app/service/brand_dna/service.py +++ 
b/app/service/brand_dna/service.py @@ -11,7 +11,7 @@ from minio import Minio from app.core.config import MINIO_URL, MINIO_ACCESS, MINIO_SECRET, MINIO_SECURE, DESIGN_MODEL_URL, CATEGORY_PATH from app.schemas.brand_dna import BrandDnaModel -from app.service.attribute.config import local_debug_const +from app.service.attribute.config import local_debug_const, const from app.service.utils.generate_uuid import generate_uuid from app.service.utils.new_oss_client import oss_upload_image, oss_get_image @@ -29,8 +29,8 @@ class BrandDna: # self.attr_type = pd.read_csv(r"E:\workspace\trinity_client_aida\app\service\attribute\config\descriptor\category\category_dis.csv") self.att_client = httpclient.InferenceServerClient(url=DESIGN_MODEL_URL) self.seg_client = httpclient.InferenceServerClient(url='10.1.1.243:30000') - # self.const = const - self.const = local_debug_const + self.const = const + # self.const = local_debug_const # 获取结果 def get_result(self): From d029bdb9441709b8588c0b00860f6a0d4a508d4c Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Tue, 25 Mar 2025 15:55:52 +0800 Subject: [PATCH 031/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20brand=20name=20slogan=20logo=20?= =?UTF-8?q?=E7=94=9F=E6=88=90=E6=9C=8D=E5=8A=A1=20fix=EF=BC=88=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_brand_dna.py | 29 ++++- app/core/config.py | 16 ++- app/schemas/brand_dna.py | 5 + .../brand_dna/service_generate_brand_info.py | 104 ++++++++++++++++++ app/service/brand_dna/test.py | 32 ++++++ 5 files changed, 175 insertions(+), 11 deletions(-) create mode 100644 app/service/brand_dna/service_generate_brand_info.py create mode 100644 
app/service/brand_dna/test.py diff --git a/app/api/api_brand_dna.py b/app/api/api_brand_dna.py index a250ead..2133ee9 100644 --- a/app/api/api_brand_dna.py +++ b/app/api/api_brand_dna.py @@ -3,16 +3,17 @@ import logging from fastapi import APIRouter, HTTPException -from app.schemas.brand_dna import BrandDnaModel +from app.schemas.brand_dna import BrandDnaModel, GenerateBrandModel from app.schemas.response_template import ResponseModel from app.service.brand_dna.service import BrandDna +from app.service.brand_dna.service_generate_brand_info import GenerateBrandInfo router = APIRouter() logger = logging.getLogger() @router.post("/seg_product") -def image2sketch(request_item: BrandDnaModel): +def seg_product(request_item: BrandDnaModel): """ 创建一个具有以下参数的请求体: - **image_url**: 提取图片url @@ -32,3 +33,27 @@ def image2sketch(request_item: BrandDnaModel): logger.warning(f"brand dna Run Exception @@@@@@:{e}") raise HTTPException(status_code=404, detail=str(e)) return ResponseModel(data=result_url) + + +@router.post("/GenerateBrand") +def GenerateBrand(request_data: GenerateBrandModel): + """ + 通过prompt 生成 brand name ,brand slogan , brand logo。 + 创建一个具有以下参数的请求体: + - **prompt**: + + 示例参数: + { + "prompt": "xiaomi", + "user_id": "89" + } + """ + try: + logger.info(f"GenerateBrand request item is : @@@@@@:{request_data}") + service = GenerateBrandInfo(request_data) + data = service.get_result() + logger.info(f"GenerateBrand response @@@@@@:{data}") + except Exception as e: + logger.warning(f"GenerateBrand Run Exception @@@@@@:{e}") + raise HTTPException(status_code=404, detail=str(e)) + return ResponseModel(data=data) diff --git a/app/core/config.py b/app/core/config.py index 662d7e2..6ac56e3 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -9,14 +9,14 @@ load_dotenv(os.path.join(BASE_DIR, '.env')) class Settings(BaseSettings): - PROJECT_NAME = os.getenv('PROJECT_NAME', 'FASTAPI BASE') - SECRET_KEY = os.getenv('SECRET_KEY', '') - API_PREFIX = '' - BACKEND_CORS_ORIGINS = 
['*'] - DATABASE_URL = os.getenv('SQL_DATABASE_URL', '') + PROJECT_NAME: str = os.getenv('PROJECT_NAME', 'FASTAPI BASE') + SECRET_KEY: str = os.getenv('SECRET_KEY', '') + API_PREFIX: str = '' + BACKEND_CORS_ORIGINS: list[str] = ['*'] + DATABASE_URL: str = os.getenv('SQL_DATABASE_URL', '') ACCESS_TOKEN_EXPIRE_SECONDS: int = 60 * 60 * 24 * 7 # Token expired after 7 days - SECURITY_ALGORITHM = 'HS256' - LOGGING_CONFIG_FILE = os.path.join(BASE_DIR, 'logging_env.py') + SECURITY_ALGORITHM: str = 'HS256' + LOGGING_CONFIG_FILE: str = os.path.join(BASE_DIR, 'logging_env.py') OSS = "minio" @@ -32,7 +32,6 @@ else: SEG_CACHE_PATH = "/seg_cache/" RECOMMEND_PATH_PREFIX = "app/service/recommend/" - # RABBITMQ_ENV = "" # 生产环境 RABBITMQ_ENV = "-dev" # 开发环境 # RABBITMQ_ENV = "-local" # 本地测试环境 @@ -146,7 +145,6 @@ GRI_MODEL_NAME_OVERALL = 'diffusion_relight_ensemble' GRI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_relight' GRI_MODEL_URL = '10.1.1.240:10051' - # Pose Transform service config PS_RABBITMQ_QUEUES = os.getenv("PS_RABBITMQ_QUEUES", f"PoseTransform{RABBITMQ_ENV}") diff --git a/app/schemas/brand_dna.py b/app/schemas/brand_dna.py index c5ae2ab..9796195 100644 --- a/app/schemas/brand_dna.py +++ b/app/schemas/brand_dna.py @@ -4,3 +4,8 @@ from pydantic import BaseModel class BrandDnaModel(BaseModel): image_url: str is_brand_dna: bool + + +class GenerateBrandModel(BaseModel): + user_id: str + prompt: str diff --git a/app/service/brand_dna/service_generate_brand_info.py b/app/service/brand_dna/service_generate_brand_info.py new file mode 100644 index 0000000..73c1294 --- /dev/null +++ b/app/service/brand_dna/service_generate_brand_info.py @@ -0,0 +1,104 @@ +import logging + +import cv2 +import numpy as np +import tritonclient.grpc as grpcclient +from langchain.output_parsers import ResponseSchema, StructuredOutputParser +from langchain_community.chat_models import ChatTongyi +from langchain_core.prompts import PromptTemplate +# from langchain_openai import ChatOpenAI +from minio import 
Minio +from tritonclient.utils import np_to_triton_dtype + +from app.core.config import GI_MODEL_URL, MINIO_URL, MINIO_ACCESS, MINIO_SECRET, MINIO_SECURE, GI_MODEL_NAME +from app.schemas.brand_dna import GenerateBrandModel +from app.service.utils.generate_uuid import generate_uuid +from app.service.utils.new_oss_client import oss_upload_image + + +class GenerateBrandInfo: + def __init__(self, request_data): + # minio client init + self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) + + # user info init + self.user_id = request_data.user_id + self.category = "brand_logo" + # generate logo init + self.grpc_client = grpcclient.InferenceServerClient(url=GI_MODEL_URL) + self.image = np.random.randint(0, 256, (1024, 1024, 3), dtype=np.uint8) + self.batch_size = 1 + self.mode = 'txt2img' + + # llm generate brand info init + self.model = ChatTongyi(model="qwen2.5-14b-instruct", api_key="sk-7658298c6b99443c98184a5e634fe6ab") + + self.response_schemas = [ + ResponseSchema(name="brand_name", description="Brand name."), + ResponseSchema(name="brand_slogan", description="Brand slogan."), + ResponseSchema(name="brand_logo_prompt", description="prompt required for brand logo generation.") + ] + self.output_parser = StructuredOutputParser.from_response_schemas(self.response_schemas) + self.format_instructions = self.output_parser.get_format_instructions() + self.prompt = PromptTemplate( + template="你是一个时装品牌的设计师。根据用户输入提取出brand name,brand slogan,brand logo 描述。如果没有以上内容,需要你根据用户输入随意发挥。随后根据brand logo 描述生成一个prompt,这个prompt用于生成模型.\n{format_instructions}\n{question}", + input_variables=["question"], + partial_variables={"format_instructions": self.format_instructions} + ) + self._input = self.prompt.format_prompt(question=request_data.prompt) + + self.result_data = {} + + def get_result(self): + self.llm_generate_brand_info() + self.generate_brand_logo() + return self.result_data + + def llm_generate_brand_info(self): + output = 
self.model(self._input.to_messages()) + brand_data = self.output_parser.parse(output.content) + self.result_data = brand_data + self.generate_logo_prompt = brand_data['brand_logo_prompt'] + + def generate_brand_logo(self): + prompts = [self.generate_logo_prompt] * self.batch_size + modes = [self.mode] * self.batch_size + images = [self.image.astype(np.float16)] * self.batch_size + + text_obj = np.array(prompts, dtype="object").reshape((-1, 1)) + mode_obj = np.array(modes, dtype="object").reshape((-1, 1)) + image_obj = np.array(images, dtype=np.float16).reshape((-1, 1024, 1024, 3)) + + input_text = grpcclient.InferInput("prompt", text_obj.shape, np_to_triton_dtype(text_obj.dtype)) + input_image = grpcclient.InferInput("input_image", image_obj.shape, np_to_triton_dtype(image_obj.dtype)) + input_mode = grpcclient.InferInput("mode", mode_obj.shape, np_to_triton_dtype(mode_obj.dtype)) + + input_text.set_data_from_numpy(text_obj) + input_image.set_data_from_numpy(image_obj) + input_mode.set_data_from_numpy(mode_obj) + + inputs = [input_text, input_image, input_mode] + result = self.grpc_client.infer(model_name=GI_MODEL_NAME, inputs=inputs) + image = result.as_numpy("generated_image") + image_result = cv2.cvtColor(np.squeeze(image.astype(np.uint8)), cv2.COLOR_RGB2BGR) + logo_url = self.upload_logo_image(image_result, generate_uuid()) + self.result_data['brand_logo'] = logo_url + + def upload_logo_image(self, image, object_name): + try: + _, img_byte_array = cv2.imencode('.jpg', image) + object_name = f'{self.user_id}/{self.category}/{object_name}' + req = oss_upload_image(oss_client=self.minio_client, bucket="aida-users", object_name=object_name, image_bytes=img_byte_array) + image_url = f"aida-users/{object_name}" + return image_url + except Exception as e: + logging.warning(f"upload_png_mask runtime exception : {e}") + + +if __name__ == '__main__': + request_data = GenerateBrandModel( + user_id="89", + prompt="xiaomi" + ) + service = GenerateBrandInfo(request_data) + 
print(service.get_result()) diff --git a/app/service/brand_dna/test.py b/app/service/brand_dna/test.py new file mode 100644 index 0000000..966f76e --- /dev/null +++ b/app/service/brand_dna/test.py @@ -0,0 +1,32 @@ +from dotenv import load_dotenv +from langchain.output_parsers import StructuredOutputParser, ResponseSchema +from langchain_core.prompts import PromptTemplate +from langchain_openai import ChatOpenAI + +# 加载.env文件的环境变量 +load_dotenv() + +# 创建一个大语言模型,model指定了大语言模型的种类 +model = ChatOpenAI(model="qwen2.5-14b-instruct") + +# 想要接收的响应模式 +response_schemas = [ + ResponseSchema(name="brand_name", description="Brand name."), + ResponseSchema(name="brand_slogan", description="Brand slogan."), + ResponseSchema(name="brand_logo_prompt", description="prompt required for brand logo generation.") +] +output_parser = StructuredOutputParser.from_response_schemas(response_schemas) +format_instructions = output_parser.get_format_instructions() +prompt = PromptTemplate( + template="你是一个时装品牌的设计师。根据用户输入提取出brand name,brand slogan,brand logo 描述。如果没有以上内容,需要你根据用户输入随意发挥。随后根据brand logo 描述生成一个prompt,这个prompt用于生成模型.\n{format_instructions}\n{question}", + input_variables=["question"], + partial_variables={"format_instructions": format_instructions} +) +_input = prompt.format_prompt(question="brand name: cat home") + +output = model(_input.to_messages()) +brand_data = output_parser.parse(output.content) + + +def generate_logo(bucket_name, object_name, prompt): + pass From 9fd911500ba3c830bcc527eebb7388beab393bd1 Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Tue, 25 Mar 2025 17:33:25 +0800 Subject: [PATCH 032/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20=E6=A8=A1=E7=89=B9=E7=BC=96=E8=BE=91?= =?UTF-8?q?=E5=8A=9F=E8=83=BD=E5=8F=82=E6=95=B0=E4=BF=AE=E6=94=B9=20fix?= =?UTF-8?q?=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= 
=?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_mannequins_edit.py | 7 +++++-- app/schemas/mannequin_edit.py | 2 +- app/service/mannequins_edit/service.py | 6 +++--- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/app/api/api_mannequins_edit.py b/app/api/api_mannequins_edit.py index 9eec805..6ff34d4 100644 --- a/app/api/api_mannequins_edit.py +++ b/app/api/api_mannequins_edit.py @@ -17,14 +17,17 @@ def mannequins_edit(request_data: MannequinModel): 模特腿长调整 创建一个具有以下参数的请求体: - **mannequins**: mannequins url等信息 - - **scale**: 大腿小腿比例 + - **resize_pixel**: 拉伸像素量 - **bucket_name**: bucket name - **mannequin_name**: 模特名称 + - **top**: 拉伸y轴点位 + - **bottom**: 拉伸y轴点位 + 示例参数: - **{ "mannequins": "aida-sys-image/models/male/dc36ce58-46c3-4b6f-8787-5ca7d6fc26e6.png", - "scale": 0.75, + "resize_pixel": -50, "bucket_name": "test", "mannequin_name": "mannequin_name", "top" : 270, diff --git a/app/schemas/mannequin_edit.py b/app/schemas/mannequin_edit.py index 2a8f5f9..9ea9c8c 100644 --- a/app/schemas/mannequin_edit.py +++ b/app/schemas/mannequin_edit.py @@ -3,7 +3,7 @@ from pydantic import BaseModel class MannequinModel(BaseModel): mannequins: str - scale: float + resize_pixel: float bucket_name: str mannequin_name: str top: int diff --git a/app/service/mannequins_edit/service.py b/app/service/mannequins_edit/service.py index 685709f..8db25d5 100644 --- a/app/service/mannequins_edit/service.py +++ b/app/service/mannequins_edit/service.py @@ -11,7 +11,7 @@ minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET class MannequinEditService(): def __init__(self, request_data): - self.scale = request_data.scale + self.resize_pixel = request_data.resize_pixel self.top = request_data.top self.bottom = request_data.bottom self.image = oss_get_image(oss_client=minio_client, 
bucket=request_data.mannequins.split('/')[0], object_name=request_data.mannequins[request_data.mannequins.find('/') + 1:], data_type="cv2") @@ -47,7 +47,7 @@ class MannequinEditService(): part_bottom = self.bgr[bottom:, :] part_bottom_alpha = self.alpha[bottom:, :] - new_height = int((bottom - top) * self.scale) + new_height = int((bottom - top) + self.resize_pixel) resized_thigh = cv2.resize(part_resize, (self.w, new_height), interpolation=cv2.INTER_LINEAR) resized_thigh_alpha = cv2.resize(part_resize_alpha, (self.w, new_height), interpolation=cv2.INTER_LINEAR) @@ -68,7 +68,7 @@ class MannequinEditService(): if __name__ == '__main__': request_data = MannequinModel( mannequins="aida-sys-image/models/male/dc36ce58-46c3-4b6f-8787-5ca7d6fc26e6.png", - scale=0.1, + resize_pixel=-100, bucket_name="test", mannequin_name="mannequin_name", top=270, From 1deea783a1fb19da60f73d916212b88e3ebd536a Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Thu, 27 Mar 2025 16:48:42 +0800 Subject: [PATCH 033/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20brand=20dna=20generate=20fix=EF=BC=88?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87?= =?UTF-8?q?=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88?= =?UTF-8?q?=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B?= =?UTF-8?q?=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/brand_dna/service_generate_brand_info.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/service/brand_dna/service_generate_brand_info.py b/app/service/brand_dna/service_generate_brand_info.py index 73c1294..367a4d6 100644 --- a/app/service/brand_dna/service_generate_brand_info.py +++ b/app/service/brand_dna/service_generate_brand_info.py @@ -41,7 +41,7 @@ class GenerateBrandInfo: self.output_parser = StructuredOutputParser.from_response_schemas(self.response_schemas) self.format_instructions = 
self.output_parser.get_format_instructions() self.prompt = PromptTemplate( - template="你是一个时装品牌的设计师。根据用户输入提取出brand name,brand slogan,brand logo 描述。如果没有以上内容,需要你根据用户输入随意发挥。随后根据brand logo 描述生成一个prompt,这个prompt用于生成模型.\n{format_instructions}\n{question}", + template="你是一个时装品牌的设计师。根据用户输入提取出brand name,brand slogan,brand logo 描述。如果没有以上内容,需要你根据用户输入随意发挥。随后根据brand logo 描述生成一个prompt,这个prompt用于生成模型,prompt需要完全表达用户的想法并使用英文,使用简洁明了的单词不要过长。.\n{format_instructions}\n{question}", input_variables=["question"], partial_variables={"format_instructions": self.format_instructions} ) @@ -98,7 +98,7 @@ class GenerateBrandInfo: if __name__ == '__main__': request_data = GenerateBrandModel( user_id="89", - prompt="xiaomi" + prompt="华为" ) service = GenerateBrandInfo(request_data) print(service.get_result()) From ae38a3a357e107abdd5137184017dd291df9aaac Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Tue, 1 Apr 2025 14:14:50 +0800 Subject: [PATCH 034/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20=E6=A8=A1=E7=89=B9=E6=96=B0=E5=A2=9E=E5=90=8E?= =?UTF-8?q?=E5=A4=84=E7=90=86=EF=BC=8C=E4=BF=9D=E6=8C=81=E8=BE=93=E5=85=A5?= =?UTF-8?q?=E5=9B=BE=E7=89=87size=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/mannequins_edit/service.py | 36 ++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/app/service/mannequins_edit/service.py b/app/service/mannequins_edit/service.py index 8db25d5..c0f0a44 100644 --- a/app/service/mannequins_edit/service.py +++ b/app/service/mannequins_edit/service.py @@ -1,5 +1,6 @@ import cv2 import numpy as np +from PIL import Image from minio import Minio from app.core.config import MINIO_URL, MINIO_ACCESS, 
MINIO_SECRET, MINIO_SECURE @@ -34,6 +35,40 @@ class MannequinEditService(): req = oss_upload_image(oss_client=minio_client, bucket=self.bucket_name, object_name=f"{self.mannequin_name}.png", image_bytes=image_bytes) return req.bucket_name + "/" + req.object_name + def post_processing(self, image): + # 原始图片的尺寸 + original_width, original_height = image.size + + # 计算宽度和高度的缩放比例 + width_ratio = self.w / original_width + height_ratio = self.h / original_height + + # 选择较小的缩放比例,确保图片能完整放入目标图片中 + scale_ratio = min(width_ratio, height_ratio) + + # 计算调整后的尺寸 + new_width = int(original_width * scale_ratio) + new_height = int(original_height * scale_ratio) + + # 调整图片大小 + resized_image = image.resize((new_width, new_height)) + + # 创建一个 512x768 的透明图片 + result_image = Image.new("RGBA", (self.w, self.h), (255, 255, 255, 0)) + + # 计算需要粘贴的位置,使图片居中 + x_offset = (self.w - new_width) // 2 + y_offset = (self.h - new_height) // 2 + + # 将调整大小后的图片粘贴到透明图片上 + if resized_image.mode == "RGBA": + result_image.paste(resized_image, (x_offset, y_offset), mask=resized_image.split()[3]) + else: + result_image.paste(resized_image, (x_offset, y_offset)) + + image = np.array(result_image) + return image + def resize_leg(self, top, bottom): # 上部 top_part = self.bgr[:top, :] @@ -62,6 +97,7 @@ class MannequinEditService(): new_image = np.dstack((new_bgr, new_bgr_alpha)) else: new_image = new_bgr + new_image = self.post_processing(Image.fromarray(new_image)) return new_image From ddadf3e287f428ec712f676d27dece2b4989ebe5 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Mon, 7 Apr 2025 13:35:01 +0800 Subject: [PATCH 035/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20pose=20transform=20=E9=83=A8=E7=BD=B2?= =?UTF-8?q?=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= 
=?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/core/config.py | 1 + .../generate_image/service_pose_transform.py | 157 ++++++++++++------ .../utils/pose_transform_upload.py | 68 ++++++++ 3 files changed, 177 insertions(+), 49 deletions(-) create mode 100644 app/service/generate_image/utils/pose_transform_upload.py diff --git a/app/core/config.py b/app/core/config.py index 6ac56e3..ac9181f 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -148,6 +148,7 @@ GRI_MODEL_URL = '10.1.1.240:10051' # Pose Transform service config PS_RABBITMQ_QUEUES = os.getenv("PS_RABBITMQ_QUEUES", f"PoseTransform{RABBITMQ_ENV}") +PT_MODEL_URL = '10.1.1.243:10061' # SEG service config SEGMENTATION = { diff --git a/app/service/generate_image/service_pose_transform.py b/app/service/generate_image/service_pose_transform.py index 6c1c1c9..8a5e4c8 100644 --- a/app/service/generate_image/service_pose_transform.py +++ b/app/service/generate_image/service_pose_transform.py @@ -9,16 +9,19 @@ """ import json import logging +import time +from io import BytesIO -import cv2 +import imageio import numpy as np import redis import tritonclient.grpc as grpcclient from PIL import Image +from tritonclient.utils import np_to_triton_dtype from app.core.config import * from app.schemas.pose_transform import PoseTransformModel -from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image +from app.service.generate_image.utils.pose_transform_upload import upload_gif, upload_video, upload_first_image from app.service.utils.oss_client import oss_get_image logger = logging.getLogger() @@ -29,33 +32,48 @@ class PoseTransformService: if DEBUG is False: self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) self.channel = self.connection.channel() - self.grpc_client = grpcclient.InferenceServerClient(url=GRI_MODEL_URL) + self.grpc_client = 
grpcclient.InferenceServerClient(url=PT_MODEL_URL) self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) self.category = "pose_transform" - self.batch_size = 1 - self.seed = "1" self.image_url = request_data.image_url - self.image = oss_get_image(bucket=self.image_url.split('/')[0], object_name=self.image_url[self.image_url.find('/') + 1:], data_type="cv2") + self.pose_num = request_data.pose_id + self.image = pre_processing_image(request_data.image_url) self.tasks_id = request_data.tasks_id self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:] - self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': 'test/mannequin_name.png', 'video_url': 'test/mannequin_name.png', 'image_url': 'test/mannequin_name.png'} + self.pose_transform_data = {'tasks_id': self.tasks_id, 'status': 'PENDING', 'message': "pending", 'gif_url': '', 'video_url': '', 'image_url': ''} - self.redis_client.set(self.tasks_id, json.dumps(self.gen_product_data)) + self.redis_client.set(self.tasks_id, json.dumps(self.pose_transform_data)) self.redis_client.expire(self.tasks_id, 600) def callback(self, result, error): if error: - self.gen_product_data['status'] = "FAILURE" - self.gen_product_data['message'] = str(error) - self.redis_client.set(self.tasks_id, json.dumps(self.gen_product_data)) + self.pose_transform_data['status'] = "FAILURE" + self.pose_transform_data['message'] = str(error) + self.redis_client.set(self.tasks_id, json.dumps(self.pose_transform_data)) else: - image = result.as_numpy("generated_inpaint_image") - image_result = Image.fromarray(np.squeeze(image.astype(np.uint8))) - image_url = upload_SDXL_image(image_result, user_id=self.user_id, category=f"{self.category}", file_name=f"{self.tasks_id}.png") - self.gen_product_data['status'] = "SUCCESS" - self.gen_product_data['message'] = "success" - self.gen_product_data['image_url'] = str(image_url) - 
self.redis_client.set(self.tasks_id, json.dumps(self.gen_product_data)) + result_data = np.squeeze(result.as_numpy("generated_image_list").astype(np.uint8))[:, :, :, ::-1] + + # 第一帧图像 + first_image = Image.fromarray(result_data[0]) + first_image_url = upload_first_image(first_image, user_id=self.user_id, category=f"{self.category}_first_img", file_name=f"{self.tasks_id}.png") + + # 上传GIF + gif_buffer = BytesIO() + imageio.mimsave(gif_buffer, result_data, format='GIF', fps=5) + gif_buffer.seek(0) + gif_url = upload_gif(gif_buffer=gif_buffer, user_id=self.user_id, category=f"{self.category}_gif", file_name=f"{self.tasks_id}.gif") + + # 上传video + video_url = upload_video(frames=result_data, user_id=self.user_id, category=f"{self.category}_video", file_name=f"{self.tasks_id}.mp4") + + self.pose_transform_data['status'] = "SUCCESS" + self.pose_transform_data['message'] = "success" + self.pose_transform_data['gif_url'] = str(gif_url) + self.pose_transform_data['video_url'] = str(video_url) + self.pose_transform_data['image_url'] = str(first_image_url) + + + self.redis_client.set(self.tasks_id, json.dumps(self.pose_transform_data)) def read_tasks_status(self): status_data = self.redis_client.get(self.tasks_id) @@ -63,51 +81,92 @@ class PoseTransformService: def get_result(self): try: - image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB) - image = cv2.resize(image, (512, 768)) - images = [image.astype(np.uint8)] * self.batch_size + pose_num = [self.pose_num] * 1 + pose_num_obj = np.array(pose_num, dtype="object").reshape((-1, 1)) + input_pose_num = grpcclient.InferInput("pose_num", pose_num_obj.shape, np_to_triton_dtype(pose_num_obj.dtype)) + input_pose_num.set_data_from_numpy(pose_num_obj) - image_obj = np.array(images, dtype=np.uint8).reshape((768, 512, 3)) + image_files = [self.image.astype(np.uint8)] * 1 + image_files_obj = np.array(image_files, dtype=np.uint8).reshape((-1, 768, 512, 3)) + input_image_files = grpcclient.InferInput("image_file", image_files_obj.shape, 
"UINT8") + input_image_files.set_data_from_numpy(image_files_obj) - input_image = grpcclient.InferInput("input_image", image_obj.shape, "UINT8") - - input_image.set_data_from_numpy(image_obj) - - inputs = [input_image] - # ctx = self.grpc_client.async_infer(model_name=GRI_MODEL_NAME_OVERALL, inputs=inputs, callback=self.callback) - - # time_out = 600 - # while time_out > 0: - # gen_product_data, _ = self.read_tasks_status() - # if gen_product_data['status'] in ["REVOKED", "FAILURE", "NO_FACE"]: - # ctx.cancel() - # break - # elif gen_product_data['status'] == "SUCCESS": - # break - # time_out -= 1 - # time.sleep(0.1) - gen_product_data, _ = self.read_tasks_status() - return gen_product_data + ctx = self.grpc_client.async_infer(model_name="animatex_1", inputs=[input_pose_num, input_image_files], callback=self.callback) + time_out = 6000 + while time_out > 0: + pose_transform_data, _ = self.read_tasks_status() + if pose_transform_data['status'] in ["REVOKED", "FAILURE"]: + ctx.cancel() + break + elif pose_transform_data['status'] == "SUCCESS": + break + time_out -= 1 + time.sleep(0.1) + pose_transform_data, _ = self.read_tasks_status() + return pose_transform_data except Exception as e: - self.gen_product_data['status'] = "FAILURE" - self.gen_product_data['message'] = str(e) - self.redis_client.set(self.tasks_id, json.dumps(self.gen_product_data)) + self.pose_transform_data['status'] = "FAILURE" + self.pose_transform_data['message'] = str(e) + self.redis_client.set(self.tasks_id, json.dumps(self.pose_transform_data)) raise Exception(str(e)) finally: - dict_gen_product_data, str_gen_product_data = self.read_tasks_status() + dict_pose_transform_data, str_pose_transform_data = self.read_tasks_status() if DEBUG is False: - self.channel.basic_publish(exchange='', routing_key=PS_RABBITMQ_QUEUES, body=str_gen_product_data) - logger.info(f" [x] Sent to: {PS_RABBITMQ_QUEUES} data:@@@@ {json.dumps(dict_gen_product_data, indent=4)}") + self.channel.basic_publish(exchange='', 
routing_key=PS_RABBITMQ_QUEUES, body=str_pose_transform_data) + logger.info(f" [x] Sent to: {PS_RABBITMQ_QUEUES} data:@@@@ {json.dumps(dict_pose_transform_data, indent=4)}") def infer_cancel(tasks_id): redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'} - gen_product_data = json.dumps(data) - redis_client.set(tasks_id, gen_product_data) + pose_transform_data = json.dumps(data) + redis_client.set(tasks_id, pose_transform_data) return data +def pre_processing_image(image_url): + image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL") + # 目标图片的尺寸 + target_width = 512 + target_height = 768 + + # 原始图片的尺寸 + original_width, original_height = image.size + + # 计算宽度和高度的缩放比例 + width_ratio = target_width / original_width + height_ratio = target_height / original_height + + # 选择较小的缩放比例,确保图片能完整放入目标图片中 + scale_ratio = min(width_ratio, height_ratio) + + # 计算调整后的尺寸 + new_width = int(original_width * scale_ratio) + new_height = int(original_height * scale_ratio) + + # 调整图片大小 + resized_image = image.resize((new_width, new_height)) + + # 创建一个 512x768 的透明图片 + result_image = Image.new("RGBA", (target_width, target_height), (255, 255, 255, 0)) + + # 计算需要粘贴的位置,使图片居中 + x_offset = (target_width - new_width) // 2 + y_offset = (target_height - new_height) // 2 + + # 将调整大小后的图片粘贴到透明图片上 + if resized_image.mode == "RGBA": + result_image.paste(resized_image, (x_offset, y_offset), mask=resized_image.split()[3]) + else: + result_image.paste(resized_image, (x_offset, y_offset)) + result_image = result_image.convert("RGB") + image = np.array(result_image) + + # image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA) + + return image + + if __name__ == '__main__': rd = PoseTransformModel( tasks_id="123-89", diff --git a/app/service/generate_image/utils/pose_transform_upload.py 
b/app/service/generate_image/utils/pose_transform_upload.py new file mode 100644 index 0000000..86c3e6e --- /dev/null +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -0,0 +1,68 @@ +import io +import logging + +import imageio +import numpy as np +# import boto3 +from minio import Minio + +from app.core.config import * +from app.service.utils.new_oss_client import oss_upload_image + +minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) + + +def upload_first_image(image, user_id, category, file_name): + try: + image_data = io.BytesIO() + image.save(image_data, format='PNG') + image_data.seek(0) + image_bytes = image_data.read() + object_name = f'{user_id}/{category}/{file_name}' + req = oss_upload_image(oss_client=minio_client, bucket=GI_MINIO_BUCKET, object_name=object_name, image_bytes=image_bytes) + image_url = f"aida-users/{object_name}" + return image_url + except Exception as e: + logging.warning(f"upload_png_mask runtime exception : {e}") + + +def upload_gif(gif_buffer, user_id, category, file_name): + try: + object_name = f'{user_id}/{category}/{file_name}' + req = minio_client.put_object( + "aida-users", + object_name, + gif_buffer, + length=gif_buffer.getbuffer().nbytes, + content_type="image/gif" + ) + return f"aida-users/{object_name}" + except Exception as e: + logging.warning(f"upload_gif runtime exception : {e}") + + +def upload_video(frames, user_id, category, file_name): + try: + video_buffer = io.BytesIO() + with imageio.get_writer(video_buffer, format='mp4', fps=24) as writer: + for frame in frames: + writer.append_data(frame) + video_buffer.seek(0) + + object_name = f'{user_id}/{category}/{file_name}' + # 上传视频流到MinIO + minio_client.put_object( + bucket_name="aida-users", + object_name=object_name, + data=video_buffer, + length=video_buffer.getbuffer().nbytes, + content_type='video/mp4' + ) + return f"aida-users/{object_name}" + except Exception as e: + 
logging.warning(f"upload_video runtime exception : {e}") + + +if __name__ == '__main__': + images = np.random.randint(0, 256, size=(4, 512, 512, 3), dtype=np.uint8) + print(upload_video(images, user_id=89, category='test', file_name="1.mp4")) From 3ad724fe9f59d568d10e504e15c7a8b88a3bf39f Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Mon, 7 Apr 2025 14:24:41 +0800 Subject: [PATCH 036/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20pose=20transform=20=E9=83=A8=E7=BD=B2?= =?UTF-8?q?=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../utils/pose_transform_upload.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index 86c3e6e..7e97a26 100644 --- a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -1,6 +1,7 @@ import io import logging +import cv2 import imageio import numpy as np # import boto3 @@ -43,20 +44,22 @@ def upload_gif(gif_buffer, user_id, category, file_name): def upload_video(frames, user_id, category, file_name): try: + # 生成内存中的视频字节流 video_buffer = io.BytesIO() - with imageio.get_writer(video_buffer, format='mp4', fps=24) as writer: - for frame in frames: - writer.append_data(frame) - video_buffer.seek(0) + with imageio.get_writer(video_buffer, format="mp4", fps=24) as writer: + for img in images: + writer.append_data(img) + writer.close() + video_bytes = video_buffer.getvalue() object_name = f'{user_id}/{category}/{file_name}' # 上传视频流到MinIO 
minio_client.put_object( bucket_name="aida-users", object_name=object_name, - data=video_buffer, - length=video_buffer.getbuffer().nbytes, - content_type='video/mp4' + data=io.BytesIO(video_bytes), + length=len(video_bytes), + content_type="video/mp4" ) return f"aida-users/{object_name}" except Exception as e: From 24eb43e2f024cb9d5cdc4d0c6d2160eeec882e11 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Mon, 7 Apr 2025 16:47:27 +0800 Subject: [PATCH 037/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20pose=20transform=20=E9=83=A8=E7=BD=B2?= =?UTF-8?q?=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/generate_image/service_pose_transform.py | 12 +++++------- .../generate_image/utils/pose_transform_upload.py | 2 +- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/app/service/generate_image/service_pose_transform.py b/app/service/generate_image/service_pose_transform.py index 8a5e4c8..3fc65c6 100644 --- a/app/service/generate_image/service_pose_transform.py +++ b/app/service/generate_image/service_pose_transform.py @@ -29,9 +29,6 @@ logger = logging.getLogger() class PoseTransformService: def __init__(self, request_data): - if DEBUG is False: - self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) - self.channel = self.connection.channel() self.grpc_client = grpcclient.InferenceServerClient(url=PT_MODEL_URL) self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) self.category = "pose_transform" @@ -72,7 +69,6 @@ class PoseTransformService: self.pose_transform_data['video_url'] = str(video_url) 
self.pose_transform_data['image_url'] = str(first_image_url) - self.redis_client.set(self.tasks_id, json.dumps(self.pose_transform_data)) def read_tasks_status(self): @@ -91,8 +87,8 @@ class PoseTransformService: input_image_files = grpcclient.InferInput("image_file", image_files_obj.shape, "UINT8") input_image_files.set_data_from_numpy(image_files_obj) - ctx = self.grpc_client.async_infer(model_name="animatex_1", inputs=[input_pose_num, input_image_files], callback=self.callback) - time_out = 6000 + ctx = self.grpc_client.async_infer(model_name="animatex_1", inputs=[input_pose_num, input_image_files], callback=self.callback, client_timeout=60000) + time_out = 60000 while time_out > 0: pose_transform_data, _ = self.read_tasks_status() if pose_transform_data['status'] in ["REVOKED", "FAILURE"]: @@ -112,7 +108,9 @@ class PoseTransformService: finally: dict_pose_transform_data, str_pose_transform_data = self.read_tasks_status() if DEBUG is False: - self.channel.basic_publish(exchange='', routing_key=PS_RABBITMQ_QUEUES, body=str_pose_transform_data) + connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) + channel = connection.channel() + channel.basic_publish(exchange='', routing_key=PS_RABBITMQ_QUEUES, body=str_pose_transform_data) logger.info(f" [x] Sent to: {PS_RABBITMQ_QUEUES} data:@@@@ {json.dumps(dict_pose_transform_data, indent=4)}") diff --git a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index 7e97a26..69708f6 100644 --- a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -47,7 +47,7 @@ def upload_video(frames, user_id, category, file_name): # 生成内存中的视频字节流 video_buffer = io.BytesIO() with imageio.get_writer(video_buffer, format="mp4", fps=24) as writer: - for img in images: + for img in frames: writer.append_data(img) writer.close() video_bytes = video_buffer.getvalue() From 
2ceb5772b7afa264f8ba9bb23caf14a27dc3ba12 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Mon, 7 Apr 2025 17:07:32 +0800 Subject: [PATCH 038/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20pose=20transform=20=E9=83=A8=E7=BD=B2?= =?UTF-8?q?=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../utils/pose_transform_upload.py | 56 ++++++++++++++----- 1 file changed, 41 insertions(+), 15 deletions(-) diff --git a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index 69708f6..a17dc48 100644 --- a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -10,6 +10,12 @@ from minio import Minio from app.core.config import * from app.service.utils.new_oss_client import oss_upload_image + +# minio 配置 +MINIO_URL = "www.minio.aida.com.hk:12024" +MINIO_ACCESS = 'vXKFLSJkYeEq2DrSZvkB' +MINIO_SECRET = 'uKTZT3x7C43WvPN9QTc99DiRkwddWZrG9Uh3JVlR' +MINIO_SECURE = True minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) @@ -44,24 +50,44 @@ def upload_gif(gif_buffer, user_id, category, file_name): def upload_video(frames, user_id, category, file_name): try: - # 生成内存中的视频字节流 - video_buffer = io.BytesIO() - with imageio.get_writer(video_buffer, format="mp4", fps=24) as writer: - for img in frames: - writer.append_data(img) - writer.close() - video_bytes = video_buffer.getvalue() - object_name = f'{user_id}/{category}/{file_name}' - # 上传视频流到MinIO - minio_client.put_object( - bucket_name="aida-users", - object_name=object_name, - 
data=io.BytesIO(video_bytes), - length=len(video_bytes), - content_type="video/mp4" + # 创建视频写入器 + fps = 24 # 帧率 + video_path = "output.mp4" + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + video_writer = cv2.VideoWriter(video_path, fourcc, fps, (768, 512)) + + # 逐帧写入 + for frame in frames: + video_writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)) # OpenCV需BGR格式 + video_writer.release() + + minio_client.fput_object( + "aida-users", + object_name, + video_path, + content_type="video/mp4" # 指定MIME类型确保可在线播放[9](@ref) ) return f"aida-users/{object_name}" + + # # 生成内存中的视频字节流 + # video_buffer = io.BytesIO() + # with imageio.get_writer(video_buffer, format="mp4", fps=24) as writer: + # for img in frames: + # writer.append_data(img) + # writer.close() + # video_bytes = video_buffer.getvalue() + # + # object_name = f'{user_id}/{category}/{file_name}' + # # 上传视频流到MinIO + # minio_client.put_object( + # bucket_name="aida-users", + # object_name=object_name, + # data=io.BytesIO(video_bytes), + # length=len(video_bytes), + # content_type="video/mp4" + # ) + # return f"aida-users/{object_name}" except Exception as e: logging.warning(f"upload_video runtime exception : {e}") From 693417ea82387a95a830793a896b305eb3533906 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Mon, 7 Apr 2025 17:08:35 +0800 Subject: [PATCH 039/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20pose=20transform=20=E9=83=A8=E7=BD=B2?= =?UTF-8?q?=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/generate_image/utils/pose_transform_upload.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git 
a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index a17dc48..d768ffa 100644 --- a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -53,9 +53,8 @@ def upload_video(frames, user_id, category, file_name): object_name = f'{user_id}/{category}/{file_name}' # 创建视频写入器 fps = 24 # 帧率 - video_path = "output.mp4" fourcc = cv2.VideoWriter_fourcc(*'mp4v') - video_writer = cv2.VideoWriter(video_path, fourcc, fps, (768, 512)) + video_writer = cv2.VideoWriter(file_name, fourcc, fps, (768, 512)) # 逐帧写入 for frame in frames: @@ -65,7 +64,7 @@ def upload_video(frames, user_id, category, file_name): minio_client.fput_object( "aida-users", object_name, - video_path, + file_name, content_type="video/mp4" # 指定MIME类型确保可在线播放[9](@ref) ) return f"aida-users/{object_name}" From 01a1173e13baef50ebed85d8c295b2eeb9fd3476 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Mon, 7 Apr 2025 17:52:16 +0800 Subject: [PATCH 040/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20pose=20transform=20=E9=83=A8=E7=BD=B2?= =?UTF-8?q?=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../generate_image/utils/pose_transform_upload.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index d768ffa..0310abb 100644 --- a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -52,9 +52,10 @@ def 
upload_video(frames, user_id, category, file_name): try: object_name = f'{user_id}/{category}/{file_name}' # 创建视频写入器 - fps = 24 # 帧率 - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - video_writer = cv2.VideoWriter(file_name, fourcc, fps, (768, 512)) + fps = 1 # 帧率 + + fourcc = cv2.VideoWriter.fourcc(*'mp4v') + video_writer = cv2.VideoWriter(file_name, fourcc, fps, (512, 768)) # 逐帧写入 for frame in frames: @@ -67,6 +68,7 @@ def upload_video(frames, user_id, category, file_name): file_name, content_type="video/mp4" # 指定MIME类型确保可在线播放[9](@ref) ) + print(file_name) return f"aida-users/{object_name}" # # 生成内存中的视频字节流 @@ -92,5 +94,5 @@ def upload_video(frames, user_id, category, file_name): if __name__ == '__main__': - images = np.random.randint(0, 256, size=(4, 512, 512, 3), dtype=np.uint8) - print(upload_video(images, user_id=89, category='test', file_name="1.mp4")) + images = np.random.randint(0, 256, size=(10, 768, 512, 3), dtype=np.uint8) + print(upload_video(images, user_id=89, category='test', file_name="1123123.mp4")) From 635b506c4dea37158aedbbad78aa0aedf9c900ca Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Tue, 8 Apr 2025 11:02:31 +0800 Subject: [PATCH 041/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20pose=20transform=20=E9=83=A8=E7=BD=B2?= =?UTF-8?q?=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/generate_image/utils/pose_transform_upload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index 0310abb..5f9f70d 100644 --- 
a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -54,7 +54,7 @@ def upload_video(frames, user_id, category, file_name): # 创建视频写入器 fps = 1 # 帧率 - fourcc = cv2.VideoWriter.fourcc(*'mp4v') + fourcc = cv2.VideoWriter.fourcc(*'avc1') video_writer = cv2.VideoWriter(file_name, fourcc, fps, (512, 768)) # 逐帧写入 From 7d12fcc98cabe6124af43cb973e6e2baf86d982c Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Tue, 8 Apr 2025 12:36:44 +0800 Subject: [PATCH 042/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20pose=20transform=20=E9=83=A8=E7=BD=B2?= =?UTF-8?q?=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../utils/pose_transform_upload.py | 57 +++++++----------- requirements.txt | Bin 1938 -> 1942 bytes 2 files changed, 23 insertions(+), 34 deletions(-) diff --git a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index 5f9f70d..c7fd37b 100644 --- a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -2,15 +2,14 @@ import io import logging import cv2 -import imageio import numpy as np +import skvideo.io # import boto3 from minio import Minio from app.core.config import * from app.service.utils.new_oss_client import oss_upload_image - # minio 配置 MINIO_URL = "www.minio.aida.com.hk:12024" MINIO_ACCESS = 'vXKFLSJkYeEq2DrSZvkB' @@ -50,18 +49,8 @@ def upload_gif(gif_buffer, user_id, category, file_name): def upload_video(frames, user_id, category, file_name): try: + ndarray_to_video(frames, file_name) 
object_name = f'{user_id}/{category}/{file_name}' - # 创建视频写入器 - fps = 1 # 帧率 - - fourcc = cv2.VideoWriter.fourcc(*'avc1') - video_writer = cv2.VideoWriter(file_name, fourcc, fps, (512, 768)) - - # 逐帧写入 - for frame in frames: - video_writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)) # OpenCV需BGR格式 - video_writer.release() - minio_client.fput_object( "aida-users", object_name, @@ -70,29 +59,29 @@ def upload_video(frames, user_id, category, file_name): ) print(file_name) return f"aida-users/{object_name}" - - # # 生成内存中的视频字节流 - # video_buffer = io.BytesIO() - # with imageio.get_writer(video_buffer, format="mp4", fps=24) as writer: - # for img in frames: - # writer.append_data(img) - # writer.close() - # video_bytes = video_buffer.getvalue() - # - # object_name = f'{user_id}/{category}/{file_name}' - # # 上传视频流到MinIO - # minio_client.put_object( - # bucket_name="aida-users", - # object_name=object_name, - # data=io.BytesIO(video_bytes), - # length=len(video_bytes), - # content_type="video/mp4" - # ) - # return f"aida-users/{object_name}" except Exception as e: logging.warning(f"upload_video runtime exception : {e}") +def ndarray_to_video(images, output_path, frame_size=(512, 768), fps=1): + # 初始化视频写入器 + writer = skvideo.io.FFmpegWriter( + output_path, + inputdict={'-r': str(fps)}, + outputdict={'-r': str(fps), '-vcodec': 'libx264'} + ) + # 逐帧写入 + for frame in images: + # 调整尺寸(可选) + resized_frame = cv2.resize(frame, frame_size) + # 转换颜色通道(若需从 BGR 转 RGB) + rgb_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2RGB) + writer.writeFrame(rgb_frame) + + # 关闭写入器 + writer.close() + + if __name__ == '__main__': - images = np.random.randint(0, 256, size=(10, 768, 512, 3), dtype=np.uint8) - print(upload_video(images, user_id=89, category='test', file_name="1123123.mp4")) + images = np.random.randint(0, 256, size=(4, 768, 512, 3), dtype=np.uint8) + print(upload_video(images, user_id=89, category='pose_transform_video', file_name="1123123.mp4")) diff --git a/requirements.txt 
b/requirements.txt index 909fb2168e3b601ad8fa54f6433f2598f4a30e16..9a4490feeb6ceade103d002bfdbca7ee7cadc5a4 100644 GIT binary patch delta 16 XcmbQlKaGC_4;ynLL)m6-wsuAUCc6YL delta 12 TcmbQnKZ$<>58Gw|wl+op7|H`# From db5b5519fd3ab688922015d4f42ee7a76eae576b Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Tue, 8 Apr 2025 12:37:56 +0800 Subject: [PATCH 043/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20pose=20transform=20=E9=83=A8=E7=BD=B2?= =?UTF-8?q?=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- requirements.txt | Bin 1942 -> 1970 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/requirements.txt b/requirements.txt index 9a4490feeb6ceade103d002bfdbca7ee7cadc5a4..3e5cb55d1b7f913548f31024557f7d9071aa1bad 100644 GIT binary patch delta 36 ocmbQnzlnc?2%C5@Lo!1qLpBhXFz7Or0qGQmREGS`{A}%v0Hak1V*mgE delta 12 TcmdnQKaGEb2-{`@wkAdZ8uA03 From 76184a6a756328f936164cd0b24bde1f0f6a4114 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Tue, 8 Apr 2025 13:18:13 +0800 Subject: [PATCH 044/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20pose=20transform=20=E9=83=A8=E7=BD=B2?= =?UTF-8?q?=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/generate_image/utils/pose_transform_upload.py | 6 +++--- 1 file changed, 3 insertions(+), 3 
deletions(-) diff --git a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index c7fd37b..1f3577d 100644 --- a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -73,10 +73,10 @@ def ndarray_to_video(images, output_path, frame_size=(512, 768), fps=1): # 逐帧写入 for frame in images: # 调整尺寸(可选) - resized_frame = cv2.resize(frame, frame_size) + # resized_frame = cv2.resize(frame, frame_size) # 转换颜色通道(若需从 BGR 转 RGB) - rgb_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2RGB) - writer.writeFrame(rgb_frame) + # rgb_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2RGB) + writer.writeFrame(frame) # 关闭写入器 writer.close() From c9699aa396ea481c723c0a99b24ac12c36d3fcba Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Tue, 8 Apr 2025 18:17:01 +0800 Subject: [PATCH 045/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20pose=20transform=20=E9=83=A8=E7=BD=B2?= =?UTF-8?q?=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/generate_image/utils/pose_transform_upload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index 1f3577d..60c7e79 100644 --- a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -63,7 +63,7 @@ def upload_video(frames, user_id, category, file_name): logging.warning(f"upload_video runtime exception : {e}") -def ndarray_to_video(images, output_path, 
frame_size=(512, 768), fps=1): +def ndarray_to_video(images, output_path, frame_size=(512, 768), fps=4): # 初始化视频写入器 writer = skvideo.io.FFmpegWriter( output_path, From 3593f0d431688c60ee8cc47fc8d45859f9e4e625 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Tue, 8 Apr 2025 22:07:34 +0800 Subject: [PATCH 046/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20pose=20transform=20=E9=83=A8=E7=BD=B2?= =?UTF-8?q?=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/generate_image/utils/pose_transform_upload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index 60c7e79..b6d632d 100644 --- a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -63,7 +63,7 @@ def upload_video(frames, user_id, category, file_name): logging.warning(f"upload_video runtime exception : {e}") -def ndarray_to_video(images, output_path, frame_size=(512, 768), fps=4): +def ndarray_to_video(images, output_path, frame_size=(512, 768), fps=9): # 初始化视频写入器 writer = skvideo.io.FFmpegWriter( output_path, From f83a202b20dd7fbbad4da063b3cddb66fe0037a5 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Fri, 11 Apr 2025 17:14:59 +0800 Subject: [PATCH 047/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20clothing=20seg=20fix=EF=BC=88=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= 
=?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_clothing_seg.py | 51 +++++++++ app/api/api_route.py | 6 +- app/schemas/clothing_seg.py | 6 ++ app/service/clothing_seg/service.py | 156 ++++++++++++++++++++++++++++ 4 files changed, 217 insertions(+), 2 deletions(-) create mode 100644 app/api/api_clothing_seg.py create mode 100644 app/schemas/clothing_seg.py create mode 100644 app/service/clothing_seg/service.py diff --git a/app/api/api_clothing_seg.py b/app/api/api_clothing_seg.py new file mode 100644 index 0000000..e09b882 --- /dev/null +++ b/app/api/api_clothing_seg.py @@ -0,0 +1,51 @@ +import json +import logging + +from fastapi import APIRouter, HTTPException + +from app.schemas.response_template import ResponseModel +from app.schemas.clothing_seg import ClothingSegModel +from app.service.clothing_seg.service import ClothingSeg + +router = APIRouter() +logger = logging.getLogger() + + +@router.post("/clothing_seg") +def clothing_seg(request_item: ClothingSegModel): + """ + 创建一个具有以下参数的请求体: + - **user_id**: 用户id + - **image_data**: 图片数据 + { + "image_url": "test/clothing_seg/dress.jpg", + "image_type": "product" + } + + 示例参数: + { + "user_id": 89, + "image_data": [ + { + "image_url": "test/clothing_seg/dress.jpg", + "image_type": "sketch" + }, + { + "image_url": "test/clothing_seg/skirt_559.jpg", + "image_type": "sketch" + }, + { + "image_url": "test/clothing_seg/10144613.jpg", + "image_type": "product" + } + ] + } + """ + try: + logger.info(f"clothing_seg request item is : @@@@@@:{json.dumps(request_item.dict())}") + server = ClothingSeg(request_item) + result_url = server.get_result() + except Exception as e: + logger.warning(f"clothing_seg Run Exception @@@@@@:{e}") + raise HTTPException(status_code=404, detail=str(e)) + return 
ResponseModel(data=result_url) diff --git a/app/api/api_route.py b/app/api/api_route.py index 9858ba6..47a4caf 100644 --- a/app/api/api_route.py +++ b/app/api/api_route.py @@ -1,6 +1,6 @@ from fastapi import APIRouter -from app.api import api_agent_generate_image, api_recommendation +from app.api import api_agent_generate_image from app.api import api_attribute_retrieve, api_query_image from app.api import api_brand_dna from app.api import api_brighten @@ -12,6 +12,7 @@ from app.api import api_image2sketch from app.api import api_mannequins_edit from app.api import api_pose_transform from app.api import api_prompt_generation +from app.api import api_clothing_seg from app.api import api_super_resolution from app.api import api_test @@ -29,7 +30,8 @@ router.include_router(api_image2sketch.router, tags=['api_image2sketch'], prefix router.include_router(api_brighten.router, tags=['api_brighten'], prefix="/api") router.include_router(api_query_image.router, tags=['api_query_image'], prefix="/api") router.include_router(api_brand_dna.router, tags=['api_brand_dna'], prefix="/api") -router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") +# router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") router.include_router(api_mannequins_edit.router, tags=['api_mannequins_edit'], prefix="/api") router.include_router(api_agent_generate_image.router, tags=['api_agent_generate_image'], prefix="/api") router.include_router(api_pose_transform.router, tags=['api_pose_transform'], prefix="/api") +router.include_router(api_clothing_seg.router, tags=['api_clothing_seg'], prefix="/api") diff --git a/app/schemas/clothing_seg.py b/app/schemas/clothing_seg.py new file mode 100644 index 0000000..234402c --- /dev/null +++ b/app/schemas/clothing_seg.py @@ -0,0 +1,6 @@ +from pydantic import BaseModel + + +class ClothingSegModel(BaseModel): + user_id: str + image_data: list[dict] diff --git 
a/app/service/clothing_seg/service.py b/app/service/clothing_seg/service.py new file mode 100644 index 0000000..a0f3640 --- /dev/null +++ b/app/service/clothing_seg/service.py @@ -0,0 +1,156 @@ +import io +import time +from pprint import pprint + +import cv2 +import numpy as np +import tritonclient.grpc as grpcclient +from PIL import Image +from minio import Minio +from tritonclient.utils import np_to_triton_dtype + +from app.core.config import * +from app.schemas.clothing_seg import ClothingSegModel +from app.service.design_fast.utils.design_ensemble import get_seg_result +from app.service.utils.decorator import RunTime +from app.service.utils.generate_uuid import generate_uuid +from app.service.utils.new_oss_client import oss_get_image, oss_upload_image + +minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) + + +class ClothingSeg: + def __init__(self, request_data): + self.image_data = request_data.image_data + self.user_id = request_data.user_id + self.triton_client = grpcclient.InferenceServerClient(url="10.1.1.243:10071") + + @RunTime + def get_result(self): + self.read_image() + self.clothing_seg() + self.upload_image() + for data in self.image_data: + del data["image"] + del data["clothing"] + + return self.image_data + + @RunTime + def upload_image(self): + for data in self.image_data: + data["clothing_url"] = [] + for clothing in data["clothing"]: + object_name = f"{self.user_id}/clothing_seg/{generate_uuid()}.png" + image_data = io.BytesIO() + clothing.save(image_data, format="PNG") + image_data.seek(0) + image_bytes = image_data.read() + oss_upload_image(oss_client=minio_client, bucket="aida-users", object_name=object_name, image_bytes=image_bytes) + data["clothing_url"].append(f"aida-users/{object_name}") + + @RunTime + def read_image(self): + for data in self.image_data: + url = data["image_url"] + image = oss_get_image(oss_client=minio_client, bucket=url.split("/", 1)[0], object_name=url.split("/", 
1)[1], data_type="cv2") + data["image"] = image + + @RunTime + def clothing_seg(self): + for data in self.image_data: + image_type = data["image_type"] + image = data["image"] + clothing_result = [] + if image_type == "sketch": + seg_mask = get_seg_result(1, image) + temp = seg_mask != 0.0 + mask = (255 * (temp + 0).astype(np.uint8)) + x_min, y_min, x_max, y_max = get_bounding_box(mask) + cropped_mask = mask[y_min:y_max + 1, x_min:x_max + 1] + cropped_image = image[y_min:y_max + 1, x_min:x_max + 1] + h, w = cropped_image.shape[:2] + mask_pil = Image.fromarray(cropped_mask).convert("L") + image_pil = Image.fromarray(cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB)) + transparent_image = Image.new("RGBA", (w, h), (0, 0, 0, 0)) + transparent_image.paste(image_pil, (0, 0), mask=mask_pil) + clothing_result.append(transparent_image) + else: + input_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + input0_data = [input_image.astype(np.float32)] * 1 + input0_data = np.array(input0_data, dtype=np.float32) + inputs = [ + grpcclient.InferInput( + "INPUT0", input0_data.shape, np_to_triton_dtype(input0_data.dtype) + ), + ] + + inputs[0].set_data_from_numpy(input0_data) + + outputs = [ + grpcclient.InferRequestedOutput("OUTPUT0"), + grpcclient.InferRequestedOutput("OUTPUT1"), + ] + response = self.triton_client.infer("seg_clothing", inputs, request_id=str(1), outputs=outputs) + output0_data = response.as_numpy("OUTPUT0") + cv2.imwrite("output02.png", output0_data * 100) + output1_data = response.as_numpy("OUTPUT1") + for alpha in output1_data: + x_min, y_min, x_max, y_max = get_bounding_box(alpha) + cropped_mask = alpha[y_min:y_max + 1, x_min:x_max + 1] + cropped_image = image[y_min:y_max + 1, x_min:x_max + 1] + h, w = cropped_image.shape[:2] + mask_pil = Image.fromarray(cropped_mask).convert("L") + image_pil = Image.fromarray(cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB)) + transparent_image = Image.new("RGBA", (w, h), (0, 0, 0, 0)) + transparent_image.paste(image_pil, (0, 0), 
mask=mask_pil) + clothing_result.append(transparent_image) + data["clothing"] = clothing_result + + +@RunTime +def get_bounding_box(mask): + """ + 从仅包含 0 和 1 的掩码图像中获取边界框。 + + :param mask: 输入的掩码图像,二维 numpy 数组,元素为 0 或 1 + :return: 边界框坐标 (x_min, y_min, x_max, y_max) + """ + # 找到所有值不为 0 的像素的坐标 + rows, cols = np.where(mask != 0) + + if len(rows) == 0 or len(cols) == 0: + # 如果没有找到不为 0 的像素,返回全 0 的边界框 + return (0, 0, 0, 0) + + # 计算边界框的坐标 + x_min = np.min(cols) + y_min = np.min(rows) + x_max = np.max(cols) + y_max = np.max(rows) + + return (x_min, y_min, x_max, y_max) + + +if __name__ == "__main__": + request_data = ClothingSegModel( + user_id=89, + image_data=[ + { + "image_url": "test/clothing_seg/dress.jpg", + "image_type": "sketch" + }, + { + "image_url": "test/clothing_seg/skirt_559.jpg", + "image_type": "sketch" + }, + { + "image_url": "test/clothing_seg/10144613.jpg", + "image_type": "product" + } + ] + ) + start_time = time.time() + server = ClothingSeg(request_data) + pprint(server.get_result()) + print(time.time() - start_time) From 5a93673b52b15c4c49d8bf1649bb45771aa12265 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Mon, 14 Apr 2025 10:43:33 +0800 Subject: [PATCH 048/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20=E9=83=A8=E7=BD=B2api=5Frecommendation?= =?UTF-8?q?=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_route.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/app/api/api_route.py b/app/api/api_route.py index 47a4caf..b82c942 100644 --- a/app/api/api_route.py +++ b/app/api/api_route.py @@ -14,6 +14,7 @@ from app.api import api_pose_transform from app.api 
import api_prompt_generation from app.api import api_clothing_seg from app.api import api_super_resolution +from app.api import api_recommendation from app.api import api_test router = APIRouter() @@ -30,7 +31,7 @@ router.include_router(api_image2sketch.router, tags=['api_image2sketch'], prefix router.include_router(api_brighten.router, tags=['api_brighten'], prefix="/api") router.include_router(api_query_image.router, tags=['api_query_image'], prefix="/api") router.include_router(api_brand_dna.router, tags=['api_brand_dna'], prefix="/api") -# router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") +router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") router.include_router(api_mannequins_edit.router, tags=['api_mannequins_edit'], prefix="/api") router.include_router(api_agent_generate_image.router, tags=['api_agent_generate_image'], prefix="/api") router.include_router(api_pose_transform.router, tags=['api_pose_transform'], prefix="/api") From fed9d27bf5633cd00d5aa605e5fec4cd3bc5220d Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Mon, 14 Apr 2025 15:11:29 +0800 Subject: [PATCH 049/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20=E4=BC=98=E5=8C=96clothing=20seg=20fix?= =?UTF-8?q?=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/clothing_seg/service.py | 33 +++++++++++++++-------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/app/service/clothing_seg/service.py b/app/service/clothing_seg/service.py index a0f3640..7894bff 100644 --- a/app/service/clothing_seg/service.py +++ 
b/app/service/clothing_seg/service.py @@ -33,7 +33,6 @@ class ClothingSeg: for data in self.image_data: del data["image"] del data["clothing"] - return self.image_data @RunTime @@ -88,14 +87,16 @@ class ClothingSeg: inputs[0].set_data_from_numpy(input0_data) outputs = [ - grpcclient.InferRequestedOutput("OUTPUT0"), + # grpcclient.InferRequestedOutput("OUTPUT0"), grpcclient.InferRequestedOutput("OUTPUT1"), ] + response = self.triton_client.infer("seg_clothing", inputs, request_id=str(1), outputs=outputs) - output0_data = response.as_numpy("OUTPUT0") - cv2.imwrite("output02.png", output0_data * 100) + # output0_data = response.as_numpy("OUTPUT0") + # cv2.imwrite("output02.png", output0_data * 100) output1_data = response.as_numpy("OUTPUT1") for alpha in output1_data: + alpha = cv2.resize(alpha, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_CUBIC) x_min, y_min, x_max, y_max = get_bounding_box(alpha) cropped_mask = alpha[y_min:y_max + 1, x_min:x_max + 1] cropped_image = image[y_min:y_max + 1, x_min:x_max + 1] @@ -121,7 +122,7 @@ def get_bounding_box(mask): if len(rows) == 0 or len(cols) == 0: # 如果没有找到不为 0 的像素,返回全 0 的边界框 - return (0, 0, 0, 0) + return 0, 0, 0, 0 # 计算边界框的坐标 x_min = np.min(cols) @@ -129,21 +130,21 @@ def get_bounding_box(mask): x_max = np.max(cols) y_max = np.max(rows) - return (x_min, y_min, x_max, y_max) + return x_min, y_min, x_max, y_max if __name__ == "__main__": - request_data = ClothingSegModel( + test_data = ClothingSegModel( user_id=89, image_data=[ - { - "image_url": "test/clothing_seg/dress.jpg", - "image_type": "sketch" - }, - { - "image_url": "test/clothing_seg/skirt_559.jpg", - "image_type": "sketch" - }, + # { + # "image_url": "test/clothing_seg/dress.jpg", + # "image_type": "sketch" + # }, + # { + # "image_url": "test/clothing_seg/skirt_559.jpg", + # "image_type": "sketch" + # }, { "image_url": "test/clothing_seg/10144613.jpg", "image_type": "product" @@ -151,6 +152,6 @@ if __name__ == "__main__": ] ) start_time = time.time() - 
server = ClothingSeg(request_data) + server = ClothingSeg(test_data) pprint(server.get_result()) print(time.time() - start_time) From 59095a4d6b9e65d1f1b0636b1586295fd6c62c7b Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Wed, 16 Apr 2025 14:19:02 +0800 Subject: [PATCH 050/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 5a29a1f..b2eb511 100644 --- a/.gitignore +++ b/.gitignore @@ -141,4 +141,11 @@ app/logs/* *.db *.npy *.pytorch -*.jpg \ No newline at end of file +*.jpg +*.mp4 +*.sqlite3 +*.bin +*.pickle +*.csv +*.avi +*.json \ No newline at end of file From 88c9d6ef9318f7b8460b92594f705b9e27d23ca4 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Thu, 17 Apr 2025 11:23:40 +0800 Subject: [PATCH 051/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20design=20batch=20=E4=BB=A3=E7=A0=81=20?= =?UTF-8?q?=E6=9B=B4=E6=96=B0=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89?= =?UTF-8?q?:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:?= =?UTF-8?q?=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../design_batch/design_batch_celery.py | 21 +++- app/service/design_batch/item.py | 27 +++- app/service/design_batch/pipeline/__init__.py | 2 + .../design_batch/pipeline/back_perspective.py | 79 ++++++++++++ 
app/service/design_batch/pipeline/color.py | 25 ++++ app/service/design_batch/pipeline/keypoint.py | 10 +- app/service/design_batch/pipeline/loading.py | 5 + .../design_batch/pipeline/print_painting.py | 95 ++++++++++++--- app/service/design_batch/pipeline/scale.py | 12 ++ .../design_batch/pipeline/segmentation.py | 35 ++++-- app/service/design_batch/pipeline/split.py | 115 ++++++++++++------ app/service/design_batch/utils/organize.py | 44 ++++++- 12 files changed, 399 insertions(+), 71 deletions(-) create mode 100644 app/service/design_batch/pipeline/back_perspective.py diff --git a/app/service/design_batch/design_batch_celery.py b/app/service/design_batch/design_batch_celery.py index 06ccc5e..f5cdc58 100644 --- a/app/service/design_batch/design_batch_celery.py +++ b/app/service/design_batch/design_batch_celery.py @@ -5,9 +5,9 @@ from celery import Celery from minio import Minio from app.core.config import * -from app.service.design_batch.item import BodyItem, TopItem, BottomItem +from app.service.design_batch.item import BodyItem, TopItem, BottomItem, AccessoriesItem from app.service.design_batch.utils.MQ import publish_status -from app.service.design_batch.utils.organize import organize_body, organize_clothing +from app.service.design_batch.utils.organize import organize_body, organize_clothing, organize_accessories from app.service.design_batch.utils.save_json import oss_upload_json from app.service.design_batch.utils.synthesis_item import update_base_size_priority, synthesis, synthesis_single @@ -19,6 +19,8 @@ logging.getLogger('pika').setLevel(logging.WARNING) logger = logging.getLogger() minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) +print("start") + def process_item(item, basic): # 处理project中单个item @@ -28,9 +30,14 @@ def process_item(item, basic): elif item['type'].lower() in ['blouse', 'outwear', 'dress', 'tops']: top_server = TopItem(data=item, basic=basic, minio_client=minio_client) item_data = 
top_server.process() - else: + elif item['type'].lower() in ['skirt', 'trousers', 'bottoms']: bottom_server = BottomItem(data=item, basic=basic, minio_client=minio_client) item_data = bottom_server.process() + elif item['type'].lower() in ['accessories']: + bottom_server = AccessoriesItem(data=item, basic=basic, minio_client=minio_client) + item_data = bottom_server.process() + else: + raise NotImplementedError(f"Item type {item['type']} not implemented") return item_data @@ -40,6 +47,10 @@ def process_layer(item, layers): body_layer = organize_body(item) layers.append(body_layer) return item['body_image'].size + elif item['name'] == 'accessories': + front_layer, back_layer = organize_accessories(item) + layers.append(front_layer) + layers.append(back_layer) else: front_layer, back_layer = organize_clothing(item) layers.append(front_layer) @@ -48,6 +59,9 @@ def process_layer(item, layers): @celery_app.task def batch_design(objects_data, tasks_id, json_name): + print(objects_data) + print(tasks_id) + print(json_name) object_response = [] threads = [] active_threads = 0 @@ -121,6 +135,7 @@ def batch_design(objects_data, tasks_id, json_name): for t in threads: t.join() logger.debug(object_response) + print(object_response) oss_upload_json(minio_client, object_response, json_name) publish_status(tasks_id, "ok", json_name) return object_response diff --git a/app/service/design_batch/item.py b/app/service/design_batch/item.py index cad1488..5ddfdc7 100644 --- a/app/service/design_batch/item.py +++ b/app/service/design_batch/item.py @@ -1,4 +1,4 @@ -from app.service.design_batch.pipeline import * +from app.service.design_fast.pipeline import LoadImage, KeyPoint, Segmentation, Color, PrintPainting, Scaling, Split, LoadBodyImage, ContourDetection class BaseItem: @@ -9,6 +9,27 @@ class BaseItem: self.result.update(basic) +class AccessoriesItem(BaseItem): + def __init__(self, data, basic, minio_client): + super().__init__(data, basic) + self.Accessories_pipeline = [ + 
LoadImage(minio_client), + # KeyPoint(), + ContourDetection(), + # Segmentation(minio_client), + # BackPerspective(minio_client), + Color(minio_client), + PrintPainting(minio_client), + Scaling(), + Split(minio_client) + ] + + def process(self): + for item in self.Accessories_pipeline: + self.result = item(self.result) + return self.result + + class TopItem(BaseItem): def __init__(self, data, basic, minio_client): super().__init__(data, basic) @@ -16,6 +37,7 @@ class TopItem(BaseItem): LoadImage(minio_client), KeyPoint(), Segmentation(minio_client), + # BackPerspective(minio_client), Color(minio_client), PrintPainting(minio_client), Scaling(), @@ -35,7 +57,8 @@ class BottomItem(BaseItem): LoadImage(minio_client), KeyPoint(), ContourDetection(), - # Segmentation(), + Segmentation(minio_client), + # BackPerspective(minio_client), Color(minio_client), PrintPainting(minio_client), Scaling(), diff --git a/app/service/design_batch/pipeline/__init__.py b/app/service/design_batch/pipeline/__init__.py index ec55933..f265bbe 100644 --- a/app/service/design_batch/pipeline/__init__.py +++ b/app/service/design_batch/pipeline/__init__.py @@ -1,3 +1,4 @@ +from .back_perspective import BackPerspective from .color import Color from .contour_detection import ContourDetection from .keypoint import KeyPoint @@ -13,6 +14,7 @@ __all__ = [ 'KeyPoint', 'ContourDetection', 'Segmentation', + 'BackPerspective', 'Color', 'PrintPainting', 'Scaling', diff --git a/app/service/design_batch/pipeline/back_perspective.py b/app/service/design_batch/pipeline/back_perspective.py new file mode 100644 index 0000000..5ddd37c --- /dev/null +++ b/app/service/design_batch/pipeline/back_perspective.py @@ -0,0 +1,79 @@ +import cv2 +import numpy as np + +from app.service.design_fast.utils.design_ensemble import get_seg_result +from app.service.utils.new_oss_client import oss_upload_image + + +class BackPerspective: + def __init__(self, minio_client): + self.minio_client = minio_client + + def __call__(self, 
result): + + # 如果sketch为系统图 查看是否有对应的 背后视角图 + if result['path'].split('/')[0] == 'aida-sys-image': + file_path = result['path'].replace("images", 'images_back', 1) + if self.is_file_exists(bucket_name='aida-sys-image', file_name=file_path[file_path.find('/') + 1:]): + result['back_perspective_url'] = file_path + return result + else: + seg_result = get_seg_result("1", result['image'])[0] + elif result['name'] in ['blouse', 'outwear', 'dress', 'tops']: + seg_result = result['seg_result'] + else: + seg_result = get_seg_result("1", result['image'])[0] + + m = self.thicken_contours_and_display(seg_result, thickness=10, color=(0, 0, 0)) + back_sketch = result['image'].copy() + back_sketch[m > 100] = 255 + # 上传背后视角图 + _, img_encoded = cv2.imencode(".jpg", back_sketch) + + resp = oss_upload_image(self.minio_client, bucket='test', object_name=result['path'], image_bytes=img_encoded.tobytes()) + result['back_perspective_url'] = f"{resp.bucket_name}/{resp.object_name}" + return result + + def thicken_contours_and_display(self, mask, thickness=10, color=(0, 0, 0)): + mask = mask.astype(np.uint8) * 255 + # 查找轮廓 + contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + + # 创建一个彩色副本用于绘制轮廓 + mask_color = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) + + def thicken_contour_inward(contour, thick): + # 创建一个空白的黑色图像与原始掩码大小相同 + blank = np.zeros_like(mask) + # 在空白图像上绘制白色的轮廓 + cv2.drawContours(blank, [contour], -1, 255, thickness=thick) + # 找到轮廓的中心(可以用重心等方法近似) + M = cv2.moments(contour) + cx = int(M['m10'] / M['m00']) + cy = int(M['m01'] / M['m00']) + # 进行距离变换,离中心越近的值越小 + dist_transform = cv2.distanceTransform(255 - blank, cv2.DIST_L2, 5) + # 根据距离变换的值来决定是否保留像素,离中心近的像素更容易被保留 + result = np.zeros_like(mask) + for i in range(dist_transform.shape[0]): + for j in range(dist_transform.shape[1]): + if dist_transform[i, j] < thick: + result[i, j] = 255 + return result + + for contour in contours: + thickened_contour = thicken_contour_inward(contour, thickness) + 
mask_color[thickened_contour > 0] = color + + _, binary_result = cv2.threshold(mask_color, 127, 255, cv2.THRESH_BINARY) + + # 转换为掩码形式 + mask_result = cv2.cvtColor(binary_result, cv2.COLOR_BGR2GRAY) + return mask_result + + def is_file_exists(self, bucket_name, file_name): + try: + self.minio_client.stat_object(bucket_name, file_name) + return True + except Exception: + return False diff --git a/app/service/design_batch/pipeline/color.py b/app/service/design_batch/pipeline/color.py index 546c671..d6c84e4 100644 --- a/app/service/design_batch/pipeline/color.py +++ b/app/service/design_batch/pipeline/color.py @@ -14,14 +14,39 @@ class Color: def __call__(self, result): dim_image_h, dim_image_w = result['image'].shape[0:2] + # 渐变色 if "gradient" in result.keys() and result['gradient'] != "": bucket_name = result['gradient'].split('/')[0] object_name = result['gradient'][result['gradient'].find('/') + 1:] pattern = self.get_gradient(bucket_name=bucket_name, object_name=object_name) resize_pattern = cv2.resize(pattern, (dim_image_w, dim_image_h), interpolation=cv2.INTER_AREA) + # 无色 + elif "color" not in result.keys() or result['color'] == "": + result['final_image'] = result['pattern_image'] = result['single_image'] = result['image'] + result['alpha'] = 100 / 255.0 + return result + # 正常颜色 else: pattern = self.get_pattern(result['color']) resize_pattern = cv2.resize(pattern, (dim_image_w, dim_image_h), interpolation=cv2.INTER_AREA) + + if "partial_color" in result.keys() and result['partial_color'] != "": + bucket_name = result['partial_color'].split('/')[0] + object_name = result['partial_color'][result['partial_color'].find('/') + 1:] + partial_color = oss_get_image(oss_client=self.minio_client, bucket=bucket_name, object_name=object_name, data_type="cv2") + h, w = partial_color.shape[0:2] + resize_pattern = cv2.resize(resize_pattern, (w, h), interpolation=cv2.INTER_AREA) + # 分离出 png 图的 alpha 通道 + alpha_channel = partial_color[:, :, 3] + # 提取 png 图的 RGB 通道 + png_rgb = 
partial_color[:, :, :3] + # 创建一个与 cv 图大小相同的掩码,用于指示哪些像素需要替换 + mask = alpha_channel > 0 + # 将掩码扩展为 3 通道,以便与 cv 图进行逐元素操作 + mask_3ch = np.stack([mask] * 3, axis=-1) + # 根据掩码将 png 图的颜色覆盖到 cv 图上 + resize_pattern[mask_3ch] = png_rgb[mask_3ch] + resize_pattern = cv2.resize(resize_pattern, (dim_image_w, dim_image_h), interpolation=cv2.INTER_AREA) closed_mo = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2) gray_mo = np.expand_dims(result['gray'], axis=2).repeat(3, axis=2) get_image_fir = resize_pattern * (closed_mo / 255) * (gray_mo / 255) diff --git a/app/service/design_batch/pipeline/keypoint.py b/app/service/design_batch/pipeline/keypoint.py index 313a613..73d7586 100644 --- a/app/service/design_batch/pipeline/keypoint.py +++ b/app/service/design_batch/pipeline/keypoint.py @@ -4,7 +4,8 @@ import numpy as np from pymilvus import MilvusClient from app.core.config import * -from app.service.design_batch.utils.design_ensemble import get_keypoint_result +from app.service.design_fast.utils.design_ensemble import get_keypoint_result +from app.service.utils.decorator import ClassCallRunTime, RunTime logger = logging.getLogger(__name__) @@ -16,14 +17,15 @@ class KeyPoint: def get_name(cls): return cls.name + @ClassCallRunTime def __call__(self, result): if result['name'] in ['blouse', 'skirt', 'dress', 'outwear', 'trousers', 'tops', 'bottoms']: # 查询是否有数据 且类别相同 相同则直接读 不同则推理后更新 # result['clothes_keypoint'] = self.infer_keypoint_result(result) site = 'up' if result['name'] in ['blouse', 'outwear', 'dress', 'tops'] else 'down' # keypoint_cache = search_keypoint_cache(result["image_id"], site) - keypoint_cache = self.keypoint_cache(result, site) + # keypoint_cache = self.keypoint_cache(result, site) + keypoint_cache = False # 取消向量查询 直接过模型推理 - # keypoint_cache = False if keypoint_cache is False: keypoint_infer_result, site = self.infer_keypoint_result(result) result['clothes_keypoint'] = self.save_keypoint_cache(result["image_id"], keypoint_infer_result, site) @@ -87,7 +89,7 @@ 
class KeyPoint: logger.info(f"save keypoint cache milvus error : {e}") return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist())) - # @ RunTime + @RunTime def keypoint_cache(self, result, site): try: client = MilvusClient(uri=MILVUS_URL, token=MILVUS_TOKEN, db_name=MILVUS_ALIAS) diff --git a/app/service/design_batch/pipeline/loading.py b/app/service/design_batch/pipeline/loading.py index 8f02378..5a55d9d 100644 --- a/app/service/design_batch/pipeline/loading.py +++ b/app/service/design_batch/pipeline/loading.py @@ -1,6 +1,9 @@ +import io import logging import cv2 +import numpy as np +from PIL import Image from app.service.utils.new_oss_client import oss_get_image @@ -71,6 +74,8 @@ class LoadImage: keypoint = 'head_point' elif name == 'earring': keypoint = 'ear_point' + elif name == 'accessories': + keypoint = "accessories" else: raise KeyError(f"{name} does not belong to item category list: blouse, outwear, dress, trousers, skirt, " f"bag, shoes, hairstyle, earring.") diff --git a/app/service/design_batch/pipeline/print_painting.py b/app/service/design_batch/pipeline/print_painting.py index 6fe40d8..1534f9c 100644 --- a/app/service/design_batch/pipeline/print_painting.py +++ b/app/service/design_batch/pipeline/print_painting.py @@ -15,8 +15,25 @@ class PrintPainting: single_print = result['print']['single'] overall_print = result['print']['overall'] element_print = result['print']['element'] + partial_path = result['print']['partial'] if 'partial' in result['print'] else None result['single_image'] = None result['print_image'] = None + # TODO 给result['pattern_image'] resize 到resize_scale的大小 + # TODO 给result['mask'] resize 到resize_scale的大小 + + if result['resize_scale'][0] == 1.0 and result['resize_scale'][1] == 1.0: + pass + else: + height, width = result['pattern_image'].shape[:2] + new_width = int(width * result['resize_scale'][0]) + new_height = int(height * result['resize_scale'][1]) + + result['pattern_image'] = 
cv2.resize(result['pattern_image'], (new_width, new_height)) + result['final_image'] = cv2.resize(result['final_image'], (new_width, new_height)) + result['mask'] = cv2.resize(result['mask'], (new_width, new_height)) + result['gray'] = cv2.resize(result['gray'], (new_width, new_height)) + + print(1) if overall_print['print_path_list']: painting_dict = {'dim_image_h': result['pattern_image'].shape[0], 'dim_image_w': result['pattern_image'].shape[1]} result['print_image'] = result['pattern_image'] @@ -39,7 +56,7 @@ class PrintPainting: for i in range(len(single_print['print_path_list'])): image, image_mode = self.read_image(single_print['print_path_list'][i]) if image_mode == "RGBA": - new_size = (int(image.width * single_print['print_scale_list'][i]), int(image.height * single_print['print_scale_list'][i])) + new_size = (int(result['pattern_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['pattern_image'].shape[0] * single_print['print_scale_list'][i][1])) mask = image.split()[3] resized_source = image.resize(new_size) @@ -62,9 +79,12 @@ class PrintPainting: mask = np.expand_dims(mask, axis=2) mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) mask = cv2.bitwise_not(mask) + + mask = cv2.resize(mask, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) + image = cv2.resize(image, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) # 旋转后的坐标需要重新算 - rotate_mask, _ = self.img_rotate(mask, single_print['print_angle_list'][i], single_print['print_scale_list'][i]) - rotate_image, rotated_new_size = self.img_rotate(image, single_print['print_angle_list'][i], single_print['print_scale_list'][i]) + rotate_mask, _ = self.img_rotate(mask, single_print['print_angle_list'][i]) + rotate_image, rotated_new_size = self.img_rotate(image, 
single_print['print_angle_list'][i]) # x, y = int(result['print']['location'][i][0] - rotated_new_size[0] - (rotate_mask.shape[0] - image.shape[0]) / 2), int(result['print']['location'][i][1] - rotated_new_size[1] - (rotate_mask.shape[1] - image.shape[1]) / 2) x, y = int(single_print['location'][i][0] - rotated_new_size[0]), int(single_print['location'][i][1] - rotated_new_size[1]) @@ -143,7 +163,7 @@ class PrintPainting: for i in range(len(element_print['element_path_list'])): image, image_mode = self.read_image(element_print['element_path_list'][i]) if image_mode == "RGBA": - new_size = (int(image.width * element_print['element_scale_list'][i]), int(image.height * element_print['element_scale_list'][i])) + new_size = (int(result['final_image'].shape[1] * element_print['element_scale_list'][i][0]), int(result['final_image'].shape[0] * element_print['element_scale_list'][i][1])) mask = image.split()[3] resized_source = image.resize(new_size) @@ -165,9 +185,11 @@ class PrintPainting: mask = np.expand_dims(mask, axis=2) mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) mask = cv2.bitwise_not(mask) + mask = cv2.resize(mask, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) + image = cv2.resize(image, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) # 旋转后的坐标需要重新算 - rotate_mask, _ = self.img_rotate(mask, element_print['element_angle_list'][i], element_print['element_scale_list'][i]) - rotate_image, rotated_new_size = self.img_rotate(image, element_print['element_angle_list'][i], element_print['element_scale_list'][i]) + rotate_mask, _ = self.img_rotate(mask, element_print['element_angle_list'][i]) + rotate_image, rotated_new_size = self.img_rotate(image, element_print['element_angle_list'][i]) # x, y = int(result['print']['location'][i][0] - rotated_new_size[0] 
- (rotate_mask.shape[0] - image.shape[0]) / 2), int(result['print']['location'][i][1] - rotated_new_size[1] - (rotate_mask.shape[1] - image.shape[1]) / 2) x, y = int(element_print['location'][i][0] - rotated_new_size[0]), int(element_print['location'][i][1] - rotated_new_size[1]) @@ -241,6 +263,45 @@ class PrintPainting: temp_fg = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2) tmp2 = (result['final_image'] * (temp_fg / 255)).astype(np.uint8) result['single_image'] = cv2.add(tmp1, tmp2) + + if partial_path: + print_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8) + mask_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8) + image, image_mode = self.read_image(partial_path) + if image_mode == "RGBA": + new_size = (result['pattern_image'].shape[1], result['pattern_image'].shape[0]) + + mask = image.split()[3] + resized_source = image.resize(new_size) + resized_source_mask = mask.resize(new_size) + + # rotated_resized_source = resized_source.rotate(-partial_print['print_angle_list'][i]) + # rotated_resized_source_mask = resized_source_mask.rotate(-partial_print['print_angle_list'][i]) + + source_image_pil = Image.fromarray(cv2.cvtColor(print_background, cv2.COLOR_BGR2RGB)) + source_image_pil_mask = Image.fromarray(cv2.cvtColor(mask_background, cv2.COLOR_BGR2RGB)) + + source_image_pil.paste(resized_source, (0, 0), resized_source) + source_image_pil_mask.paste(resized_source_mask, (0, 0), resized_source_mask) + + print_background = cv2.cvtColor(np.array(source_image_pil), cv2.COLOR_RGBA2BGR) + mask_background = cv2.cvtColor(np.array(source_image_pil_mask), cv2.COLOR_RGBA2BGR) + ret, mask_background = cv2.threshold(mask_background, 124, 255, cv2.THRESH_BINARY) + print_mask = cv2.bitwise_and(result['mask'], cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY)) + img_fg = cv2.bitwise_or(print_background, print_background, mask=print_mask) + # TODO 
element 丢失信息 + three_channel_image = cv2.merge([cv2.bitwise_not(print_mask), cv2.bitwise_not(print_mask), cv2.bitwise_not(print_mask)]) + img_bg = cv2.bitwise_and(result['final_image'], three_channel_image) + # mask_mo = np.expand_dims(print_mask, axis=2).repeat(3, axis=2) + # gray_mo = np.expand_dims(result['gray'], axis=2).repeat(3, axis=2) + # img_fg = (img_fg * (mask_mo / 255) * (gray_mo / 255)).astype(np.uint8) + result['final_image'] = cv2.add(img_bg, img_fg) + canvas = np.full_like(result['final_image'], 255) + temp_bg = np.expand_dims(cv2.bitwise_not(result['mask']), axis=2).repeat(3, axis=2) + tmp1 = (canvas * (temp_bg / 255)).astype(np.uint8) + temp_fg = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2) + tmp2 = (result['final_image'] * (temp_fg / 255)).astype(np.uint8) + result['single_image'] = cv2.add(tmp1, tmp2) return result @staticmethod @@ -360,10 +421,10 @@ class PrintPainting: return print_image def get_print(self, print_dict): - if 'print_scale_list' not in print_dict.keys() or print_dict['print_scale_list'][0] < 0.3: + if 'print_scale_list' not in print_dict.keys() or print_dict['print_scale_list'][0][0] < 0.3: print_dict['scale'] = 0.3 else: - print_dict['scale'] = print_dict['print_scale_list'][0] + print_dict['scale'] = print_dict['print_scale_list'][0][0] bucket_name = print_dict['print_path_list'][0].split("/", 1)[0] object_name = print_dict['print_path_list'][0].split("/", 1)[1] @@ -386,8 +447,9 @@ class PrintPainting: # y_offset = random.randint(0, image.shape[1] - image_size_w) # 1.拿到偏移量后和resize后的print宽高取余 得到真正偏移量 - x_offset = print_w - int(location[0][1] % print_w) - y_offset = print_w - int(location[0][0] % print_h) + # 偏移量增加2分之print.w 使坐标位于图中间 如果要位于左上角删除+ print_w // 2 即可 + x_offset = print_w - int(location[0][1] % print_w) + print_w // 2 + y_offset = print_h - int(location[0][0] % print_h) + print_h // 2 # y_offset = int(location[0][0]) # x_offset = int(location[0][1]) @@ -409,7 +471,7 @@ class PrintPainting: return high, low 
@staticmethod - def img_rotate(image, angel, scale): + def img_rotate(image, angel): """顺时针旋转图像任意角度 Args: @@ -424,7 +486,7 @@ class PrintPainting: center = (w // 2, h // 2) # if type(angel) is not int: # angel = 0 - M = cv2.getRotationMatrix2D(center, -angel, scale) + M = cv2.getRotationMatrix2D(center, -angel, 1) # 调整旋转后的图像长宽 rotated_h = int((w * np.abs(M[0, 1]) + (h * np.abs(M[0, 0])))) rotated_w = int((h * np.abs(M[0, 1]) + (w * np.abs(M[0, 0])))) @@ -433,7 +495,7 @@ class PrintPainting: # 旋转图像 rotated_img = cv2.warpAffine(image, M, (rotated_w, rotated_h)) - return rotated_img, ((rotated_img.shape[1] - image.shape[1] * scale) // 2, (rotated_img.shape[0] - image.shape[0] * scale) // 2) + return rotated_img, ((rotated_img.shape[1] - image.shape[1]) // 2, (rotated_img.shape[0] - image.shape[0]) // 2) # return rotated_img, (0, 0) @staticmethod @@ -442,8 +504,11 @@ class PrintPainting: angle: 旋转的角度 crop: 是否需要进行裁剪,布尔向量 """ + if not isinstance(crop, bool): + raise ValueError("The 'crop' parameter must be a boolean.") + crop_image = lambda img, x0, y0, w, h: img[y0:y0 + h, x0:x0 + w] - w, h = img.shape[:2] + h, w = img.shape[:2] # 旋转角度的周期是360° angle %= 360 # 计算仿射变换矩阵 @@ -455,7 +520,7 @@ class PrintPainting: if crop: # 裁剪角度的等效周期是180° angle_crop = angle % 180 - if angle > 90: + if angle_crop > 90: angle_crop = 180 - angle_crop # 转化角度为弧度 theta = angle_crop * np.pi / 180 diff --git a/app/service/design_batch/pipeline/scale.py b/app/service/design_batch/pipeline/scale.py index 1908a9c..d1c7a36 100644 --- a/app/service/design_batch/pipeline/scale.py +++ b/app/service/design_batch/pipeline/scale.py @@ -46,4 +46,16 @@ class Scaling: result['scale'] = result['scale_bag'] elif result['keypoint'] == 'ear_point': result['scale'] = result['scale_earrings'] + elif result['keypoint'] == 'accessories': + # 由于没有识别配饰keypoint的模型 所以统一将配饰的两个关键点设定为 (0,0) (0,img.width) + # 模特的关键点设定为(0,0) (0,320/2) 距离比例简写为 160 / img.width + distance_clo = result['img_shape'][1] + distance_bdy = 320 / 2 + + if 
distance_clo == 0: + result['scale'] = 1 + else: + result['scale'] = distance_bdy / distance_clo + else: + result['scale'] = 1 return result diff --git a/app/service/design_batch/pipeline/segmentation.py b/app/service/design_batch/pipeline/segmentation.py index aa05c0d..0c9c51e 100644 --- a/app/service/design_batch/pipeline/segmentation.py +++ b/app/service/design_batch/pipeline/segmentation.py @@ -5,7 +5,8 @@ import cv2 import numpy as np from app.core.config import SEG_CACHE_PATH -from app.service.design_batch.utils.design_ensemble import get_seg_result +from app.service.design_fast.utils.design_ensemble import get_seg_result +from app.service.utils.decorator import ClassCallRunTime from app.service.utils.new_oss_client import oss_get_image logger = logging.getLogger() @@ -15,6 +16,7 @@ class Segmentation: def __init__(self, minio_client): self.minio_client = minio_client + @ClassCallRunTime def __call__(self, result): if "seg_mask_url" in result.keys() and result['seg_mask_url'] != "": seg_mask = oss_get_image(oss_client=self.minio_client, bucket=result['seg_mask_url'].split('/')[0], object_name=result['seg_mask_url'][result['seg_mask_url'].find('/') + 1:], data_type="cv2") @@ -31,24 +33,37 @@ class Segmentation: result['back_mask'] = np.array(green_mask, dtype=np.uint8) * 255 result['mask'] = result['front_mask'] + result['back_mask'] else: - # 本地查询seg 缓存是否存在 - _, seg_result = self.load_seg_result(result["image_id"]) - result['seg_result'] = seg_result - if not _: + # preview 过模型 不缓存 + if "preview_submit" in result.keys() and result['preview_submit'] == "preview": # 推理获得seg 结果 - seg_result = get_seg_result(result["image_id"], result['image'])[0] + seg_result = get_seg_result(result["image_id"], result['image']) + # submit 过模型 缓存 + elif "preview_submit" in result.keys() and result['preview_submit'] == "submit": + # 推理获得seg 结果 + seg_result = get_seg_result(result["image_id"], result['image']) self.save_seg_result(seg_result, result['image_id']) + # null 正常流程 
加载本地缓存 无缓存则过模型 + else: + # 本地查询seg 缓存是否存在 + _, seg_result = self.load_seg_result(result["image_id"]) + # 判断缓存和实际图片size是否相同 + if not _ or result["image"].shape[:2] != seg_result.shape: + # 推理获得seg 结果 + seg_result = get_seg_result(result["image_id"], result['image']) + self.save_seg_result(seg_result, result['image_id']) + result['seg_result'] = seg_result + # 处理前片后片 - temp_front = seg_result == 1.0 + temp_front = seg_result == 1 result['front_mask'] = (255 * (temp_front + 0).astype(np.uint8)) - temp_back = seg_result == 2.0 + temp_back = seg_result == 2 result['back_mask'] = (255 * (temp_back + 0).astype(np.uint8)) result['mask'] = result['front_mask'] + result['back_mask'] return result @staticmethod def save_seg_result(seg_result, image_id): - file_path = f"seg_cache/{image_id}.npy" + file_path = f"{SEG_CACHE_PATH}{image_id}.npy" try: np.save(file_path, seg_result) logger.debug(f"保存成功 :{os.path.abspath(file_path)}") @@ -57,7 +72,7 @@ class Segmentation: @staticmethod def load_seg_result(image_id): - file_path = f"seg_cache/{image_id}.npy" + file_path = f"{SEG_CACHE_PATH}{image_id}.npy" # logger.info(f"load seg file name is :{SEG_CACHE_PATH}{image_id}.npy") try: seg_result = np.load(file_path) diff --git a/app/service/design_batch/pipeline/split.py b/app/service/design_batch/pipeline/split.py index 5dbcef5..88e8e75 100644 --- a/app/service/design_batch/pipeline/split.py +++ b/app/service/design_batch/pipeline/split.py @@ -7,10 +7,11 @@ from PIL import Image from cv2 import cvtColor, COLOR_BGR2RGBA from app.core.config import AIDA_CLOTHING -from app.service.design_batch.utils.conversion_image import rgb_to_rgba -from app.service.design_batch.utils.upload_image import upload_png_mask +from app.service.design_fast.utils.conversion_image import rgb_to_rgba +from app.service.design_fast.utils.transparent import sketch_to_transparent +from app.service.design_fast.utils.upload_image import upload_png_mask from app.service.utils.generate_uuid import generate_uuid -from 
app.service.utils.new_oss_client import oss_upload_image +from app.service.utils.new_oss_client import oss_upload_image, oss_get_image class Split(object): @@ -20,51 +21,95 @@ class Split(object): def __call__(self, result): try: - if result['name'] in ('outwear', 'dress', 'blouse', 'skirt', 'trousers', 'tops', 'bottoms'): - front_mask = result['front_mask'] - back_mask = result['back_mask'] + if result['name'] in ('outwear', 'dress', 'blouse', 'skirt', 'trousers', 'tops', 'bottoms', 'accessories'): + + if result['resize_scale'][0] == 1.0 and result['resize_scale'][1] == 1.0: + front_mask = result['front_mask'] + back_mask = result['back_mask'] + else: + height, width = result['front_mask'].shape[:2] + new_width = int(width * result['resize_scale'][0]) + new_height = int(height * result['resize_scale'][1]) + + front_mask = cv2.resize(result['front_mask'], (new_width, new_height)) + back_mask = cv2.resize(result['back_mask'], (new_width, new_height)) + rgba_image = rgb_to_rgba(result['final_image'], front_mask + back_mask) - new_size = (int(rgba_image.shape[1] * result["scale"] * result["resize_scale"][0]), int(rgba_image.shape[0] * result["scale"] * result["resize_scale"][1])) + new_size = (int(rgba_image.shape[1] * result["scale"]), int(rgba_image.shape[0] * result["scale"])) rgba_image = cv2.resize(rgba_image, new_size) result_front_image = np.zeros_like(rgba_image) front_mask = cv2.resize(front_mask, new_size) result_front_image[front_mask != 0] = rgba_image[front_mask != 0] result_front_image_pil = Image.fromarray(cvtColor(result_front_image, COLOR_BGR2RGBA)) + if 'transparent' in result.keys(): + # 用户自选区域transparent + transparent = result['transparent'] + if transparent['mask_url'] is not None and transparent['mask_url'] != "": + # 预处理用户自选区mask + seg_mask = oss_get_image(oss_client=self.minio_client, bucket=transparent['mask_url'].split('/')[0], object_name=transparent['mask_url'][transparent['mask_url'].find('/') + 1:], data_type="cv2") + seg_mask = 
cv2.resize(seg_mask, new_size, interpolation=cv2.INTER_NEAREST) + # 转换颜色空间为 RGB(OpenCV 默认是 BGR) + image_rgb = cv2.cvtColor(seg_mask, cv2.COLOR_BGR2RGB) + + r, g, b = cv2.split(image_rgb) + blue_mask = b > r + + # 创建红色和绿色掩码 + transparent_mask = np.array(blue_mask, dtype=np.uint8) * 255 + result_front_image_pil = sketch_to_transparent(result_front_image_pil, transparent_mask, transparent["scale"]) + else: + result_front_image_pil = sketch_to_transparent(result_front_image_pil, front_mask, transparent["scale"]) result['front_image'], result["front_image_url"], _ = upload_png_mask(self.minio_client, result_front_image_pil, f'{generate_uuid()}', mask=None) height, width = front_mask.shape mask_image = np.zeros((height, width, 3)) mask_image[front_mask != 0] = [0, 0, 255] - if result["name"] in ('blouse', 'dress', 'outwear', 'tops'): - result_back_image = np.zeros_like(rgba_image) - back_mask = cv2.resize(back_mask, new_size) - result_back_image[back_mask != 0] = rgba_image[back_mask != 0] - result_back_image_pil = Image.fromarray(cvtColor(result_back_image, COLOR_BGR2RGBA)) - result['back_image'], result["back_image_url"], _ = upload_png_mask(self.minio_client, result_back_image_pil, f'{generate_uuid()}', mask=None) - mask_image[back_mask != 0] = [0, 255, 0] + # if result["name"] in ('blouse', 'dress', 'outwear', 'tops'): + # result_back_image = np.zeros_like(rgba_image) + # back_mask = cv2.resize(back_mask, new_size) + # result_back_image[back_mask != 0] = rgba_image[back_mask != 0] + # result_back_image_pil = Image.fromarray(cvtColor(result_back_image, COLOR_BGR2RGBA)) + # result['back_image'], result["back_image_url"], _ = upload_png_mask(self.minio_client, result_back_image_pil, f'{generate_uuid()}', mask=None) + # mask_image[back_mask != 0] = [0, 255, 0] + # + # rbga_mask = rgb_to_rgba(mask_image, front_mask + back_mask) + # mask_pil = Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) + # image_data = io.BytesIO() + # mask_pil.save(image_data, 
format='PNG') + # image_data.seek(0) + # image_bytes = image_data.read() + # req = oss_upload_image(oss_client=self.minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) + # result['mask_url'] = req.bucket_name + "/" + req.object_name + # else: + # rbga_mask = rgb_to_rgba(mask_image, front_mask) + # mask_pil = Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) + # image_data = io.BytesIO() + # mask_pil.save(image_data, format='PNG') + # image_data.seek(0) + # image_bytes = image_data.read() + # req = oss_upload_image(oss_client=self.minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) + # result['mask_url'] = req.bucket_name + "/" + req.object_name + # result['back_image'] = None + # result["back_image_url"] = None + # # result["back_mask_url"] = None + # # result['back_mask_image'] = None - rbga_mask = rgb_to_rgba(mask_image, front_mask + back_mask) - mask_pil = Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) - image_data = io.BytesIO() - mask_pil.save(image_data, format='PNG') - image_data.seek(0) - image_bytes = image_data.read() - req = oss_upload_image(oss_client=self.minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) - result['mask_url'] = req.bucket_name + "/" + req.object_name - else: - rbga_mask = rgb_to_rgba(mask_image, front_mask) - mask_pil = Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) - image_data = io.BytesIO() - mask_pil.save(image_data, format='PNG') - image_data.seek(0) - image_bytes = image_data.read() - req = oss_upload_image(oss_client=self.minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) - result['mask_url'] = req.bucket_name + "/" + req.object_name - result['back_image'] = None - result["back_image_url"] = None - # result["back_mask_url"] = None - # 
result['back_mask_image'] = None + result_back_image = np.zeros_like(rgba_image) + back_mask = cv2.resize(back_mask, new_size) + result_back_image[back_mask != 0] = rgba_image[back_mask != 0] + result_back_image_pil = Image.fromarray(cvtColor(result_back_image, COLOR_BGR2RGBA)) + result['back_image'], result["back_image_url"], _ = upload_png_mask(self.minio_client, result_back_image_pil, f'{generate_uuid()}', mask=None) + mask_image[back_mask != 0] = [0, 255, 0] + + rbga_mask = rgb_to_rgba(mask_image, front_mask + back_mask) + mask_pil = Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) + image_data = io.BytesIO() + mask_pil.save(image_data, format='PNG') + image_data.seek(0) + image_bytes = image_data.read() + req = oss_upload_image(oss_client=self.minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) + result['mask_url'] = req.bucket_name + "/" + req.object_name # 创建中间图层 result_pattern_image_rgba = rgb_to_rgba(result['pattern_image'], result['mask']) result_pattern_image_pil = Image.fromarray(cvtColor(result_pattern_image_rgba, COLOR_BGR2RGBA)) diff --git a/app/service/design_batch/utils/organize.py b/app/service/design_batch/utils/organize.py index 8190de0..33edc4f 100644 --- a/app/service/design_batch/utils/organize.py +++ b/app/service/design_batch/utils/organize.py @@ -33,8 +33,8 @@ def organize_clothing(layer): mask=cv2.resize(layer['mask'], layer["front_image"].size), gradient_string=layer['gradient_string'] if 'gradient_string' in layer.keys() else "", pattern_image_url=layer['pattern_image_url'], - pattern_image=layer['pattern_image'] - + pattern_image=layer['pattern_image'], + # back_perspective_url=layer['back_perspective_url'] if 'back_perspective_url' in layer.keys() else "" ) # 后片数据 back_layer = dict(priority=-layer.get("priority", 0) if layer.get("layer_order", False) else PRIORITY_DICT.get(f'{layer["name"].lower()}_back', None), @@ -50,6 +50,46 @@ def 
organize_clothing(layer): mask=cv2.resize(layer['mask'], layer["front_image"].size), gradient_string=layer['gradient_string'] if 'gradient_string' in layer.keys() else "", pattern_image_url=layer['pattern_image_url'], + # back_perspective_url=layer['back_perspective_url'] if 'back_perspective_url' in layer.keys() else "" + ) + return front_layer, back_layer + + +def organize_accessories(layer): + # 起始坐标 + start_point = (0, 0) + # 前片数据 + front_layer = dict(priority=layer['priority'] if layer.get("layer_order", False) else PRIORITY_DICT.get(f'{layer["name"].lower()}_front', None), + name=f'{layer["name"].lower()}_front', + image=layer["front_image"], + # mask_image=layer['front_mask_image'], + image_url=layer['front_image_url'], + mask_url=layer['mask_url'], + sacle=layer['scale'], + clothes_keypoint=(0, 0), + position=start_point, + resize_scale=layer["resize_scale"], + mask=cv2.resize(layer['mask'], layer["front_image"].size), + gradient_string=layer['gradient_string'] if 'gradient_string' in layer.keys() else "", + pattern_image_url=layer['pattern_image_url'], + pattern_image=layer['pattern_image'], + # back_perspective_url=layer['back_perspective_url'] if 'back_perspective_url' in layer.keys() else "" + ) + # 后片数据 + back_layer = dict(priority=-layer.get("priority", 0) if layer.get("layer_order", False) else PRIORITY_DICT.get(f'{layer["name"].lower()}_back', None), + name=f'{layer["name"].lower()}_back', + image=layer["back_image"], + # mask_image=layer['back_mask_image'], + image_url=layer['back_image_url'], + mask_url=layer['mask_url'], + sacle=layer['scale'], + clothes_keypoint=(0, 0), + position=start_point, + resize_scale=layer["resize_scale"], + mask=cv2.resize(layer['mask'], layer["front_image"].size), + gradient_string=layer['gradient_string'] if 'gradient_string' in layer.keys() else "", + pattern_image_url=layer['pattern_image_url'], + # back_perspective_url=layer['back_perspective_url'] if 'back_perspective_url' in layer.keys() else "" ) return 
front_layer, back_layer From 4e55275e6e592f242616c1d62b76701bca2e473b Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Mon, 21 Apr 2025 10:04:40 +0800 Subject: [PATCH 052/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20generate=20product=20relight=20pose=5F?= =?UTF-8?q?transform=20=E5=BC=80=E5=8F=91=EF=BC=8C=E8=AE=BE=E7=BD=AEbatch?= =?UTF-8?q?=20generate=20=E7=9A=84=E4=BC=98=E5=85=88=E7=BA=A7=E4=B8=BA100?= =?UTF-8?q?=20=EF=BC=8Csingle=20generate=20=E7=9A=84=E4=BC=98=E5=85=88?= =?UTF-8?q?=E7=BA=A7=E4=B8=BA1=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_generate_image.py | 22 +- app/core/config.py | 3 + app/schemas/generate_image.py | 23 +++ app/schemas/pose_transform.py | 7 + app/service/generate_batch_image/service.py | 24 +++ .../service_batch_generate_product_image.py | 191 ++++++++++++++++++ .../service_batch_generate_relight_image.py | 162 +++++++++++++++ .../service_batch_pose_transform.py | 176 ++++++++++++++++ .../generate_image/service_generate_image.py | 4 +- .../service_generate_product_image.py | 4 +- .../service_generate_relight_image.py | 4 +- app/service/utils/redis_utils.py | 99 +++++++++ 12 files changed, 712 insertions(+), 7 deletions(-) create mode 100644 app/service/generate_batch_image/service.py create mode 100644 app/service/generate_batch_image/service_batch_generate_product_image.py create mode 100644 app/service/generate_batch_image/service_batch_generate_relight_image.py create mode 100644 app/service/generate_batch_image/service_batch_pose_transform.py create mode 100644 app/service/utils/redis_utils.py diff --git a/app/api/api_generate_image.py 
b/app/api/api_generate_image.py index a37bec3..f151b91 100644 --- a/app/api/api_generate_image.py +++ b/app/api/api_generate_image.py @@ -3,8 +3,10 @@ import logging from fastapi import APIRouter, BackgroundTasks, HTTPException -from app.schemas.generate_image import GenerateImageModel, GenerateProductImageModel, GenerateSingleLogoImageModel, GenerateRelightImageModel, GenerateMultiViewModel +from app.schemas.generate_image import GenerateImageModel, GenerateProductImageModel, GenerateSingleLogoImageModel, GenerateRelightImageModel, GenerateMultiViewModel, BatchGenerateProductImageModel, BatchGenerateRelightImageModel +from app.schemas.pose_transform import BatchPoseTransformModel from app.schemas.response_template import ResponseModel +from app.service.generate_batch_image.service import start_product_batch_generate, start_relight_batch_generate, start_pose_transform_batch_generate from app.service.generate_image.service_generate_image import GenerateImage, infer_cancel as generate_image_infer_cancel from app.service.generate_image.service_generate_multi_view import GenerateMultiView, infer_cancel as generate_multi_view_cancel from app.service.generate_image.service_generate_product_image import GenerateProductImage, infer_cancel as generate_product_image_cancel @@ -228,3 +230,21 @@ def generate_relight_image(tasks_id: str): logger.warning(f"generate_relight_image_cancel_cancel Run Exception @@@@@@:{e}") raise HTTPException(status_code=404, detail=str(e)) return ResponseModel(data=data['data']) + + +"""batch generate img""" + + +@router.post("/batch_generate_product_image") +async def design(request_batch_item: BatchGenerateProductImageModel): + return await start_product_batch_generate(request_batch_item) + + +@router.post("/batch_generate_relight_image") +async def design(request_batch_item: BatchGenerateRelightImageModel): + return await start_relight_batch_generate(request_batch_item) + + +@router.post("/batch_generate_pose_transform_image") +async def 
design(request_batch_item: BatchPoseTransformModel): + return await start_pose_transform_batch_generate(request_batch_item) diff --git a/app/core/config.py b/app/core/config.py index ac9181f..aaf32d7 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -135,12 +135,14 @@ GEN_SINGLE_LOGO_RABBITMQ_QUEUES = os.getenv("GEN_SINGLE_LOGO_RABBITMQ_QUEUES", f # Generate Product service config 旧版product img 模型 GPI_RABBITMQ_QUEUES = os.getenv("GEN_PRODUCT_IMAGE_RABBITMQ_QUEUES", f"ToProductImage{RABBITMQ_ENV}") +BATCH_GPI_RABBITMQ_QUEUES = os.getenv("BATCH_GEN_PRODUCT_IMAGE_RABBITMQ_QUEUES", f"BatchToProductImage{RABBITMQ_ENV}") GPI_MODEL_NAME_OVERALL = 'diffusion_ensemble_all' GPI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_cnet' GPI_MODEL_URL = '10.1.1.243:10051' # Generate Single Logo service config GRI_RABBITMQ_QUEUES = os.getenv("GEN_RELIGHT_IMAGE_RABBITMQ_QUEUES", f"Relight{RABBITMQ_ENV}") +BATCH_GRI_RABBITMQ_QUEUES = os.getenv("BATCH_GEN_RELIGHT_IMAGE_RABBITMQ_QUEUES", f"BatchRelight{RABBITMQ_ENV}") GRI_MODEL_NAME_OVERALL = 'diffusion_relight_ensemble' GRI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_relight' GRI_MODEL_URL = '10.1.1.240:10051' @@ -148,6 +150,7 @@ GRI_MODEL_URL = '10.1.1.240:10051' # Pose Transform service config PS_RABBITMQ_QUEUES = os.getenv("PS_RABBITMQ_QUEUES", f"PoseTransform{RABBITMQ_ENV}") +BATCH_PS_RABBITMQ_QUEUES = os.getenv("BATCH_PS_RABBITMQ_QUEUES", f"BatchPoseTransform{RABBITMQ_ENV}") PT_MODEL_URL = '10.1.1.243:10061' # SEG service config diff --git a/app/schemas/generate_image.py b/app/schemas/generate_image.py index 7181418..99d1836 100644 --- a/app/schemas/generate_image.py +++ b/app/schemas/generate_image.py @@ -36,3 +36,26 @@ class GenerateRelightImageModel(BaseModel): image_url: str direction: str product_type: str + + +""" + batch generate image +""" + + +class BatchGenerateProductImageModel(BaseModel): + tasks_id: str + prompt: str + image_url: str + image_strength: float + product_type: str + batch_size: int + + +class 
BatchGenerateRelightImageModel(BaseModel): + tasks_id: str + prompt: str + image_url: str + direction: str + product_type: str + batch_size: int diff --git a/app/schemas/pose_transform.py b/app/schemas/pose_transform.py index 045d8b9..22526ff 100644 --- a/app/schemas/pose_transform.py +++ b/app/schemas/pose_transform.py @@ -5,3 +5,10 @@ class PoseTransformModel(BaseModel): image_url: str tasks_id: str pose_id: str + + +class BatchPoseTransformModel(BaseModel): + image_url: str + tasks_id: str + pose_id: str + batch_size: int diff --git a/app/service/generate_batch_image/service.py b/app/service/generate_batch_image/service.py new file mode 100644 index 0000000..2279382 --- /dev/null +++ b/app/service/generate_batch_image/service.py @@ -0,0 +1,24 @@ +from app.service.generate_batch_image.service_batch_generate_product_image import batch_generate_product, publish_status as product_publish_status +from app.service.generate_batch_image.service_batch_generate_relight_image import batch_generate_relight, publish_status as relight_publish_status +from app.service.generate_batch_image.service_batch_pose_transform import batch_generate_pose_transform, publish_status as pose_transform_publish_status + + +async def start_product_batch_generate(data): + generate_clothes_task = batch_generate_product.delay(data.dict()) + print(generate_clothes_task) + product_publish_status(data.tasks_id, f"0/{data.batch_size}", "") + return {"task_id": data.tasks_id, "state": generate_clothes_task.state} + + +async def start_relight_batch_generate(data): + generate_clothes_task = batch_generate_relight.delay(data.dict()) + print(generate_clothes_task) + relight_publish_status(data.tasks_id, f"0/{data.batch_size}", "") + return {"task_id": data.tasks_id, "state": generate_clothes_task.state} + + +async def start_pose_transform_batch_generate(data): + generate_clothes_task = batch_generate_pose_transform.delay(data.dict()) + print(generate_clothes_task) + 
pose_transform_publish_status(data.tasks_id, f"0/{data.batch_size}", "") + return {"task_id": data.tasks_id, "state": generate_clothes_task.state} diff --git a/app/service/generate_batch_image/service_batch_generate_product_image.py b/app/service/generate_batch_image/service_batch_generate_product_image.py new file mode 100644 index 0000000..438ec99 --- /dev/null +++ b/app/service/generate_batch_image/service_batch_generate_product_image.py @@ -0,0 +1,191 @@ +# 旧版product +# !/usr/bin/env python +# -*- coding: UTF-8 -*- +""" +@Project :trinity_client +@File :service_att_recognition.py +@Author :周成融 +@Date :2023/7/26 12:01:05 +@detail : +""" +import json +import logging + +import cv2 +import numpy as np +import tritonclient.grpc as grpcclient +from PIL import Image +from celery import Celery +from tritonclient.utils import np_to_triton_dtype + +from app.core.config import * +from app.schemas.generate_image import BatchGenerateProductImageModel +from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image +from app.service.utils.oss_client import oss_get_image + +celery_app = Celery('product_tasks', broker=f'amqp://rabbit:123456@18.167.251.121:5672//', backend='rpc://', BROKER_CONNECTION_RETRY_ON_STARTUP=True) +celery_app.conf.task_default_queue = 'queue_product' +celery_app.conf.worker_log_format = '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s' +celery_app.conf.worker_hijack_root_logger = False +logger = logging.getLogger() +logging.getLogger('pika').setLevel(logging.WARNING) +grpc_client = grpcclient.InferenceServerClient(url=GPI_MODEL_URL) +category = "product_image" + + +@celery_app.task +def batch_generate_product(batch_request_data): + logger.info(f"batch_generate_product batch_request_data:{batch_request_data}") + tasks_id = batch_request_data['tasks_id'] + user_id = tasks_id.rsplit('-', 1)[1] + batch_size = batch_request_data['batch_size'] + image = pre_processing_image(batch_request_data['image_url']) + image = 
cv2.cvtColor(image, cv2.COLOR_RGBA2RGB) + images = [image.astype(np.uint8)] * 1 + + prompts = [batch_request_data['prompt']] * 1 + + if batch_request_data['product_type'] == "single": + text_obj = np.array(prompts, dtype="object").reshape((-1, 1)) + image_obj = np.array(images, dtype=np.uint8).reshape((-1, 768, 512, 3)) + image_strength_obj = np.array(batch_request_data['image_strength'], dtype=np.float32).reshape((-1, 1)) + else: + text_obj = np.array(prompts, dtype="object").reshape((1)) + image_obj = np.array(images, dtype=np.uint8).reshape((768, 512, 3)) + image_strength_obj = np.array(batch_request_data['image_strength'], dtype=np.float32).reshape((1)) + input_text = grpcclient.InferInput("prompt", text_obj.shape, np_to_triton_dtype(text_obj.dtype)) + input_image = grpcclient.InferInput("input_image", image_obj.shape, "UINT8") + input_image_strength = grpcclient.InferInput("image_strength", image_strength_obj.shape, np_to_triton_dtype(image_strength_obj.dtype)) + + input_text.set_data_from_numpy(text_obj) + input_image.set_data_from_numpy(image_obj) + input_image_strength.set_data_from_numpy(image_strength_obj) + + inputs = [input_text, input_image, input_image_strength] + + image_url_list = [] + for i in range(batch_size): + try: + if batch_request_data['product_type'] == "single": + result = grpc_client.infer(model_name=GPI_MODEL_NAME_SINGLE, inputs=inputs, priority=100) + image = result.as_numpy("generated_cnet_image") + else: + result = grpc_client.infer(model_name=GPI_MODEL_NAME_OVERALL, inputs=inputs, priority=100) + image = result.as_numpy("generated_inpaint_image") + image_result = Image.fromarray(np.squeeze(image.astype(np.uint8))) + except Exception as e: + if 'mask_list' in str(e): + e_text_obj = np.array(prompts, dtype="object").reshape((-1, 1)) + e_image_obj = np.array(images, dtype=np.uint8).reshape((-1, 768, 512, 3)) + e_image_strength_obj = np.array(batch_request_data['image_strength'], dtype=np.float32).reshape((-1, 1)) + + e_input_text = 
grpcclient.InferInput("prompt", e_text_obj.shape, np_to_triton_dtype(e_text_obj.dtype)) + e_input_image = grpcclient.InferInput("input_image", e_image_obj.shape, "UINT8") + e_input_image_strength = grpcclient.InferInput("image_strength", e_image_strength_obj.shape, np_to_triton_dtype(e_image_strength_obj.dtype)) + + e_input_text.set_data_from_numpy(e_text_obj) + e_input_image.set_data_from_numpy(e_image_obj) + e_input_image_strength.set_data_from_numpy(e_image_strength_obj) + + result = grpc_client.infer(model_name=GPI_MODEL_NAME_SINGLE, inputs=[e_input_text, e_input_image, e_input_image_strength], priority=100) + image = result.as_numpy("generated_cnet_image") + image_result = Image.fromarray(np.squeeze(image.astype(np.uint8))) + else: + image_result = str(e) + logger.error(image_result) + + if isinstance(image_result, Image.Image): + image_url = upload_SDXL_image(image_result, user_id=user_id, category=f"{category}", file_name=f"{tasks_id}-batch-{i}.png") + image_url_list.append(image_url) + else: + image_url = image_result + if DEBUG is False: + if i + 1 < batch_size: + publish_status(tasks_id, f"{i + 1}/{batch_size}", image_url) + logger.info(f" [x] {tasks_id}:tasks_id *** progress:{i + 1}/{batch_size} *** image_url:{image_url}") + print(f" [x] {tasks_id}:tasks_id *** progress:{i + 1}/{batch_size} *** image_url:{image_url}") + else: + publish_status(tasks_id, f"OK", image_url_list) + logger.info(f" [x] {tasks_id}:tasks_id *** progress:OK *** image_url:{image_url_list}") + print(f" [x] {tasks_id}:tasks_id *** progress:OK *** image_url:{image_url_list}") + + +def pre_processing_image(image_url): + image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL") + # 目标图片的尺寸 + target_width = 512 + target_height = 768 + + # 原始图片的尺寸 + original_width, original_height = image.size + + # 计算宽度和高度的缩放比例 + width_ratio = target_width / original_width + height_ratio = target_height / original_height + + # 
选择较小的缩放比例,确保图片能完整放入目标图片中 + scale_ratio = min(width_ratio, height_ratio) + + # 计算调整后的尺寸 + new_width = int(original_width * scale_ratio) + new_height = int(original_height * scale_ratio) + + # 调整图片大小 + resized_image = image.resize((new_width, new_height)) + + # 创建一个 512x768 的透明图片 + result_image = Image.new("RGBA", (target_width, target_height), (255, 255, 255, 0)) + + # 计算需要粘贴的位置,使图片居中 + x_offset = (target_width - new_width) // 2 + y_offset = (target_height - new_height) // 2 + + # 将调整大小后的图片粘贴到透明图片上 + if resized_image.mode == "RGBA": + result_image.paste(resized_image, (x_offset, y_offset), mask=resized_image.split()[3]) + else: + result_image.paste(resized_image, (x_offset, y_offset)) + + image = np.array(result_image) + + # image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA) + return image + + +def post_processing_image(image, left, top): + resized_image = image.resize((int(image.width * (768 / image.height)), 768)) + # 计算裁剪的坐标 + left = (resized_image.width - 512) // 2 + upper = 0 + right = left + 512 + lower = 768 + + # 进行裁剪 + cropped_image = resized_image.crop((left, upper, right, lower)) + return cropped_image + + +def publish_status(task_id, progress, result): + connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) + channel = connection.channel() + channel.queue_declare(queue=BATCH_GPI_RABBITMQ_QUEUES, durable=True) + message = {'task_id': task_id, 'progress': progress, "result": result} + channel.basic_publish(exchange='', + routing_key=BATCH_GPI_RABBITMQ_QUEUES, + body=json.dumps(message), + properties=pika.BasicProperties( + delivery_mode=2, + )) + connection.close() + + +if __name__ == '__main__': + rd = BatchGenerateProductImageModel( + tasks_id="123-15-51-89", + image_strength=0.7, + prompt=" The best quality, masterpiece, real image.Outwear,high quality clothing details,8K realistic,HDR", + image_url="aida-results/result_40b1a2fe-e220-11ef-9bfa-0242ac150003.png", + product_type="overall", + batch_size=20 + ) + 
batch_generate_product(rd.dict()) diff --git a/app/service/generate_batch_image/service_batch_generate_relight_image.py b/app/service/generate_batch_image/service_batch_generate_relight_image.py new file mode 100644 index 0000000..fa53f26 --- /dev/null +++ b/app/service/generate_batch_image/service_batch_generate_relight_image.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +""" +@Project :trinity_client +@File :service_att_recognition.py +@Author :周成融 +@Date :2023/7/26 12:01:05 +@detail : +""" +import json +import logging + +import cv2 +import numpy as np +import tritonclient.grpc as grpcclient +from PIL import Image +from celery import Celery +from tritonclient.utils import np_to_triton_dtype + +from app.core.config import * +from app.schemas.generate_image import BatchGenerateRelightImageModel +from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image +from app.service.utils.oss_client import oss_get_image + +logger = logging.getLogger() +celery_app = Celery('relight_tasks', broker=f'amqp://rabbit:123456@18.167.251.121:5672//', backend='rpc://', BROKER_CONNECTION_RETRY_ON_STARTUP=True) +celery_app.conf.task_default_queue = 'queue_relight' +celery_app.conf.worker_log_format = '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s' +celery_app.conf.worker_hijack_root_logger = False +logging.getLogger('pika').setLevel(logging.WARNING) +grpc_client = grpcclient.InferenceServerClient(url=GRI_MODEL_URL) +category = "relight_image" + + +@celery_app.task +def batch_generate_relight(batch_request_data): + logger.info(f"batch_generate_relight batch_request_data: {batch_request_data}") + negative_prompt = 'lowres, bad anatomy, bad hands, cropped, worst quality' + direction = batch_request_data['direction'] + seed = "1" + prompt = batch_request_data['prompt'] + product_type = batch_request_data['product_type'] + image_url = batch_request_data['image_url'] + image = oss_get_image(bucket=image_url.split('/')[0], 
object_name=image_url.split('/', 1)[1], data_type="cv2") + tasks_id = batch_request_data['tasks_id'] + user_id = tasks_id.rsplit('-', 1)[1] + batch_size = batch_request_data['batch_size'] + + prompts = [prompt] * 1 + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + image = cv2.resize(image, (512, 768)) + images = [image.astype(np.uint8)] * 1 + seeds = [seed] * 1 + nagetive_prompts = [negative_prompt] * 1 + directions = [direction] * 1 + + if product_type == 'single': + text_obj = np.array(prompts, dtype="object").reshape((-1, 1)) + image_obj = np.array(images, dtype=np.uint8).reshape((-1, 768, 512, 3)) + na_text_obj = np.array(nagetive_prompts, dtype="object").reshape((-1, 1)) + seed_obj = np.array(seeds, dtype="object").reshape((-1, 1)) + direction_obj = np.array(directions, dtype="object").reshape((-1, 1)) + else: + text_obj = np.array(prompts, dtype="object").reshape((1)) + image_obj = np.array(images, dtype=np.uint8).reshape((768, 512, 3)) + na_text_obj = np.array(nagetive_prompts, dtype="object").reshape((1)) + seed_obj = np.array(seeds, dtype="object").reshape((1)) + direction_obj = np.array(directions, dtype="object").reshape((1)) + input_text = grpcclient.InferInput("prompt", text_obj.shape, np_to_triton_dtype(text_obj.dtype)) + input_image = grpcclient.InferInput("input_image", image_obj.shape, "UINT8") + input_natext = grpcclient.InferInput("negative_prompt", na_text_obj.shape, np_to_triton_dtype(na_text_obj.dtype)) + input_seed = grpcclient.InferInput("seed", seed_obj.shape, np_to_triton_dtype(seed_obj.dtype)) + input_direction = grpcclient.InferInput("direction", direction_obj.shape, np_to_triton_dtype(direction_obj.dtype)) + + input_text.set_data_from_numpy(text_obj) + input_image.set_data_from_numpy(image_obj) + input_natext.set_data_from_numpy(na_text_obj) + input_seed.set_data_from_numpy(seed_obj) + input_direction.set_data_from_numpy(direction_obj) + + inputs = [input_text, input_natext, input_image, input_seed, input_direction] + image_url_list = 
[] + for i in range(batch_size): + try: + if batch_request_data['product_type'] == "single": + result = grpc_client.infer(model_name=GRI_MODEL_NAME_SINGLE, inputs=inputs, priority=100) + image = result.as_numpy("generated_relight_image") + else: + result = grpc_client.infer(model_name=GRI_MODEL_NAME_OVERALL, inputs=inputs, priority=100) + image = result.as_numpy("generated_inpaint_image") + image_result = Image.fromarray(np.squeeze(image.astype(np.uint8))) + + except Exception as e: + print(e) + if 'mask_list' in str(e): + e_text_obj = np.array(prompts, dtype="object").reshape((-1, 1)) + e_image_obj = np.array(images, dtype=np.uint8).reshape((-1, 768, 512, 3)) + e_na_text_obj = np.array(nagetive_prompts, dtype="object").reshape((-1, 1)) + e_seed_obj = np.array(seeds, dtype="object").reshape((-1, 1)) + e_direction_obj = np.array(directions, dtype="object").reshape((-1, 1)) + + e_input_text = grpcclient.InferInput("prompt", e_text_obj.shape, np_to_triton_dtype(e_text_obj.dtype)) + e_input_image = grpcclient.InferInput("input_image", e_image_obj.shape, "UINT8") + e_input_natext = grpcclient.InferInput("negative_prompt", e_na_text_obj.shape, np_to_triton_dtype(e_na_text_obj.dtype)) + e_input_seed = grpcclient.InferInput("seed", e_seed_obj.shape, np_to_triton_dtype(e_seed_obj.dtype)) + e_input_direction = grpcclient.InferInput("direction", e_direction_obj.shape, np_to_triton_dtype(e_direction_obj.dtype)) + + e_input_text.set_data_from_numpy(e_text_obj) + e_input_image.set_data_from_numpy(e_image_obj) + e_input_natext.set_data_from_numpy(e_na_text_obj) + e_input_seed.set_data_from_numpy(e_seed_obj) + e_input_direction.set_data_from_numpy(e_direction_obj) + + e_inputs = [e_input_text, e_input_natext, e_input_image, e_input_seed, e_input_direction] + + result = grpc_client.infer(model_name=GRI_MODEL_NAME_SINGLE, inputs=e_inputs, priority=100) + image = result.as_numpy("generated_relight_image") + image_result = Image.fromarray(np.squeeze(image.astype(np.uint8))) + else: + 
image_result = str(e) + logger.error(e) + if isinstance(image_result, Image.Image): + image_url = upload_SDXL_image(image_result, user_id=user_id, category=f"{category}", file_name=f"{tasks_id}-batch-{i}.png") + image_url_list.append(image_url) + else: + image_url = image_result + if DEBUG is False: + if i + 1 < batch_size: + publish_status(tasks_id, f"{i + 1}/{batch_size}", image_url) + logger.info(f" [x] {tasks_id}:tasks_id *** progress:{i + 1}/{batch_size} *** image_url:{image_url}") + print(f" [x] {tasks_id}:tasks_id *** progress:{i + 1}/{batch_size} *** image_url:{image_url}") + else: + publish_status(tasks_id, f"OK", image_url_list) + logger.info(f" [x] {tasks_id}:tasks_id *** progress:OK *** image_url:{image_url_list}") + print(f" [x] {tasks_id}:tasks_id *** progress:OK *** image_url:{image_url_list}") + + +def publish_status(task_id, progress, result): + connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) + channel = connection.channel() + channel.queue_declare(queue=BATCH_GRI_RABBITMQ_QUEUES, durable=True) + message = {'task_id': task_id, 'progress': progress, "result": result} + channel.basic_publish(exchange='', + routing_key=BATCH_GRI_RABBITMQ_QUEUES, + body=json.dumps(message), + properties=pika.BasicProperties( + delivery_mode=2, + )) + connection.close() + + +if __name__ == '__main__': + rd = BatchGenerateRelightImageModel( + tasks_id="123-89", + # prompt="beautiful woman, detailed face, sunshine, outdoor, warm atmosphere", + prompt="Colorful black", + image_url='aida-users/89/clothing_seg/283c5c82-1a92-11f0-b72a-0242ac150002.png', + direction="Right Light", + product_type="overall", + batch_size=10 + ) + batch_generate_relight(rd.dict()) diff --git a/app/service/generate_batch_image/service_batch_pose_transform.py b/app/service/generate_batch_image/service_batch_pose_transform.py new file mode 100644 index 0000000..3507a43 --- /dev/null +++ b/app/service/generate_batch_image/service_batch_pose_transform.py @@ -0,0 
+1,176 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +""" +@Project :trinity_client +@File :service_att_recognition.py +@Author :周成融 +@Date :2023/7/26 12:01:05 +@detail : +""" +import io +import json +import logging +from io import BytesIO + +import imageio +import numpy as np +import tritonclient.grpc as grpcclient +from PIL import Image +from celery import Celery +from minio import Minio +from tritonclient.utils import np_to_triton_dtype + +from app.core.config import * +from app.schemas.pose_transform import BatchPoseTransformModel +from app.service.generate_image.utils.pose_transform_upload import upload_gif, upload_video +from app.service.utils.new_oss_client import oss_upload_image +from app.service.utils.oss_client import oss_get_image + +minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) + +logger = logging.getLogger() +celery_app = Celery('tasks', broker=f'amqp://rabbit:123456@18.167.251.121:5672//', backend='rpc://', BROKER_CONNECTION_RETRY_ON_STARTUP=True) +celery_app.conf.task_default_queue = 'queue_post_transform' +celery_app.conf.worker_log_format = '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s' +celery_app.conf.worker_hijack_root_logger = False +logging.getLogger('pika').setLevel(logging.WARNING) +grpc_client = grpcclient.InferenceServerClient(url=PT_MODEL_URL) +category = "pose_transform" + + +def upload_first_image(image, user_id, category, file_name): + try: + image_data = io.BytesIO() + image.save(image_data, format='PNG') + image_data.seek(0) + image_bytes = image_data.read() + object_name = f'{user_id}/{category}/{file_name}' + req = oss_upload_image(oss_client=minio_client, bucket=GI_MINIO_BUCKET, object_name=object_name, image_bytes=image_bytes) + image_url = f"aida-users/{object_name}" + return image_url + except Exception as e: + logging.warning(f"upload_png_mask runtime exception : {e}") + + +def pre_processing_image(image_url): + image = 
oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL") + # 目标图片的尺寸 + target_width = 512 + target_height = 768 + + # 原始图片的尺寸 + original_width, original_height = image.size + + # 计算宽度和高度的缩放比例 + width_ratio = target_width / original_width + height_ratio = target_height / original_height + + # 选择较小的缩放比例,确保图片能完整放入目标图片中 + scale_ratio = min(width_ratio, height_ratio) + + # 计算调整后的尺寸 + new_width = int(original_width * scale_ratio) + new_height = int(original_height * scale_ratio) + + # 调整图片大小 + resized_image = image.resize((new_width, new_height)) + + # 创建一个 512x768 的透明图片 + result_image = Image.new("RGBA", (target_width, target_height), (255, 255, 255, 0)) + + # 计算需要粘贴的位置,使图片居中 + x_offset = (target_width - new_width) // 2 + y_offset = (target_height - new_height) // 2 + + # 将调整大小后的图片粘贴到透明图片上 + if resized_image.mode == "RGBA": + result_image.paste(resized_image, (x_offset, y_offset), mask=resized_image.split()[3]) + else: + result_image.paste(resized_image, (x_offset, y_offset)) + result_image = result_image.convert("RGB") + image = np.array(result_image) + + # image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA) + + return image + + +@celery_app.task +def batch_generate_pose_transform(batch_request_data): + logger.info(f"batch_generate_pose_transform batch_request_data: {batch_request_data}") + batch_size = batch_request_data['batch_size'] + image_url = batch_request_data['image_url'] + image = pre_processing_image(image_url) + pose_num = batch_request_data['pose_id'] + tasks_id = batch_request_data['tasks_id'] + user_id = tasks_id.rsplit('-', 1)[1] + + pose_num = [pose_num] * 1 + pose_num_obj = np.array(pose_num, dtype="object").reshape((-1, 1)) + input_pose_num = grpcclient.InferInput("pose_num", pose_num_obj.shape, np_to_triton_dtype(pose_num_obj.dtype)) + input_pose_num.set_data_from_numpy(pose_num_obj) + + image_files = [image.astype(np.uint8)] * 1 + image_files_obj = np.array(image_files, 
dtype=np.uint8).reshape((-1, 768, 512, 3)) + input_image_files = grpcclient.InferInput("image_file", image_files_obj.shape, "UINT8") + input_image_files.set_data_from_numpy(image_files_obj) + + result_url_list = [] + for i in range(batch_size): + try: + result = grpc_client.infer(model_name="animatex_1", inputs=[input_pose_num, input_image_files], client_timeout=60000, priority=100) + result_data = np.squeeze(result.as_numpy("generated_image_list").astype(np.uint8))[:, :, :, ::-1] + # 第一帧图像 + first_image = Image.fromarray(result_data[0]) + first_image_url = upload_first_image(first_image, user_id=user_id, category=f"{category}_first_img", file_name=f"{tasks_id}_batch_{i}.png") + + # 上传GIF + gif_buffer = BytesIO() + imageio.mimsave(gif_buffer, result_data, format='GIF', fps=5) + gif_buffer.seek(0) + gif_url = upload_gif(gif_buffer=gif_buffer, user_id=user_id, category=f"{category}_gif", file_name=f"{tasks_id}_batch_{i}.gif") + + # 上传video + video_url = upload_video(frames=result_data, user_id=user_id, category=f"{category}_video", file_name=f"{tasks_id}_batch_{i}.mp4") + data = { + "gif_url": gif_url, + "video_url": video_url, + "first_image_url": first_image_url, + } + except Exception as e: + print(e) + data = {} + result_url_list.append(data) + if DEBUG is False: + if i + 1 < batch_size: + publish_status(tasks_id, f"{i + 1}/{batch_size}", data) + logger.info(f" [x] {tasks_id}:tasks_id *** progress:{i + 1}/{batch_size} *** image_url:{data}") + print(f" [x] {tasks_id}:tasks_id *** progress:{i + 1}/{batch_size} *** image_url:{data}") + else: + publish_status(tasks_id, f"OK", result_url_list) + logger.info(f" [x] {tasks_id}:tasks_id *** progress:OK *** image_url:{result_url_list}") + print(f" [x] {tasks_id}:tasks_id *** progress:OK *** image_url:{result_url_list}") + + +def publish_status(task_id, progress, result): + connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) + channel = connection.channel() + 
channel.queue_declare(queue=BATCH_PS_RABBITMQ_QUEUES, durable=True) + message = {'task_id': task_id, 'progress': progress, "result": result} + channel.basic_publish(exchange='', + routing_key=BATCH_PS_RABBITMQ_QUEUES, + body=json.dumps(message), + properties=pika.BasicProperties( + delivery_mode=2, + )) + connection.close() + + +if __name__ == '__main__': + rd = BatchPoseTransformModel( + tasks_id="123-89", + image_url='aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png', + pose_id="1", + batch_size=10 + ) + batch_generate_pose_transform(rd.dict()) diff --git a/app/service/generate_image/service_generate_image.py b/app/service/generate_image/service_generate_image.py index 86912f8..4ed8fd4 100644 --- a/app/service/generate_image/service_generate_image.py +++ b/app/service/generate_image/service_generate_image.py @@ -153,9 +153,9 @@ class GenerateImage: inputs = [input_text, input_image, input_mode] if self.version == "fast": - ctx = self.grpc_client.async_infer(model_name=FAST_GI_MODEL_NAME, inputs=inputs, callback=self.callback) + ctx = self.grpc_client.async_infer(model_name=FAST_GI_MODEL_NAME, inputs=inputs, callback=self.callback, priority=1) else: - ctx = self.grpc_client.async_infer(model_name=GI_MODEL_NAME, inputs=inputs, callback=self.callback) + ctx = self.grpc_client.async_infer(model_name=GI_MODEL_NAME, inputs=inputs, callback=self.callback, priority=1) time_out = 600 generate_data = None diff --git a/app/service/generate_image/service_generate_product_image.py b/app/service/generate_image/service_generate_product_image.py index 235f366..d0fbe74 100644 --- a/app/service/generate_image/service_generate_product_image.py +++ b/app/service/generate_image/service_generate_product_image.py @@ -295,9 +295,9 @@ class GenerateProductImage: inputs = [input_text, input_image, input_image_strength] if self.product_type == "single": - ctx = self.grpc_client.async_infer(model_name=GPI_MODEL_NAME_SINGLE, inputs=inputs, callback=self.callback) + ctx = 
self.grpc_client.async_infer(model_name=GPI_MODEL_NAME_SINGLE, inputs=inputs, callback=self.callback, priority=1) else: - ctx = self.grpc_client.async_infer(model_name=GPI_MODEL_NAME_OVERALL, inputs=inputs, callback=self.callback) + ctx = self.grpc_client.async_infer(model_name=GPI_MODEL_NAME_OVERALL, inputs=inputs, callback=self.callback, priority=1) time_out = 600 while time_out > 0: diff --git a/app/service/generate_image/service_generate_relight_image.py b/app/service/generate_image/service_generate_relight_image.py index 2e48ae2..668e7fd 100644 --- a/app/service/generate_image/service_generate_relight_image.py +++ b/app/service/generate_image/service_generate_relight_image.py @@ -114,9 +114,9 @@ class GenerateRelightImage: inputs = [input_text, input_natext, input_image, input_seed, input_direction] if self.product_type == 'single': - ctx = self.grpc_client.async_infer(model_name=GRI_MODEL_NAME_SINGLE, inputs=inputs, callback=self.callback) + ctx = self.grpc_client.async_infer(model_name=GRI_MODEL_NAME_SINGLE, inputs=inputs, callback=self.callback, priority=1) else: - ctx = self.grpc_client.async_infer(model_name=GRI_MODEL_NAME_OVERALL, inputs=inputs, callback=self.callback) + ctx = self.grpc_client.async_infer(model_name=GRI_MODEL_NAME_OVERALL, inputs=inputs, callback=self.callback, priority=1) time_out = 600 while time_out > 0: diff --git a/app/service/utils/redis_utils.py b/app/service/utils/redis_utils.py new file mode 100644 index 0000000..012fbe0 --- /dev/null +++ b/app/service/utils/redis_utils.py @@ -0,0 +1,99 @@ +import redis + +from app.core.config import REDIS_HOST, REDIS_PORT + + +class Redis(object): + """ + redis数据库操作 + """ + + @staticmethod + def _get_r(): + host = REDIS_HOST + port = REDIS_PORT + db = 0 + r = redis.StrictRedis(host, port, db) + return r + + @classmethod + def write(cls, key, value, expire=None): + """ + 写入键值对 + """ + # 判断是否有过期时间,没有就设置默认值 + if expire: + expire_in_seconds = expire + else: + expire_in_seconds = 100 + r = 
cls._get_r() + r.set(key, value, ex=expire_in_seconds) + + @classmethod + def read(cls, key): + """ + 读取键值对内容 + """ + r = cls._get_r() + value = r.get(key) + return value.decode('utf-8') if value else value + + @classmethod + def hset(cls, name, key, value): + """ + 写入hash表 + """ + r = cls._get_r() + r.hset(name, key, value) + + @classmethod + def hget(cls, name, key): + """ + 读取指定hash表的键值 + """ + r = cls._get_r() + value = r.hget(name, key) + return value.decode('utf-8') if value else value + + @classmethod + def hgetall(cls, name): + """ + 获取指定hash表所有的值 + """ + r = cls._get_r() + return r.hgetall(name) + + @classmethod + def delete(cls, *names): + """ + 删除一个或者多个 + """ + r = cls._get_r() + r.delete(*names) + + @classmethod + def hdel(cls, name, key): + """ + 删除指定hash表的键值 + """ + r = cls._get_r() + r.hdel(name, key) + + @classmethod + def expire(cls, name, expire=None): + """ + 设置过期时间 + """ + if expire: + expire_in_seconds = expire + else: + expire_in_seconds = 100 + r = cls._get_r() + r.expire(name, expire_in_seconds) + + +if __name__ == '__main__': + redis_client = Redis() + # print(redis_client.write(key="1230", value=0)) + redis_client.write(key="1230", value=10) + # print(redis_client.read(key="1230")) From e1231d3d0e05ee50bc4b5f27568e453a8eadc6bf Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Tue, 22 Apr 2025 11:09:13 +0800 Subject: [PATCH 053/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20batch=20generate=20=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=8E=A5=E5=8F=A3=E8=AF=B4=E6=98=8E=EF=BC=8Cdesign=20batch?= =?UTF-8?q?=E9=98=9F=E5=88=97=E4=BF=AE=E6=94=B9=20fix=EF=BC=88=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit --- app/api/api_design.py | 12 +++--- app/api/api_generate_image.py | 62 ++++++++++++++++++++++++++-- app/core/config.py | 5 +++ app/service/design_batch/utils/MQ.py | 6 +-- 4 files changed, 73 insertions(+), 12 deletions(-) diff --git a/app/api/api_design.py b/app/api/api_design.py index 1c77ed8..03e0b25 100644 --- a/app/api/api_design.py +++ b/app/api/api_design.py @@ -433,12 +433,12 @@ def model_process(request_data: ModelProgressModel): @router.post("/design_batch_generate") -async def design(file: UploadFile = File(...), - tasks_id: str = Form(...), - user_id: str = Form(...), - file_name: str = Form(...), - total: int = Form(...) - ): +async def design_batch(file: UploadFile = File(...), + tasks_id: str = Form(...), + user_id: str = Form(...), + file_name: str = Form(...), + total: int = Form(...) + ): dbg_config = DBGConfigModel( tasks_id=tasks_id, user_id=user_id, diff --git a/app/api/api_generate_image.py b/app/api/api_generate_image.py index f151b91..2706abd 100644 --- a/app/api/api_generate_image.py +++ b/app/api/api_generate_image.py @@ -236,15 +236,71 @@ def generate_relight_image(tasks_id: str): @router.post("/batch_generate_product_image") -async def design(request_batch_item: BatchGenerateProductImageModel): +async def batch_generate_product(request_batch_item: BatchGenerateProductImageModel): + """ + 创建一个具有以下参数的请求体: + - **tasks_id**: 任务id 用于获取生成结果 + - **prompt**: 想要生成图片的描述词 + - **image_url**: 被生成图片的S3或minio url地址 + - **image_strength**: 生成强度,越低越接近原图 + - **product_type**: 输入single item 还是 overall item + - **batch_size**: 批生成数量 + + + 示例参数: + { + "tasks_id": "123-89", + "prompt": "the best quality, masterpiece. 
detailed, high-res, simple background, studio photography, extremely detailed, updo, detailed face, face, close-up, HDR, UHD, 8K realistic, Highly detailed, simple background, Studio lighting", + "image_url": "aida-results/result_00097282-ebb2-11ee-a822-b48351119060.png", + "image_strength": 0.8, + "product_type": "overall", + "batch_size": 1 + } + """ return await start_product_batch_generate(request_batch_item) @router.post("/batch_generate_relight_image") -async def design(request_batch_item: BatchGenerateRelightImageModel): +async def batch_generate_relight(request_batch_item: BatchGenerateRelightImageModel): + """ + 创建一个具有以下参数的请求体: + - **tasks_id**: 任务id 用于获取生成结果 + - **prompt**: 想要生成图片的描述词 + - **image_url**: 被生成图片的S3或minio url地址 + - **direction**: 光源方向 Right Light Left Light Top Light Bottom Light + - **product_type**: 输入single item 还是 overall item + - **batch_size**: 批生成数量 + + + 示例参数: + { + "tasks_id": "123-89", + "prompt": "beautiful woman, detailed face, sunshine, outdoor, warm atmosphere", + "image_url": "aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png", + "direction": "Right Light", + "product_type": "overall", + "batch_size": 1 + } + """ return await start_relight_batch_generate(request_batch_item) @router.post("/batch_generate_pose_transform_image") -async def design(request_batch_item: BatchPoseTransformModel): +async def batch_generate_pose_transform(request_batch_item: BatchPoseTransformModel): + """ + 创建一个具有以下参数的请求体: + - **tasks_id**: 任务id 用于取消生成任务和获取生成结果 + - **image_url**: 被生成图片的S3或minio url地址 + - **pose_id**: 1 + - **batch_size**: 批生成数量 + + + 示例参数: + { + "tasks_id": "123-89", + "image_url": "aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png", + "pose_id": "1", + "batch_size": 1 + } + """ return await start_pose_transform_batch_generate(request_batch_item) diff --git a/app/core/config.py b/app/core/config.py index aaf32d7..9650023 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -162,6 +162,11 @@ SEGMENTATION = { 
} # ollama config OLLAMA_URL = "http://10.1.1.240:11434/api/embeddings" + + +# design batch +BATCH_DESIGN_RABBITMQ_QUEUES = os.getenv("BATCH_DESIGN_RABBITMQ_QUEUES", f"Design{RABBITMQ_ENV}") + # DESIGN config DESIGN_MODEL_URL = '10.1.1.240:10000' AIDA_CLOTHING = "aida-clothing" diff --git a/app/service/design_batch/utils/MQ.py b/app/service/design_batch/utils/MQ.py index 1b64bf3..4fc839b 100644 --- a/app/service/design_batch/utils/MQ.py +++ b/app/service/design_batch/utils/MQ.py @@ -2,16 +2,16 @@ import json import pika -from app.core.config import RABBITMQ_PARAMS +from app.core.config import RABBITMQ_PARAMS, BATCH_DESIGN_RABBITMQ_QUEUES def publish_status(task_id, progress, result): connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) channel = connection.channel() - channel.queue_declare(queue='DesignBatch', durable=True) + channel.queue_declare(queue=BATCH_DESIGN_RABBITMQ_QUEUES, durable=True) message = {'task_id': task_id, 'progress': progress, "result": result} channel.basic_publish(exchange='', - routing_key='DesignBatch', + routing_key=BATCH_DESIGN_RABBITMQ_QUEUES, body=json.dumps(message), properties=pika.BasicProperties( delivery_mode=2, From af8ed730cc6f735d0d8d16653d807159dd929e95 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Tue, 22 Apr 2025 11:10:47 +0800 Subject: [PATCH 054/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20batch=20generate=20=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=8E=A5=E5=8F=A3=E8=AF=B4=E6=98=8E=EF=BC=8Cdesign=20batch?= =?UTF-8?q?=E9=98=9F=E5=88=97=E4=BF=AE=E6=94=B9=20fix=EF=BC=88=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
app/core/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/core/config.py b/app/core/config.py index 9650023..6905e09 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -165,7 +165,7 @@ OLLAMA_URL = "http://10.1.1.240:11434/api/embeddings" # design batch -BATCH_DESIGN_RABBITMQ_QUEUES = os.getenv("BATCH_DESIGN_RABBITMQ_QUEUES", f"Design{RABBITMQ_ENV}") +BATCH_DESIGN_RABBITMQ_QUEUES = os.getenv("BATCH_DESIGN_RABBITMQ_QUEUES", f"DesignBatch{RABBITMQ_ENV}") # DESIGN config DESIGN_MODEL_URL = '10.1.1.240:10000' From 96002eb7f238fef9a0694ad9e0686ee490e692fe Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Tue, 22 Apr 2025 13:59:59 +0800 Subject: [PATCH 055/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20batch=20generate=20=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=8E=A5=E5=8F=A3=E8=AF=B4=E6=98=8E=EF=BC=8Cdesign=20batch?= =?UTF-8?q?=E9=98=9F=E5=88=97=E4=BF=AE=E6=94=B9=20fix=EF=BC=88=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/core/config.py | 3 ++- .../service_batch_generate_product_image.py | 8 ++++---- .../service_batch_generate_relight_image.py | 8 ++++---- .../service_batch_pose_transform.py | 14 +++++++------- .../generate_image/utils/pose_transform_upload.py | 4 ++-- 5 files changed, 19 insertions(+), 18 deletions(-) diff --git a/app/core/config.py b/app/core/config.py index 6905e09..ab53e0f 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -25,11 +25,13 @@ if DEBUG: LOGS_PATH = "logs/" CATEGORY_PATH = "service/attribute/config/descriptor/category/category_dis.csv" SEG_CACHE_PATH = "../seg_cache/" + POSE_TRANSFORM_VIDEO_PATH = "../pose_transform_video/" 
RECOMMEND_PATH_PREFIX = "service/recommend/" else: LOGS_PATH = "app/logs/" CATEGORY_PATH = "app/service/attribute/config/descriptor/category/category_dis.csv" SEG_CACHE_PATH = "/seg_cache/" + POSE_TRANSFORM_VIDEO_PATH = "/pose_transform_video/" RECOMMEND_PATH_PREFIX = "app/service/recommend/" # RABBITMQ_ENV = "" # 生产环境 @@ -163,7 +165,6 @@ SEGMENTATION = { # ollama config OLLAMA_URL = "http://10.1.1.240:11434/api/embeddings" - # design batch BATCH_DESIGN_RABBITMQ_QUEUES = os.getenv("BATCH_DESIGN_RABBITMQ_QUEUES", f"DesignBatch{RABBITMQ_ENV}") diff --git a/app/service/generate_batch_image/service_batch_generate_product_image.py b/app/service/generate_batch_image/service_batch_generate_product_image.py index 438ec99..ee2a424 100644 --- a/app/service/generate_batch_image/service_batch_generate_product_image.py +++ b/app/service/generate_batch_image/service_batch_generate_product_image.py @@ -102,12 +102,12 @@ def batch_generate_product(batch_request_data): if DEBUG is False: if i + 1 < batch_size: publish_status(tasks_id, f"{i + 1}/{batch_size}", image_url) - logger.info(f" [x] {tasks_id}:tasks_id *** progress:{i + 1}/{batch_size} *** image_url:{image_url}") - print(f" [x] {tasks_id}:tasks_id *** progress:{i + 1}/{batch_size} *** image_url:{image_url}") + logger.info(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | image_url:{image_url}") + # print(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | image_url:{image_url}") else: publish_status(tasks_id, f"OK", image_url_list) - logger.info(f" [x] {tasks_id}:tasks_id *** progress:OK *** image_url:{image_url_list}") - print(f" [x] {tasks_id}:tasks_id *** progress:OK *** image_url:{image_url_list}") + logger.info(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:OK | image_url:{image_url}") + # print(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:OK | image_url:{image_url}") 
def pre_processing_image(image_url): diff --git a/app/service/generate_batch_image/service_batch_generate_relight_image.py b/app/service/generate_batch_image/service_batch_generate_relight_image.py index fa53f26..3ff0978 100644 --- a/app/service/generate_batch_image/service_batch_generate_relight_image.py +++ b/app/service/generate_batch_image/service_batch_generate_relight_image.py @@ -127,12 +127,12 @@ def batch_generate_relight(batch_request_data): if DEBUG is False: if i + 1 < batch_size: publish_status(tasks_id, f"{i + 1}/{batch_size}", image_url) - logger.info(f" [x] {tasks_id}:tasks_id *** progress:{i + 1}/{batch_size} *** image_url:{image_url}") - print(f" [x] {tasks_id}:tasks_id *** progress:{i + 1}/{batch_size} *** image_url:{image_url}") + logger.info(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | image_url:{image_url}") + # print(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | image_url:{image_url}") else: publish_status(tasks_id, f"OK", image_url_list) - logger.info(f" [x] {tasks_id}:tasks_id *** progress:OK *** image_url:{image_url_list}") - print(f" [x] {tasks_id}:tasks_id *** progress:OK *** image_url:{image_url_list}") + logger.info(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:OK | image_url:{image_url}") + # print(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:OK | image_url:{image_url}") def publish_status(task_id, progress, result): diff --git a/app/service/generate_batch_image/service_batch_pose_transform.py b/app/service/generate_batch_image/service_batch_pose_transform.py index 3507a43..29f127c 100644 --- a/app/service/generate_batch_image/service_batch_pose_transform.py +++ b/app/service/generate_batch_image/service_batch_pose_transform.py @@ -29,7 +29,7 @@ from app.service.utils.oss_client import oss_get_image minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, 
secret_key=MINIO_SECRET, secure=MINIO_SECURE) logger = logging.getLogger() -celery_app = Celery('tasks', broker=f'amqp://rabbit:123456@18.167.251.121:5672//', backend='rpc://', BROKER_CONNECTION_RETRY_ON_STARTUP=True) +celery_app = Celery('post_transform_tasks', broker=f'amqp://rabbit:123456@18.167.251.121:5672//', backend='rpc://', BROKER_CONNECTION_RETRY_ON_STARTUP=True) celery_app.conf.task_default_queue = 'queue_post_transform' celery_app.conf.worker_log_format = '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s' celery_app.conf.worker_hijack_root_logger = False @@ -144,21 +144,21 @@ def batch_generate_pose_transform(batch_request_data): if DEBUG is False: if i + 1 < batch_size: publish_status(tasks_id, f"{i + 1}/{batch_size}", data) - logger.info(f" [x] {tasks_id}:tasks_id *** progress:{i + 1}/{batch_size} *** image_url:{data}") - print(f" [x] {tasks_id}:tasks_id *** progress:{i + 1}/{batch_size} *** image_url:{data}") + logger.info(f" [x]Queue : {BATCH_PS_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | image_url:{image_url}") + # print(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | image_url:{image_url}") else: publish_status(tasks_id, f"OK", result_url_list) - logger.info(f" [x] {tasks_id}:tasks_id *** progress:OK *** image_url:{result_url_list}") - print(f" [x] {tasks_id}:tasks_id *** progress:OK *** image_url:{result_url_list}") + logger.info(f" [x]Queue : {BATCH_PS_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:OK | image_url:{image_url}") + # print(f" [x]Queue : {BATCH_PS_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:OK | image_url:{image_url}") def publish_status(task_id, progress, result): connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) channel = connection.channel() - channel.queue_declare(queue=BATCH_GRI_RABBITMQ_QUEUES, durable=True) + channel.queue_declare(queue=BATCH_PS_RABBITMQ_QUEUES, durable=True) message = 
{'task_id': task_id, 'progress': progress, "result": result} channel.basic_publish(exchange='', - routing_key=BATCH_GRI_RABBITMQ_QUEUES, + routing_key=BATCH_PS_RABBITMQ_QUEUES, body=json.dumps(message), properties=pika.BasicProperties( delivery_mode=2, diff --git a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index b6d632d..4ecb35d 100644 --- a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -1,7 +1,7 @@ import io import logging +import os.path -import cv2 import numpy as np import skvideo.io # import boto3 @@ -66,7 +66,7 @@ def upload_video(frames, user_id, category, file_name): def ndarray_to_video(images, output_path, frame_size=(512, 768), fps=9): # 初始化视频写入器 writer = skvideo.io.FFmpegWriter( - output_path, + os.path.join(POSE_TRANSFORM_VIDEO_PATH,output_path), inputdict={'-r': str(fps)}, outputdict={'-r': str(fps), '-vcodec': 'libx264'} ) From bb3ef39e104c869604633919c525025ce8fade14 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Tue, 22 Apr 2025 14:09:28 +0800 Subject: [PATCH 056/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20batch=20generate=20=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=8E=A5=E5=8F=A3=E8=AF=B4=E6=98=8E=EF=BC=8Cdesign=20batch?= =?UTF-8?q?=E9=98=9F=E5=88=97=E4=BF=AE=E6=94=B9=20fix=EF=BC=88=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../service_batch_generate_product_image.py | 2 +- .../service_batch_generate_relight_image.py | 2 +- .../generate_batch_image/service_batch_pose_transform.py | 2 +- 3 files changed, 3 insertions(+), 3 
deletions(-) diff --git a/app/service/generate_batch_image/service_batch_generate_product_image.py b/app/service/generate_batch_image/service_batch_generate_product_image.py index ee2a424..f09fbd5 100644 --- a/app/service/generate_batch_image/service_batch_generate_product_image.py +++ b/app/service/generate_batch_image/service_batch_generate_product_image.py @@ -35,7 +35,7 @@ category = "product_image" @celery_app.task def batch_generate_product(batch_request_data): - logger.info(f"batch_generate_product batch_request_data:{batch_request_data}") + logger.info(f"batch_generate_product batch_request_data:{json.dumps(batch_request_data, indent=4)}") tasks_id = batch_request_data['tasks_id'] user_id = tasks_id.rsplit('-', 1)[1] batch_size = batch_request_data['batch_size'] diff --git a/app/service/generate_batch_image/service_batch_generate_relight_image.py b/app/service/generate_batch_image/service_batch_generate_relight_image.py index 3ff0978..83a5701 100644 --- a/app/service/generate_batch_image/service_batch_generate_relight_image.py +++ b/app/service/generate_batch_image/service_batch_generate_relight_image.py @@ -34,7 +34,7 @@ category = "relight_image" @celery_app.task def batch_generate_relight(batch_request_data): - logger.info(f"batch_generate_relight batch_request_data: {batch_request_data}") + logger.info(f"batch_generate_relight batch_request_data: {json.dumps(batch_request_data, indent=4)}") negative_prompt = 'lowres, bad anatomy, bad hands, cropped, worst quality' direction = batch_request_data['direction'] seed = "1" diff --git a/app/service/generate_batch_image/service_batch_pose_transform.py b/app/service/generate_batch_image/service_batch_pose_transform.py index 29f127c..0114ee5 100644 --- a/app/service/generate_batch_image/service_batch_pose_transform.py +++ b/app/service/generate_batch_image/service_batch_pose_transform.py @@ -97,7 +97,7 @@ def pre_processing_image(image_url): @celery_app.task def batch_generate_pose_transform(batch_request_data): 
- logger.info(f"batch_generate_pose_transform batch_request_data: {batch_request_data}") + logger.info(f"batch_generate_pose_transform batch_request_data: {json.dumps(batch_request_data, indent=4)}") batch_size = batch_request_data['batch_size'] image_url = batch_request_data['image_url'] image = pre_processing_image(image_url) From 66fa349e9986bf40cee9c7360bfed3c0b05e9a1b Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Tue, 22 Apr 2025 14:09:40 +0800 Subject: [PATCH 057/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20batch=20generate=20=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=8E=A5=E5=8F=A3=E8=AF=B4=E6=98=8E=EF=BC=8Cdesign=20batch?= =?UTF-8?q?=E9=98=9F=E5=88=97=E4=BF=AE=E6=94=B9=20fix=EF=BC=88=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/generate_batch_image/tasks.py | 28 ++++++++++++++++++ app/service/generate_batch_image/test.py | 36 +++++++++++++++++++++++ 2 files changed, 64 insertions(+) create mode 100644 app/service/generate_batch_image/tasks.py create mode 100644 app/service/generate_batch_image/test.py diff --git a/app/service/generate_batch_image/tasks.py b/app/service/generate_batch_image/tasks.py new file mode 100644 index 0000000..a79d402 --- /dev/null +++ b/app/service/generate_batch_image/tasks.py @@ -0,0 +1,28 @@ +# import logging +# +# from celery import Celery +# +# from app.service.generate_batch_image.service_batch_generate_product_image import batch_generate_product +# from app.service.generate_batch_image.service_batch_generate_relight_image import batch_generate_relight +# from app.service.generate_batch_image.service_batch_pose_transform import 
batch_generate_pose_transform +# +# logger = logging.getLogger() +# celery_app = Celery('tasks', broker=f'amqp://rabbit:123456@18.167.251.121:5672//', backend='rpc://', BROKER_CONNECTION_RETRY_ON_STARTUP=True) +# celery_app.conf.worker_log_format = '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s' +# celery_app.conf.worker_hijack_root_logger = False +# logging.getLogger('pika').setLevel(logging.WARNING) +# +# +# @celery_app.task +# def batch_pose_transform_tasks(batch_request_data): +# batch_generate_pose_transform(batch_request_data) +# +# +# @celery_app.task +# def batch_generate_relight_tasks(batch_request_data): +# batch_generate_relight(batch_request_data) +# +# +# @celery_app.task +# def batch_generate_product_tasks(batch_request_data): +# batch_generate_product(batch_request_data) \ No newline at end of file diff --git a/app/service/generate_batch_image/test.py b/app/service/generate_batch_image/test.py new file mode 100644 index 0000000..ece4b39 --- /dev/null +++ b/app/service/generate_batch_image/test.py @@ -0,0 +1,36 @@ +from app.schemas.generate_image import BatchGenerateRelightImageModel, BatchGenerateProductImageModel +from app.service.generate_batch_image.service_batch_generate_product_image import batch_generate_product + +from app.service.generate_batch_image.service_batch_generate_relight_image import batch_generate_relight + +if __name__ == '__main__': + rd = BatchGenerateProductImageModel( + tasks_id="test1-89", + image_strength=0.7, + prompt=" The best quality, masterpiece, real image.Outwear,high quality clothing details,8K realistic,HDR", + image_url="aida-results/result_40b1a2fe-e220-11ef-9bfa-0242ac150003.png", + product_type="single", + batch_size=2 + ) + x = batch_generate_product.delay(rd.dict()) + print(x) + + """relight""" + # rd = BatchGenerateRelightImageModel( + # tasks_id="123-89", + # # prompt="beautiful woman, detailed face, sunshine, outdoor, warm atmosphere", + # prompt="Colorful black", + # 
image_url='aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png', + # direction="Right Light", + # product_type="single", + # batch_size=2 + # ) + # batch_generate_relight.delay(rd.dict()) + """pose transform""" + # rd = BatchPoseTransformModel( + # tasks_id="123-89", + # image_url='aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png', + # pose_id="1", + # batch_size=10 + # ) + # batch_pose_transform_tasks.delay(rd.dict()) From ac9c9b016d0cfe0ed7cbf4ba46cb8ceb92c286b3 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Tue, 22 Apr 2025 14:27:20 +0800 Subject: [PATCH 058/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20=20pose=20transform=20=E6=A8=A1=E5=9D=97=20uplo?= =?UTF-8?q?ad=20video=20=E4=BF=9D=E5=AD=98=E5=9C=B0=E5=9D=80=E4=BF=AE?= =?UTF-8?q?=E6=94=B9=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../generate_image/utils/pose_transform_upload.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index 4ecb35d..71d31ef 100644 --- a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -49,24 +49,24 @@ def upload_gif(gif_buffer, user_id, category, file_name): def upload_video(frames, user_id, category, file_name): try: - ndarray_to_video(frames, file_name) + save_path = ndarray_to_video(frames, file_name) object_name = f'{user_id}/{category}/{file_name}' minio_client.fput_object( "aida-users", object_name, - file_name, + save_path, content_type="video/mp4" # 
指定MIME类型确保可在线播放[9](@ref) ) - print(file_name) return f"aida-users/{object_name}" except Exception as e: logging.warning(f"upload_video runtime exception : {e}") def ndarray_to_video(images, output_path, frame_size=(512, 768), fps=9): + save_path = os.path.join(POSE_TRANSFORM_VIDEO_PATH, output_path) # 初始化视频写入器 writer = skvideo.io.FFmpegWriter( - os.path.join(POSE_TRANSFORM_VIDEO_PATH,output_path), + save_path, inputdict={'-r': str(fps)}, outputdict={'-r': str(fps), '-vcodec': 'libx264'} ) @@ -80,6 +80,7 @@ def ndarray_to_video(images, output_path, frame_size=(512, 768), fps=9): # 关闭写入器 writer.close() + return save_path if __name__ == '__main__': From 4dbb259b8b31dc0b9e6f3c9d84ae8e5046f2f3cc Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Tue, 22 Apr 2025 15:58:07 +0800 Subject: [PATCH 059/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20=20=E6=89=8B=E5=8A=A8=E6=96=AD=E5=BC=80mq?= =?UTF-8?q?=E8=BF=9E=E6=8E=A5=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98?= =?UTF-8?q?=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84?= =?UTF-8?q?=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../generate_image/service_generate_product_image.py | 1 + .../generate_image/service_generate_relight_image.py | 1 + app/service/generate_image/service_pose_transform.py | 8 +++++--- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/app/service/generate_image/service_generate_product_image.py b/app/service/generate_image/service_generate_product_image.py index d0fbe74..5d67eef 100644 --- a/app/service/generate_image/service_generate_product_image.py +++ b/app/service/generate_image/service_generate_product_image.py @@ -320,6 +320,7 @@ class GenerateProductImage: dict_gen_product_data, str_gen_product_data = self.read_tasks_status() if 
DEBUG is False: self.channel.basic_publish(exchange='', routing_key=GPI_RABBITMQ_QUEUES, body=str_gen_product_data) + self.connection.close() logger.info(f" [x] Sent to: {GPI_RABBITMQ_QUEUES} data:@@@@ {json.dumps(dict_gen_product_data, indent=4)}") diff --git a/app/service/generate_image/service_generate_relight_image.py b/app/service/generate_image/service_generate_relight_image.py index 668e7fd..06a8954 100644 --- a/app/service/generate_image/service_generate_relight_image.py +++ b/app/service/generate_image/service_generate_relight_image.py @@ -139,6 +139,7 @@ class GenerateRelightImage: dict_gen_product_data, str_gen_product_data = self.read_tasks_status() if DEBUG is False: self.channel.basic_publish(exchange='', routing_key=GRI_RABBITMQ_QUEUES, body=str_gen_product_data) + self.connection.close() logger.info(f" [x] Sent to: {GRI_RABBITMQ_QUEUES} data:@@@@ {json.dumps(dict_gen_product_data, indent=4)}") diff --git a/app/service/generate_image/service_pose_transform.py b/app/service/generate_image/service_pose_transform.py index 3fc65c6..07da8de 100644 --- a/app/service/generate_image/service_pose_transform.py +++ b/app/service/generate_image/service_pose_transform.py @@ -29,6 +29,9 @@ logger = logging.getLogger() class PoseTransformService: def __init__(self, request_data): + if DEBUG is False: + self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) + self.channel = self.connection.channel() self.grpc_client = grpcclient.InferenceServerClient(url=PT_MODEL_URL) self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) self.category = "pose_transform" @@ -108,9 +111,8 @@ class PoseTransformService: finally: dict_pose_transform_data, str_pose_transform_data = self.read_tasks_status() if DEBUG is False: - connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) - channel = connection.channel() - channel.basic_publish(exchange='', 
routing_key=PS_RABBITMQ_QUEUES, body=str_pose_transform_data) + self.channel.basic_publish(exchange='', routing_key=PS_RABBITMQ_QUEUES, body=str_pose_transform_data) + self.connection.close() logger.info(f" [x] Sent to: {PS_RABBITMQ_QUEUES} data:@@@@ {json.dumps(dict_pose_transform_data, indent=4)}") From f68b5a9f04ea091a132731cc3c10357a2b2c0b30 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Tue, 22 Apr 2025 16:01:54 +0800 Subject: [PATCH 060/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20=20=E6=89=8B=E5=8A=A8=E6=96=AD=E5=BC=80mq?= =?UTF-8?q?=E8=BF=9E=E6=8E=A5=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98?= =?UTF-8?q?=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84?= =?UTF-8?q?=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/generate_image/service_generate_image.py | 1 + app/service/generate_image/service_generate_multi_view.py | 1 + app/service/generate_image/service_generate_single_logo.py | 1 + 3 files changed, 3 insertions(+) diff --git a/app/service/generate_image/service_generate_image.py b/app/service/generate_image/service_generate_image.py index 4ed8fd4..cfd5fd5 100644 --- a/app/service/generate_image/service_generate_image.py +++ b/app/service/generate_image/service_generate_image.py @@ -180,6 +180,7 @@ class GenerateImage: dict_generate_data, str_generate_data = self.read_tasks_status() if DEBUG is False: self.channel.basic_publish(exchange='', routing_key=GI_RABBITMQ_QUEUES, body=str_generate_data) + self.connection.close() # self.channel.basic_publish(exchange='', routing_key=GI_RABBITMQ_QUEUES, body=str_generate_data) logger.info(f" [x] Sent {json.dumps(dict_generate_data, indent=4)}") diff --git a/app/service/generate_image/service_generate_multi_view.py 
b/app/service/generate_image/service_generate_multi_view.py index c930ab2..248e604 100644 --- a/app/service/generate_image/service_generate_multi_view.py +++ b/app/service/generate_image/service_generate_multi_view.py @@ -105,6 +105,7 @@ class GenerateMultiView: dict_generate_data, str_generate_data = self.read_tasks_status() if DEBUG is False: self.channel.basic_publish(exchange='', routing_key=GMV_RABBITMQ_QUEUES, body=str_generate_data) + self.connection.close() # self.channel.basic_publish(exchange='', routing_key=GI_RABBITMQ_QUEUES, body=str_generate_data) logger.info(f" [x] Sent {json.dumps(dict_generate_data, indent=4)}") diff --git a/app/service/generate_image/service_generate_single_logo.py b/app/service/generate_image/service_generate_single_logo.py index e3def3e..af182b2 100644 --- a/app/service/generate_image/service_generate_single_logo.py +++ b/app/service/generate_image/service_generate_single_logo.py @@ -98,6 +98,7 @@ class GenerateSingleLogoImage: dict_generate_data, str_generate_data = self.read_tasks_status() if DEBUG is False: self.channel.basic_publish(exchange='', routing_key=GI_RABBITMQ_QUEUES, body=str_generate_data) + self.connection.close() logger.info(f" [x] Sent {json.dumps(dict_generate_data, indent=4)}") From b8fe29e73559448704d449f21880823e7f30ef4a Mon Sep 17 00:00:00 2001 From: xupei Date: Wed, 23 Apr 2025 11:51:53 +0800 Subject: [PATCH 061/101] =?UTF-8?q?chat-robot=20=E5=8F=96=E6=B6=88?= =?UTF-8?q?=E6=80=A7=E5=88=AB=E4=BC=A0=E5=85=A5=EF=BC=8C=E4=BB=8E=E7=94=A8?= =?UTF-8?q?=E6=88=B7=E8=BE=93=E5=85=A5=E4=B8=AD=E6=8F=90=E5=8F=96=E6=80=A7?= =?UTF-8?q?=E5=88=AB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/schemas/chat_robot.py | 1 - app/service/chat_robot/script/main.py | 3 +- app/service/chat_robot/script/prompt.py | 33 +++++++++++++++++++ .../chat_robot/script/service/CallQWen.py | 11 ++++--- 4 files changed, 41 insertions(+), 7 deletions(-) diff --git a/app/schemas/chat_robot.py 
b/app/schemas/chat_robot.py index cebf74a..01feeae 100644 --- a/app/schemas/chat_robot.py +++ b/app/schemas/chat_robot.py @@ -2,7 +2,6 @@ from pydantic import BaseModel class ChatRobotModel(BaseModel): - gender: str message: str session_id: str user_id: int diff --git a/app/service/chat_robot/script/main.py b/app/service/chat_robot/script/main.py index 3342a5c..3890a0e 100644 --- a/app/service/chat_robot/script/main.py +++ b/app/service/chat_robot/script/main.py @@ -92,7 +92,6 @@ def chat(post_data): user_id = post_data.user_id session_id = post_data.session_id input_message = post_data.message - gender = post_data.gender # final_outputs = agent_executor( # {"input": input_message, "gender": gender}, @@ -100,7 +99,7 @@ def chat(post_data): # session_key=f"buffer:{user_id}:{session_id}", # ) - final_outputs = CallQWen.call_with_messages(input_message, gender) + final_outputs = CallQWen.call_with_messages(input_message) # api_response = { # 'user_id': user_id, # 'session_id': session_id, diff --git a/app/service/chat_robot/script/prompt.py b/app/service/chat_robot/script/prompt.py index 121e57d..3cd2fca 100644 --- a/app/service/chat_robot/script/prompt.py +++ b/app/service/chat_robot/script/prompt.py @@ -34,6 +34,39 @@ You may encounter the following types of questions: Be careful to use the tools, since you are actually a chat bot. Tools can only be used when essential. """ +FASHION_CHAT_BOT_PREFIX_TEMP = """ +You are a fashion design assistant with the following capabilities: +1. Direct conversation: Answer general questions (e.g., greetings, opinions). +2. Tool usage: + - `get_image_from_vector_db`: Retrieve clothing items (requires gender parameter). + - `internet_search`: Fetch real-time fashion trends. + - `tutorial_tool`: Provide styling guides. + +Key Rules: +1. Tool Selection: + - Use `get_image_from_vector_db` for clothing queries (e.g., "show men's jackets"). + - Use `internet_search` for time-sensitive queries (e.g., "2024 Paris Fashion Week trends"). 
+ - Use `tutorial_tool` for educational requests (e.g., "how to layer outfits"). + +2. Gender Handling (for `get_image_from_vector_db` only): + - Step 1: Check the **current user input** for gender keywords (e.g., "women/men/she/he"). If found, extract and pass as `gender`. + - Step 2: If no gender in current input, scan the **chat history** for the most recent gender reference. + - Step 3: If undetermined, default to `"unisex"`. + +3. Output Format: + - Direct replies: Keep responses under 20 words. + - Tool calls: + - Always include required parameters (e.g., `gender` for `get_image_from_vector_db`). + - Auto-fill `gender` using the above rules if unspecified. + +Examples: +1. User: "Find red dresses for women" + → `get_image_from_vector_db(gender="female", query="dress")` +2. User: "show men's jackets" + → `get_image_from_vector_db(gender="male", query="outwear")` +3. User: "Show casual outfits" + → `get_image_from_vector_db(gender="unisex", query="casual outfits")`""" + TOOL_SELECT_SUFFIX = """ Prior to proceeding, it is essential to carefully assess the question and select the appropriate tools or approach accordingly. For database-related questions, use SQL tools to identify relevant tables and query their schemas. 
diff --git a/app/service/chat_robot/script/service/CallQWen.py b/app/service/chat_robot/script/service/CallQWen.py index d2f28a0..5ab74ba 100644 --- a/app/service/chat_robot/script/service/CallQWen.py +++ b/app/service/chat_robot/script/service/CallQWen.py @@ -9,7 +9,7 @@ from app.core.config import * from app.service.chat_robot.script.callbacks.qwen_callback_handler import QWenCallbackHandler from app.service.chat_robot.script.database import CustomDatabase from app.service.chat_robot.script.prompt import FASHION_CHAT_BOT_PREFIX, TOOLS_FUNCTIONS_SUFFIX, TUTORIAL_TOOL_RETURN, \ - GET_LANGUAGE_PREFIX + GET_LANGUAGE_PREFIX, FASHION_CHAT_BOT_PREFIX_TEMP from app.service.search_image_with_text.service import query get_database_table_description = "Input is an empty string, output is a comma separated list of tables in the database." @@ -212,14 +212,15 @@ def get_assistant_response(messages): return response -def call_with_messages(message, gender): +def call_with_messages(message): global tool_info user_input = message print('\n') messages = [ { - "content": FASHION_CHAT_BOT_PREFIX, # 系统message + # "content": FASHION_CHAT_BOT_PREFIX, # 系统message + "content": FASHION_CHAT_BOT_PREFIX_TEMP, # 修改后的系统message "role": "system" }, { @@ -255,7 +256,7 @@ def call_with_messages(message, gender): tool_info = {"name": "search_from_internet", "role": "tool"} content = json.loads(assistant_output.tool_calls[0]['function']['arguments']) message = [ - {'role': 'assistant', 'content': content['query']} + {'role': 'assistant', 'content': content['query'] if "query" in content.keys() else user_input} ] tool_info['content'] = search_from_internet(message) flag = False @@ -282,6 +283,8 @@ def call_with_messages(message, gender): result_content = tool_info['content'] elif assistant_output.tool_calls[0]['function']['name'] == 'get_image_from_vector_db': content = json.loads(assistant_output.tool_calls[0]['function']['arguments']) + # todo 从历史对话中获取性别,目前无法获得性别时,默认使用female + gender = 
content['gender'] if "gender" in content.keys() and content['gender'] != 'unisex' else 'female' tool_info = {"name": "get_image_from_vector_db", "role": "tool", 'content': get_image_from_vector_db(gender, content['parameters']['content'] if "parameters" in content.keys() else content['content'])} flag = False From a14e6051b183874fe62f84325f190027c43eedec Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Wed, 23 Apr 2025 14:40:11 +0800 Subject: [PATCH 062/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20clothing=20seg=20=E5=A2=9E=E5=8A=A0=E7=B1=BB?= =?UTF-8?q?=E5=9E=8B=E4=B8=BAsketch=E7=9A=84=E9=A2=84=E5=A4=84=E7=90=86=20?= =?UTF-8?q?docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20r?= =?UTF-8?q?efactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/clothing_seg/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/service/clothing_seg/service.py b/app/service/clothing_seg/service.py index 7894bff..46cc444 100644 --- a/app/service/clothing_seg/service.py +++ b/app/service/clothing_seg/service.py @@ -62,7 +62,7 @@ class ClothingSeg: image = data["image"] clothing_result = [] if image_type == "sketch": - seg_mask = get_seg_result(1, image) + seg_mask = get_seg_result(1, image[:, :, :3]) temp = seg_mask != 0.0 mask = (255 * (temp + 0).astype(np.uint8)) x_min, y_min, x_max, y_max = get_bounding_box(mask) From 293f90f9d387cf382f8c6b3def369c8c938513f4 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Thu, 8 May 2025 17:46:28 +0800 Subject: [PATCH 063/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= 
=?UTF-8?q?=EF=BC=89:=20=20pose=20transform=20mq=E8=BF=9E=E6=8E=A5?= =?UTF-8?q?=E8=B6=85=E6=97=B6bug=E4=BF=AE=E5=A4=8D=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../generate_image/service_pose_transform.py | 44 +++++++++++++------ 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/app/service/generate_image/service_pose_transform.py b/app/service/generate_image/service_pose_transform.py index 07da8de..2bd81ac 100644 --- a/app/service/generate_image/service_pose_transform.py +++ b/app/service/generate_image/service_pose_transform.py @@ -29,9 +29,6 @@ logger = logging.getLogger() class PoseTransformService: def __init__(self, request_data): - if DEBUG is False: - self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) - self.channel = self.connection.channel() self.grpc_client = grpcclient.InferenceServerClient(url=PT_MODEL_URL) self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) self.category = "pose_transform" @@ -40,7 +37,8 @@ class PoseTransformService: self.image = pre_processing_image(request_data.image_url) self.tasks_id = request_data.tasks_id self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:] - self.pose_transform_data = {'tasks_id': self.tasks_id, 'status': 'PENDING', 'message': "pending", 'gif_url': '', 'video_url': '', 'image_url': ''} + self.pose_transform_data = {'tasks_id': self.tasks_id, 'status': 'PENDING', 'message': "pending", 'gif_url': '', + 'video_url': '', 'image_url': ''} self.redis_client.set(self.tasks_id, json.dumps(self.pose_transform_data)) self.redis_client.expire(self.tasks_id, 600) @@ -55,16 +53,20 @@ class PoseTransformService: # 第一帧图像 first_image = Image.fromarray(result_data[0]) - 
first_image_url = upload_first_image(first_image, user_id=self.user_id, category=f"{self.category}_first_img", file_name=f"{self.tasks_id}.png") + first_image_url = upload_first_image(first_image, user_id=self.user_id, + category=f"{self.category}_first_img", + file_name=f"{self.tasks_id}.png") # 上传GIF gif_buffer = BytesIO() imageio.mimsave(gif_buffer, result_data, format='GIF', fps=5) gif_buffer.seek(0) - gif_url = upload_gif(gif_buffer=gif_buffer, user_id=self.user_id, category=f"{self.category}_gif", file_name=f"{self.tasks_id}.gif") + gif_url = upload_gif(gif_buffer=gif_buffer, user_id=self.user_id, category=f"{self.category}_gif", + file_name=f"{self.tasks_id}.gif") # 上传video - video_url = upload_video(frames=result_data, user_id=self.user_id, category=f"{self.category}_video", file_name=f"{self.tasks_id}.mp4") + video_url = upload_video(frames=result_data, user_id=self.user_id, category=f"{self.category}_video", + file_name=f"{self.tasks_id}.mp4") self.pose_transform_data['status'] = "SUCCESS" self.pose_transform_data['message'] = "success" @@ -82,7 +84,8 @@ class PoseTransformService: try: pose_num = [self.pose_num] * 1 pose_num_obj = np.array(pose_num, dtype="object").reshape((-1, 1)) - input_pose_num = grpcclient.InferInput("pose_num", pose_num_obj.shape, np_to_triton_dtype(pose_num_obj.dtype)) + input_pose_num = grpcclient.InferInput("pose_num", pose_num_obj.shape, + np_to_triton_dtype(pose_num_obj.dtype)) input_pose_num.set_data_from_numpy(pose_num_obj) image_files = [self.image.astype(np.uint8)] * 1 @@ -90,7 +93,8 @@ class PoseTransformService: input_image_files = grpcclient.InferInput("image_file", image_files_obj.shape, "UINT8") input_image_files.set_data_from_numpy(image_files_obj) - ctx = self.grpc_client.async_infer(model_name="animatex_1", inputs=[input_pose_num, input_image_files], callback=self.callback, client_timeout=60000) + ctx = self.grpc_client.async_infer(model_name="animatex_1", inputs=[input_pose_num, input_image_files], + 
callback=self.callback, client_timeout=60000) time_out = 60000 while time_out > 0: pose_transform_data, _ = self.read_tasks_status() @@ -111,9 +115,22 @@ class PoseTransformService: finally: dict_pose_transform_data, str_pose_transform_data = self.read_tasks_status() if DEBUG is False: - self.channel.basic_publish(exchange='', routing_key=PS_RABBITMQ_QUEUES, body=str_pose_transform_data) - self.connection.close() - logger.info(f" [x] Sent to: {PS_RABBITMQ_QUEUES} data:@@@@ {json.dumps(dict_pose_transform_data, indent=4)}") + publish_status(str_pose_transform_data) + logger.info( + f" [x] Sent to: {PS_RABBITMQ_QUEUES} data:@@@@ {json.dumps(dict_pose_transform_data, indent=4)}") + + +def publish_status(message): + connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) + channel = connection.channel() + channel.queue_declare(queue=PS_RABBITMQ_QUEUES, durable=True) + channel.basic_publish(exchange='', + routing_key=PS_RABBITMQ_QUEUES, + body=json.dumps(message), + properties=pika.BasicProperties( + delivery_mode=2, + )) + connection.close() def infer_cancel(tasks_id): @@ -125,7 +142,8 @@ def infer_cancel(tasks_id): def pre_processing_image(image_url): - image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL") + image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], + data_type="PIL") # 目标图片的尺寸 target_width = 512 target_height = 768 From 3095d2654e8747785ceaa0b5bb1091d29b286c0c Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Thu, 8 May 2025 17:59:14 +0800 Subject: [PATCH 064/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20pose=20transform=20mq=E8=BF=9E=E6=8E=A5?= =?UTF-8?q?=E8=B6=85=E6=97=B6bug=E4=BF=AE=E5=A4=8D=20docs=EF=BC=88?= 
=?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/generate_image/service_pose_transform.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/service/generate_image/service_pose_transform.py b/app/service/generate_image/service_pose_transform.py index 2bd81ac..78ca227 100644 --- a/app/service/generate_image/service_pose_transform.py +++ b/app/service/generate_image/service_pose_transform.py @@ -104,7 +104,7 @@ class PoseTransformService: elif pose_transform_data['status'] == "SUCCESS": break time_out -= 1 - time.sleep(0.1) + time.sleep(1) pose_transform_data, _ = self.read_tasks_status() return pose_transform_data except Exception as e: From 6cb32d11a8c4a1f333bad099486b9e119cf834f9 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Thu, 15 May 2025 14:49:33 +0800 Subject: [PATCH 065/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20=E9=A1=B9=E7=9B=AE=E4=BF=A1=E6=81=AF?= =?UTF-8?q?=E6=8F=90=E5=8F=96/=E7=94=9F=E6=88=90=E6=8E=A5=E5=8F=A3=20fix?= =?UTF-8?q?=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_extraction_project_info.py | 33 +++++++++++++ app/api/api_route.py | 5 +- app/schemas/project_info_extraction.py | 5 ++ .../service_generate_brand_info.py | 47 +++++++++++++++++++ 4 files changed, 88 insertions(+), 2 deletions(-) create mode 100644 app/api/api_extraction_project_info.py create mode 100644 app/schemas/project_info_extraction.py create 
mode 100644 app/service/project_info_extraction/service_generate_brand_info.py diff --git a/app/api/api_extraction_project_info.py b/app/api/api_extraction_project_info.py new file mode 100644 index 0000000..51eb473 --- /dev/null +++ b/app/api/api_extraction_project_info.py @@ -0,0 +1,33 @@ +import logging + +from fastapi import APIRouter, HTTPException + +from app.schemas.project_info_extraction import ProjectInfoExtractionModel +from app.schemas.response_template import ResponseModel +from app.service.project_info_extraction.service_generate_brand_info import ProjectInfoExtraction + +router = APIRouter() +logger = logging.getLogger() + + +@router.post("/extraction_project_info") +def extraction_project_info(request_data: ProjectInfoExtractionModel): + """ + 通过prompt 提取project_name,role ,gender ,style。 + 创建一个具有以下参数的请求体: + - **prompt**: + + 示例参数: + { + "prompt": "海边派对主题的系列设计" + } + """ + try: + logger.info(f"extraction_project_info request item is : @@@@@@:{request_data}") + service = ProjectInfoExtraction(request_data) + data = service.get_result() + logger.info(f"extraction_project_info response @@@@@@:{data}") + except Exception as e: + logger.warning(f"extraction_project_info Run Exception @@@@@@:{e}") + raise HTTPException(status_code=404, detail=str(e)) + return ResponseModel(data=data) diff --git a/app/api/api_route.py b/app/api/api_route.py index b82c942..d85cbce 100644 --- a/app/api/api_route.py +++ b/app/api/api_route.py @@ -5,16 +5,16 @@ from app.api import api_attribute_retrieve, api_query_image from app.api import api_brand_dna from app.api import api_brighten from app.api import api_chat_robot +from app.api import api_clothing_seg from app.api import api_design from app.api import api_design_pre_processing +from app.api import api_extraction_project_info from app.api import api_generate_image from app.api import api_image2sketch from app.api import api_mannequins_edit from app.api import api_pose_transform from app.api import api_prompt_generation 
-from app.api import api_clothing_seg from app.api import api_super_resolution -from app.api import api_recommendation from app.api import api_test router = APIRouter() @@ -36,3 +36,4 @@ router.include_router(api_mannequins_edit.router, tags=['api_mannequins_edit'], router.include_router(api_agent_generate_image.router, tags=['api_agent_generate_image'], prefix="/api") router.include_router(api_pose_transform.router, tags=['api_pose_transform'], prefix="/api") router.include_router(api_clothing_seg.router, tags=['api_clothing_seg'], prefix="/api") +router.include_router(api_extraction_project_info.router, tags=['api_extraction_project_info'], prefix="/api") diff --git a/app/schemas/project_info_extraction.py b/app/schemas/project_info_extraction.py new file mode 100644 index 0000000..90def8b --- /dev/null +++ b/app/schemas/project_info_extraction.py @@ -0,0 +1,5 @@ +from pydantic import BaseModel + + +class ProjectInfoExtractionModel(BaseModel): + prompt: str diff --git a/app/service/project_info_extraction/service_generate_brand_info.py b/app/service/project_info_extraction/service_generate_brand_info.py new file mode 100644 index 0000000..8ee7bcd --- /dev/null +++ b/app/service/project_info_extraction/service_generate_brand_info.py @@ -0,0 +1,47 @@ +from langchain.output_parsers import ResponseSchema, StructuredOutputParser +from langchain_community.chat_models import ChatTongyi +from langchain_core.prompts import PromptTemplate + +from app.schemas.project_info_extraction import ProjectInfoExtractionModel + + +class ProjectInfoExtraction: + def __init__(self, request_data): + # llm generate brand info init + self.model = ChatTongyi(model="qwen2.5-14b-instruct", api_key="sk-7658298c6b99443c98184a5e634fe6ab") + + self.response_schemas = [ + ResponseSchema(name="project_name", description="project name."), + ResponseSchema(name="role", description="The target role of the project."), + ResponseSchema(name="gender", description="The gender targeted by the project."), 
+ ResponseSchema(name="style", description="Project style.") + ] + self.output_parser = StructuredOutputParser.from_response_schemas(self.response_schemas) + self.format_instructions = self.output_parser.get_format_instructions() + self.prompt = PromptTemplate( + template="你是一个时装品牌的设计师助理。根据用户输入提取出project_name,role ,gender ,style ." + "gender部分请用以下:menswear,womenswear,childrenwear,如果全部都适用即all." + "如果没有以上内容,需要你根据用户输入随意发挥.\n{format_instructions}\n{question}", + input_variables=["question"], + partial_variables={"format_instructions": self.format_instructions} + ) + self._input = self.prompt.format_prompt(question=request_data.prompt) + + self.result_data = {} + + def get_result(self): + self.llm_extraction_project_info() + return self.result_data + + def llm_extraction_project_info(self): + output = self.model(self._input.to_messages()) + project_info = self.output_parser.parse(output.content) + self.result_data = project_info + + +if __name__ == '__main__': + request_data = ProjectInfoExtractionModel( + prompt="海边派对主题的系列设计" + ) + service = ProjectInfoExtraction(request_data) + print(service.get_result()) From e4141b9e65b0ce95bb7a436694925d5a5fa561d5 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Thu, 15 May 2025 14:59:29 +0800 Subject: [PATCH 066/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20=E9=A1=B9=E7=9B=AE=E4=BF=A1=E6=81=AF?= =?UTF-8?q?=E6=8F=90=E5=8F=96/=E7=94=9F=E6=88=90=E6=8E=A5=E5=8F=A3=20fix?= =?UTF-8?q?=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_route.py | 1 + 1 file changed, 1 insertion(+) diff --git a/app/api/api_route.py b/app/api/api_route.py index d85cbce..9b48c5a 100644 
--- a/app/api/api_route.py +++ b/app/api/api_route.py @@ -14,6 +14,7 @@ from app.api import api_image2sketch from app.api import api_mannequins_edit from app.api import api_pose_transform from app.api import api_prompt_generation +from app.api import api_recommendation from app.api import api_super_resolution from app.api import api_test From 3a28a7e4b91b9911afac05da32976a1fed865163 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Thu, 15 May 2025 16:40:58 +0800 Subject: [PATCH 067/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20=E9=A1=B9=E7=9B=AE=E4=BF=A1=E6=81=AF?= =?UTF-8?q?=E6=8F=90=E5=8F=96/=E7=94=9F=E6=88=90=E6=8E=A5=E5=8F=A3=20fix?= =?UTF-8?q?=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_extraction_project_info.py | 2 +- .../project_info_extraction/service.py | 61 +++++++++++++++++++ .../service_generate_brand_info.py | 47 -------------- 3 files changed, 62 insertions(+), 48 deletions(-) create mode 100644 app/service/project_info_extraction/service.py delete mode 100644 app/service/project_info_extraction/service_generate_brand_info.py diff --git a/app/api/api_extraction_project_info.py b/app/api/api_extraction_project_info.py index 51eb473..ad55552 100644 --- a/app/api/api_extraction_project_info.py +++ b/app/api/api_extraction_project_info.py @@ -4,7 +4,7 @@ from fastapi import APIRouter, HTTPException from app.schemas.project_info_extraction import ProjectInfoExtractionModel from app.schemas.response_template import ResponseModel -from app.service.project_info_extraction.service_generate_brand_info import ProjectInfoExtraction +from app.service.project_info_extraction.service 
import ProjectInfoExtraction router = APIRouter() logger = logging.getLogger() diff --git a/app/service/project_info_extraction/service.py b/app/service/project_info_extraction/service.py new file mode 100644 index 0000000..40d59ba --- /dev/null +++ b/app/service/project_info_extraction/service.py @@ -0,0 +1,61 @@ +from langchain.output_parsers import ResponseSchema, StructuredOutputParser +from langchain_community.chat_models import ChatTongyi +from langchain_core.prompts import PromptTemplate + +from app.schemas.project_info_extraction import ProjectInfoExtractionModel + +style = ['NEW_CHINESE', 'COUNTRY_STYLE', 'FUTURISM', 'MINIMALISM', 'LOLITA', 'Y2K', 'BUSINESS', 'MERLAD', + 'OUTDOOR_FUNCTIONAL', 'ROCK', 'DOPAMINE', 'GOTHIC', 'POST_APOCALYPTIC', 'ROMANTIC', 'WABI_SABI'] +position = ['Overall', 'Tops', 'Bottoms', 'Outwear', 'Blouse', 'Dress', 'Trousers', 'Skirt'] +gender = ['Female', 'Male'] +age_group = ['Adult', 'Child'] +process = ['SERIES_DESIGN', 'SINGLE_DESIGN'] + + +class ProjectInfoExtraction: + def __init__(self, request_data): + # llm generate brand info init + self.model = ChatTongyi(model="qwen2.5-14b-instruct", api_key="sk-7658298c6b99443c98184a5e634fe6ab") + + self.response_schemas = [ + ResponseSchema(name="project_name", description="项目的名称."), + ResponseSchema(name="process", description="项目的类型,单品还是系列."), + ResponseSchema(name="ageGroup", description="项目设计服装的受众对象."), + ResponseSchema(name="gender", description="项目设计服装的受众性别."), + ResponseSchema(name="position", description="项目单品设计的部位."), + ResponseSchema(name="style", description="项目的设计风格.") + ] + self.output_parser = StructuredOutputParser.from_response_schemas(self.response_schemas) + self.format_instructions = self.output_parser.get_format_instructions() + self.prompt = PromptTemplate( + template="你是一个时装品牌的设计师助理。根据用户输入提取出" + "[project_name] : 项目的名称," + f"[process] : 项目的类型,从{process}选择." + f"[ageGroup] : 服装的受众,从{age_group}选择." 
+ f"[gender] : 服装的适用性别,从{gender}选择" + f"[position] : single_design的部位,如果[process]是SINGLE_DESIGN,从{position}中选择,如果[process]是SERIES_DESIGN,这项为空" + f"[style] : 设计的风格,从{style}中选择" + ".\n{format_instructions}\n{question}", + input_variables=["question"], + partial_variables={"format_instructions": self.format_instructions} + ) + self._input = self.prompt.format_prompt(question=request_data.prompt) + + self.result_data = {} + + def get_result(self): + self.llm_extraction_project_info() + return self.result_data + + def llm_extraction_project_info(self): + output = self.model(self._input.to_messages()) + project_info = self.output_parser.parse(output.content) + self.result_data = project_info + + +if __name__ == '__main__': + request_data = ProjectInfoExtractionModel( + prompt="海边派对主题的衬衫设计" + ) + service = ProjectInfoExtraction(request_data) + print(service.get_result()) diff --git a/app/service/project_info_extraction/service_generate_brand_info.py b/app/service/project_info_extraction/service_generate_brand_info.py deleted file mode 100644 index 8ee7bcd..0000000 --- a/app/service/project_info_extraction/service_generate_brand_info.py +++ /dev/null @@ -1,47 +0,0 @@ -from langchain.output_parsers import ResponseSchema, StructuredOutputParser -from langchain_community.chat_models import ChatTongyi -from langchain_core.prompts import PromptTemplate - -from app.schemas.project_info_extraction import ProjectInfoExtractionModel - - -class ProjectInfoExtraction: - def __init__(self, request_data): - # llm generate brand info init - self.model = ChatTongyi(model="qwen2.5-14b-instruct", api_key="sk-7658298c6b99443c98184a5e634fe6ab") - - self.response_schemas = [ - ResponseSchema(name="project_name", description="project name."), - ResponseSchema(name="role", description="The target role of the project."), - ResponseSchema(name="gender", description="The gender targeted by the project."), - ResponseSchema(name="style", description="Project style.") - ] - self.output_parser = 
StructuredOutputParser.from_response_schemas(self.response_schemas) - self.format_instructions = self.output_parser.get_format_instructions() - self.prompt = PromptTemplate( - template="你是一个时装品牌的设计师助理。根据用户输入提取出project_name,role ,gender ,style ." - "gender部分请用以下:menswear,womenswear,childrenwear,如果全部都适用即all." - "如果没有以上内容,需要你根据用户输入随意发挥.\n{format_instructions}\n{question}", - input_variables=["question"], - partial_variables={"format_instructions": self.format_instructions} - ) - self._input = self.prompt.format_prompt(question=request_data.prompt) - - self.result_data = {} - - def get_result(self): - self.llm_extraction_project_info() - return self.result_data - - def llm_extraction_project_info(self): - output = self.model(self._input.to_messages()) - project_info = self.output_parser.parse(output.content) - self.result_data = project_info - - -if __name__ == '__main__': - request_data = ProjectInfoExtractionModel( - prompt="海边派对主题的系列设计" - ) - service = ProjectInfoExtraction(request_data) - print(service.get_result()) From fbe939ee22f64108f4921f357d98d92761bf68ab Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Mon, 19 May 2025 13:10:51 +0800 Subject: [PATCH 068/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20=E6=96=B0=E5=A2=9E=E6=96=87=E4=BB=B6?= =?UTF-8?q?=E4=B8=8A=E4=BC=A0=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89?= =?UTF-8?q?:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:?= =?UTF-8?q?=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_extraction_project_info.py | 8 +++++++- app/schemas/project_info_extraction.py | 2 ++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/app/api/api_extraction_project_info.py b/app/api/api_extraction_project_info.py index ad55552..798126a 100644 --- 
a/app/api/api_extraction_project_info.py +++ b/app/api/api_extraction_project_info.py @@ -19,7 +19,13 @@ def extraction_project_info(request_data: ProjectInfoExtractionModel): 示例参数: { - "prompt": "海边派对主题的系列设计" + "prompt": "海边派对主题的系列设计", + "image_list": [ + "https://www.minio-api.aida.com.hk/test/test123.jpg?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=vXKFLSJkYeEq2DrSZvkB%2F20250519%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250519T050808Z&X-Amz-Expires=7200&X-Amz-SignedHeaders=host&X-Amz-Signature=296ff07cc4692d0a26ddffac582064f036494af343389fe60193dc2c5dc883ff" + ], + "file_list": [ + "" + ] } """ try: diff --git a/app/schemas/project_info_extraction.py b/app/schemas/project_info_extraction.py index 90def8b..6f579dd 100644 --- a/app/schemas/project_info_extraction.py +++ b/app/schemas/project_info_extraction.py @@ -3,3 +3,5 @@ from pydantic import BaseModel class ProjectInfoExtractionModel(BaseModel): prompt: str + image_list: list + file_list: list From f234ae29ffb5beead273ccc6205c5a1bfcd802db Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Mon, 2 Jun 2025 10:01:16 +0800 Subject: [PATCH 069/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20=20minio=E9=85=8D=E7=BD=AE=E6=9B=B4=E6=96=B0=20?= =?UTF-8?q?docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20r?= =?UTF-8?q?efactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/core/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/core/config.py b/app/core/config.py index ab53e0f..4930e97 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -43,7 +43,7 @@ JAVA_STREAM_API_URL = os.getenv("JAVA_STREAM_API_URL", "https://api.aida.com.hk/ settings = Settings() # minio 配置 -MINIO_URL = 
"www.minio.aida.com.hk:12024" +MINIO_URL = "www.minio-api.aida.com.hk" MINIO_ACCESS = 'vXKFLSJkYeEq2DrSZvkB' MINIO_SECRET = 'uKTZT3x7C43WvPN9QTc99DiRkwddWZrG9Uh3JVlR' MINIO_SECURE = True From 90f9879edb53f5bbf0b264a621d3e20d3d2b7c0e Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Wed, 4 Jun 2025 15:55:55 +0800 Subject: [PATCH 070/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20batch=20generate=20product=20=E5=85=A5?= =?UTF-8?q?=E5=8F=82=E5=9B=9E=E5=8F=82=E4=BF=AE=E6=94=B9=20fix=EF=BC=88?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87?= =?UTF-8?q?=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88?= =?UTF-8?q?=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B?= =?UTF-8?q?=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/schemas/generate_image.py | 13 +- .../service_batch_generate_product_image.py | 139 ++++++++++++------ 2 files changed, 105 insertions(+), 47 deletions(-) diff --git a/app/schemas/generate_image.py b/app/schemas/generate_image.py index 99d1836..a989f2e 100644 --- a/app/schemas/generate_image.py +++ b/app/schemas/generate_image.py @@ -1,3 +1,5 @@ +from typing import List + from pydantic import BaseModel @@ -43,13 +45,18 @@ class GenerateRelightImageModel(BaseModel): """ -class BatchGenerateProductImageModel(BaseModel): +class ProductItemModel(BaseModel): tasks_id: str + image_strength: float prompt: str image_url: str - image_strength: float product_type: str - batch_size: int + + +class BatchGenerateProductImageModel(BaseModel): + batch_tasks_id: str + user_id: str + batch_data_list: List[ProductItemModel] class BatchGenerateRelightImageModel(BaseModel): diff --git a/app/service/generate_batch_image/service_batch_generate_product_image.py b/app/service/generate_batch_image/service_batch_generate_product_image.py index f09fbd5..46a5695 100644 --- 
a/app/service/generate_batch_image/service_batch_generate_product_image.py +++ b/app/service/generate_batch_image/service_batch_generate_product_image.py @@ -19,7 +19,7 @@ from celery import Celery from tritonclient.utils import np_to_triton_dtype from app.core.config import * -from app.schemas.generate_image import BatchGenerateProductImageModel +from app.schemas.generate_image import BatchGenerateProductImageModel, ProductItemModel from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image from app.service.utils.oss_client import oss_get_image @@ -35,38 +35,38 @@ category = "product_image" @celery_app.task def batch_generate_product(batch_request_data): + batch_size = len(batch_request_data['batch_data_list']) logger.info(f"batch_generate_product batch_request_data:{json.dumps(batch_request_data, indent=4)}") - tasks_id = batch_request_data['tasks_id'] - user_id = tasks_id.rsplit('-', 1)[1] - batch_size = batch_request_data['batch_size'] - image = pre_processing_image(batch_request_data['image_url']) - image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB) - images = [image.astype(np.uint8)] * 1 + batch_tasks_id = batch_request_data['batch_tasks_id'] + user_id = batch_request_data['user_id'] + result_data_list = [] - prompts = [batch_request_data['prompt']] * 1 + for i, data in enumerate(batch_request_data['batch_data_list']): + tasks_id = data['tasks_id'] + image = pre_processing_image(data['image_url']) + image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB) + images = [image.astype(np.uint8)] * 1 + prompts = [data['prompt']] * 1 + if data['product_type'] == "single": + text_obj = np.array(prompts, dtype="object").reshape((-1, 1)) + image_obj = np.array(images, dtype=np.uint8).reshape((-1, 768, 512, 3)) + image_strength_obj = np.array(data['image_strength'], dtype=np.float32).reshape((-1, 1)) + else: + text_obj = np.array(prompts, dtype="object").reshape((1)) + image_obj = np.array(images, dtype=np.uint8).reshape((768, 512, 3)) + image_strength_obj = 
np.array(data['image_strength'], dtype=np.float32).reshape((1)) + input_text = grpcclient.InferInput("prompt", text_obj.shape, np_to_triton_dtype(text_obj.dtype)) + input_image = grpcclient.InferInput("input_image", image_obj.shape, "UINT8") + input_image_strength = grpcclient.InferInput("image_strength", image_strength_obj.shape, np_to_triton_dtype(image_strength_obj.dtype)) - if batch_request_data['product_type'] == "single": - text_obj = np.array(prompts, dtype="object").reshape((-1, 1)) - image_obj = np.array(images, dtype=np.uint8).reshape((-1, 768, 512, 3)) - image_strength_obj = np.array(batch_request_data['image_strength'], dtype=np.float32).reshape((-1, 1)) - else: - text_obj = np.array(prompts, dtype="object").reshape((1)) - image_obj = np.array(images, dtype=np.uint8).reshape((768, 512, 3)) - image_strength_obj = np.array(batch_request_data['image_strength'], dtype=np.float32).reshape((1)) - input_text = grpcclient.InferInput("prompt", text_obj.shape, np_to_triton_dtype(text_obj.dtype)) - input_image = grpcclient.InferInput("input_image", image_obj.shape, "UINT8") - input_image_strength = grpcclient.InferInput("image_strength", image_strength_obj.shape, np_to_triton_dtype(image_strength_obj.dtype)) + input_text.set_data_from_numpy(text_obj) + input_image.set_data_from_numpy(image_obj) + input_image_strength.set_data_from_numpy(image_strength_obj) - input_text.set_data_from_numpy(text_obj) - input_image.set_data_from_numpy(image_obj) - input_image_strength.set_data_from_numpy(image_strength_obj) + inputs = [input_text, input_image, input_image_strength] - inputs = [input_text, input_image, input_image_strength] - - image_url_list = [] - for i in range(batch_size): try: - if batch_request_data['product_type'] == "single": + if data['product_type'] == "single": result = grpc_client.infer(model_name=GPI_MODEL_NAME_SINGLE, inputs=inputs, priority=100) image = result.as_numpy("generated_cnet_image") else: @@ -77,7 +77,7 @@ def 
batch_generate_product(batch_request_data): if 'mask_list' in str(e): e_text_obj = np.array(prompts, dtype="object").reshape((-1, 1)) e_image_obj = np.array(images, dtype=np.uint8).reshape((-1, 768, 512, 3)) - e_image_strength_obj = np.array(batch_request_data['image_strength'], dtype=np.float32).reshape((-1, 1)) + e_image_strength_obj = np.array(data['image_strength'], dtype=np.float32).reshape((-1, 1)) e_input_text = grpcclient.InferInput("prompt", e_text_obj.shape, np_to_triton_dtype(e_text_obj.dtype)) e_input_image = grpcclient.InferInput("input_image", e_image_obj.shape, "UINT8") @@ -96,18 +96,29 @@ def batch_generate_product(batch_request_data): if isinstance(image_result, Image.Image): image_url = upload_SDXL_image(image_result, user_id=user_id, category=f"{category}", file_name=f"{tasks_id}-batch-{i}.png") - image_url_list.append(image_url) + data['product_img'] = image_url + result_data_list.append(data) else: image_url = image_result - if DEBUG is False: - if i + 1 < batch_size: - publish_status(tasks_id, f"{i + 1}/{batch_size}", image_url) - logger.info(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | image_url:{image_url}") - # print(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | image_url:{image_url}") - else: - publish_status(tasks_id, f"OK", image_url_list) - logger.info(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:OK | image_url:{image_url}") - # print(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:OK | image_url:{image_url}") + data['product_img'] = image_url + result_data_list.append(data) + + # 发送每条结果 + if DEBUG: + logger.info(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") + print(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") + else: + 
publish_status(tasks_id, f"{i + 1}/{batch_size}", image_url) + logger.info(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | image_url:{image_url}") + + # 任务完成,发送所有数据结果 + if DEBUG: + print(result_data_list) + logger.info(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | batch_tasks_id:{batch_tasks_id} | progress:OK | result_data_list:{result_data_list}") + print(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | batch_tasks_id:{batch_tasks_id} | progress:OK | result_data_list:{result_data_list}") + else: + publish_status(batch_tasks_id, f"OK", result_data_list) + logger.info(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | batch_tasks_id:{batch_tasks_id} | progress:OK | result_data_list:{result_data_list}") def pre_processing_image(image_url): @@ -180,12 +191,52 @@ def publish_status(task_id, progress, result): if __name__ == '__main__': + # rd = BatchGenerateProductImageModel( + # tasks_id="123-15-51-89", + # image_strength=0.7, + # prompt=" The best quality, masterpiece, real image.Outwear,high quality clothing details,8K realistic,HDR", + # image_url="aida-results/result_40b1a2fe-e220-11ef-9bfa-0242ac150003.png", + # product_type="overall", + # batch_size=20 + # ) + # batch_generate_product(rd.dict()) + # rd = { + # "user_id": "89", + # "batch_data_list": [ + # { + # "tasks_id": "A-123-15-51-89", + # "image_strength": 0.7, + # "prompt": " The best quality, ma123sterpiece, real image.Outwear,high quality clothing details,8K realistic,HDR", + # "image_url": "aida-results/result_40b1a2fe-e220-11ef-9bfa-0242ac150003.png", + # "product_type": "overall", + # }, + # { + # "tasks_id": "B-123-15-51-89", + # "image_strength": 0.7, + # "prompt": " The best quality, masterpiece, real image.Outwear123,high quality clothing details,8K realistic,HDR", + # "image_url": "aida-results/result_40b1a2fe-e220-11ef-9bfa-0242ac150003.png", + # "product_type": "overall", + # } + # ] + # } rd = BatchGenerateProductImageModel( - tasks_id="123-15-51-89", - 
image_strength=0.7, - prompt=" The best quality, masterpiece, real image.Outwear,high quality clothing details,8K realistic,HDR", - image_url="aida-results/result_40b1a2fe-e220-11ef-9bfa-0242ac150003.png", - product_type="overall", - batch_size=20 + batch_tasks_id="abcd", + user_id="89", + batch_data_list=[ + ProductItemModel( + tasks_id="123-5464", + image_strength=0.7, + product_type="overall", + image_url="aida-results/result_40b1a2fe-e220-11ef-9bfa-0242ac150003.png", + prompt="123" + ), + ProductItemModel( + tasks_id="123-5464123", + image_strength=0.7, + product_type="overall", + image_url="aida-results/result_40b1a2fe-e220-11ef-9bfa-0242ac150003.png", + prompt="123" + ) + ] ) batch_generate_product(rd.dict()) From 12bb12835126e89e5238e7b6530e5be7e3504a23 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Thu, 5 Jun 2025 15:14:36 +0800 Subject: [PATCH 071/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20batch=20generate=20relight=20=E5=85=A5?= =?UTF-8?q?=E5=8F=82=E5=9B=9E=E5=8F=82=E4=BF=AE=E6=94=B9=20fix=EF=BC=88?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs=EF=BC=88=E6=96=87?= =?UTF-8?q?=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88?= =?UTF-8?q?=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B?= =?UTF-8?q?=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/schemas/generate_image.py | 13 +- .../service_batch_generate_relight_image.py | 144 +++++++++++------- 2 files changed, 96 insertions(+), 61 deletions(-) diff --git a/app/schemas/generate_image.py b/app/schemas/generate_image.py index a989f2e..7d1d864 100644 --- a/app/schemas/generate_image.py +++ b/app/schemas/generate_image.py @@ -45,6 +45,7 @@ class GenerateRelightImageModel(BaseModel): """ +# product任务子项 class ProductItemModel(BaseModel): tasks_id: str image_strength: float @@ -53,16 +54,24 @@ class ProductItemModel(BaseModel): 
product_type: str +# product批处理 集合 class BatchGenerateProductImageModel(BaseModel): batch_tasks_id: str user_id: str batch_data_list: List[ProductItemModel] -class BatchGenerateRelightImageModel(BaseModel): +# relight任务子项 +class RelightItemModel(BaseModel): tasks_id: str prompt: str image_url: str direction: str product_type: str - batch_size: int + + +# relight批处理集合 +class BatchGenerateRelightImageModel(BaseModel): + batch_tasks_id: str + user_id: str + batch_data_list: List[RelightItemModel] diff --git a/app/service/generate_batch_image/service_batch_generate_relight_image.py b/app/service/generate_batch_image/service_batch_generate_relight_image.py index 83a5701..d75b0a7 100644 --- a/app/service/generate_batch_image/service_batch_generate_relight_image.py +++ b/app/service/generate_batch_image/service_batch_generate_relight_image.py @@ -18,7 +18,7 @@ from celery import Celery from tritonclient.utils import np_to_triton_dtype from app.core.config import * -from app.schemas.generate_image import BatchGenerateRelightImageModel +from app.schemas.generate_image import BatchGenerateRelightImageModel, RelightItemModel from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image from app.service.utils.oss_client import oss_get_image @@ -34,55 +34,58 @@ category = "relight_image" @celery_app.task def batch_generate_relight(batch_request_data): + batch_size = len(batch_request_data['batch_data_list']) logger.info(f"batch_generate_relight batch_request_data: {json.dumps(batch_request_data, indent=4)}") + batch_tasks_id = batch_request_data['batch_tasks_id'] + user_id = batch_request_data['user_id'] + result_data_list = [] negative_prompt = 'lowres, bad anatomy, bad hands, cropped, worst quality' - direction = batch_request_data['direction'] seed = "1" - prompt = batch_request_data['prompt'] - product_type = batch_request_data['product_type'] - image_url = batch_request_data['image_url'] - image = oss_get_image(bucket=image_url.split('/')[0], 
object_name=image_url.split('/', 1)[1], data_type="cv2") - tasks_id = batch_request_data['tasks_id'] - user_id = tasks_id.rsplit('-', 1)[1] - batch_size = batch_request_data['batch_size'] - prompts = [prompt] * 1 - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - image = cv2.resize(image, (512, 768)) - images = [image.astype(np.uint8)] * 1 - seeds = [seed] * 1 - nagetive_prompts = [negative_prompt] * 1 - directions = [direction] * 1 + for i, data in enumerate(batch_request_data['batch_data_list']): + direction = data['direction'] - if product_type == 'single': - text_obj = np.array(prompts, dtype="object").reshape((-1, 1)) - image_obj = np.array(images, dtype=np.uint8).reshape((-1, 768, 512, 3)) - na_text_obj = np.array(nagetive_prompts, dtype="object").reshape((-1, 1)) - seed_obj = np.array(seeds, dtype="object").reshape((-1, 1)) - direction_obj = np.array(directions, dtype="object").reshape((-1, 1)) - else: - text_obj = np.array(prompts, dtype="object").reshape((1)) - image_obj = np.array(images, dtype=np.uint8).reshape((768, 512, 3)) - na_text_obj = np.array(nagetive_prompts, dtype="object").reshape((1)) - seed_obj = np.array(seeds, dtype="object").reshape((1)) - direction_obj = np.array(directions, dtype="object").reshape((1)) - input_text = grpcclient.InferInput("prompt", text_obj.shape, np_to_triton_dtype(text_obj.dtype)) - input_image = grpcclient.InferInput("input_image", image_obj.shape, "UINT8") - input_natext = grpcclient.InferInput("negative_prompt", na_text_obj.shape, np_to_triton_dtype(na_text_obj.dtype)) - input_seed = grpcclient.InferInput("seed", seed_obj.shape, np_to_triton_dtype(seed_obj.dtype)) - input_direction = grpcclient.InferInput("direction", direction_obj.shape, np_to_triton_dtype(direction_obj.dtype)) + prompt = data['prompt'] + product_type = data['product_type'] + image_url = data['image_url'] + image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url.split('/', 1)[1], data_type="cv2") + tasks_id = data['tasks_id'] - 
input_text.set_data_from_numpy(text_obj) - input_image.set_data_from_numpy(image_obj) - input_natext.set_data_from_numpy(na_text_obj) - input_seed.set_data_from_numpy(seed_obj) - input_direction.set_data_from_numpy(direction_obj) + prompts = [prompt] * 1 + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + image = cv2.resize(image, (512, 768)) + images = [image.astype(np.uint8)] * 1 + seeds = [seed] * 1 + nagetive_prompts = [negative_prompt] * 1 + directions = [direction] * 1 - inputs = [input_text, input_natext, input_image, input_seed, input_direction] - image_url_list = [] - for i in range(batch_size): + if product_type == 'single': + text_obj = np.array(prompts, dtype="object").reshape((-1, 1)) + image_obj = np.array(images, dtype=np.uint8).reshape((-1, 768, 512, 3)) + na_text_obj = np.array(nagetive_prompts, dtype="object").reshape((-1, 1)) + seed_obj = np.array(seeds, dtype="object").reshape((-1, 1)) + direction_obj = np.array(directions, dtype="object").reshape((-1, 1)) + else: + text_obj = np.array(prompts, dtype="object").reshape((1)) + image_obj = np.array(images, dtype=np.uint8).reshape((768, 512, 3)) + na_text_obj = np.array(nagetive_prompts, dtype="object").reshape((1)) + seed_obj = np.array(seeds, dtype="object").reshape((1)) + direction_obj = np.array(directions, dtype="object").reshape((1)) + input_text = grpcclient.InferInput("prompt", text_obj.shape, np_to_triton_dtype(text_obj.dtype)) + input_image = grpcclient.InferInput("input_image", image_obj.shape, "UINT8") + input_natext = grpcclient.InferInput("negative_prompt", na_text_obj.shape, np_to_triton_dtype(na_text_obj.dtype)) + input_seed = grpcclient.InferInput("seed", seed_obj.shape, np_to_triton_dtype(seed_obj.dtype)) + input_direction = grpcclient.InferInput("direction", direction_obj.shape, np_to_triton_dtype(direction_obj.dtype)) + + input_text.set_data_from_numpy(text_obj) + input_image.set_data_from_numpy(image_obj) + input_natext.set_data_from_numpy(na_text_obj) + 
input_seed.set_data_from_numpy(seed_obj) + input_direction.set_data_from_numpy(direction_obj) + + inputs = [input_text, input_natext, input_image, input_seed, input_direction] try: - if batch_request_data['product_type'] == "single": + if data['product_type'] == "single": result = grpc_client.infer(model_name=GRI_MODEL_NAME_SINGLE, inputs=inputs, priority=100) image = result.as_numpy("generated_relight_image") else: @@ -121,18 +124,29 @@ def batch_generate_relight(batch_request_data): logger.error(e) if isinstance(image_result, Image.Image): image_url = upload_SDXL_image(image_result, user_id=user_id, category=f"{category}", file_name=f"{tasks_id}-batch-{i}.png") - image_url_list.append(image_url) + data['relight_img'] = image_url + + result_data_list.append(data) else: image_url = image_result - if DEBUG is False: - if i + 1 < batch_size: - publish_status(tasks_id, f"{i + 1}/{batch_size}", image_url) - logger.info(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | image_url:{image_url}") - # print(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | image_url:{image_url}") - else: - publish_status(tasks_id, f"OK", image_url_list) - logger.info(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:OK | image_url:{image_url}") - # print(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:OK | image_url:{image_url}") + data['relight_img'] = image_url + result_data_list.append(data) + + # 发送每条结果 + if DEBUG: + logger.info(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") + print(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") + else: + publish_status(tasks_id, f"{i + 1}/{batch_size}", image_url) + logger.info(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 
1}/{batch_size} | image_url:{image_url}") + # 任务完成,发送所有数据结果 + if DEBUG: + print(result_data_list) + logger.info(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | batch_tasks_id:{batch_tasks_id} | progress:OK | result_data_list:{result_data_list}") + print(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | batch_tasks_id:{batch_tasks_id} | progress:OK | result_data_list:{result_data_list}") + else: + publish_status(batch_tasks_id, f"OK", result_data_list) + logger.info(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | batch_tasks_id:{batch_tasks_id} | progress:OK | result_data_list:{result_data_list}") def publish_status(task_id, progress, result): @@ -151,12 +165,24 @@ def publish_status(task_id, progress, result): if __name__ == '__main__': rd = BatchGenerateRelightImageModel( - tasks_id="123-89", - # prompt="beautiful woman, detailed face, sunshine, outdoor, warm atmosphere", - prompt="Colorful black", - image_url='aida-users/89/clothing_seg/283c5c82-1a92-11f0-b72a-0242ac150002.png', - direction="Right Light", - product_type="overall", - batch_size=10 + batch_tasks_id="abcd", + user_id="89", + batch_data_list=[ + RelightItemModel( + tasks_id="123-5464", + product_type="overall", + image_url="aida-users/89/product_image/02894523-19b5-46eb-a9c6-2f512f5fec84-0-89.png", + prompt="Colorful black", + direction="Right Light", + ), + RelightItemModel( + tasks_id="123-5464123", + product_type="overall", + image_url="aida-users/89/product_image/02894523-19b5-46eb-a9c6-2f512f5fec84-0-89.png", + direction="Right Light", + prompt="Colorful black", + ) + ] ) + batch_generate_relight(rd.dict()) From be2d1db165fba481d0fa92afdda13b6a38690226 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Fri, 6 Jun 2025 16:50:55 +0800 Subject: [PATCH 072/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20batch=20generate=20product=20/=20relight=20?= 
=?UTF-8?q?=E6=8E=A5=E5=8F=A3=E5=93=8D=E5=BA=94=E5=BC=82=E5=B8=B8=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/generate_batch_image/service.py | 8 +++---- .../service_batch_generate_relight_image.py | 22 ++++++++++++++++++- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/app/service/generate_batch_image/service.py b/app/service/generate_batch_image/service.py index 2279382..6d007c8 100644 --- a/app/service/generate_batch_image/service.py +++ b/app/service/generate_batch_image/service.py @@ -6,15 +6,15 @@ from app.service.generate_batch_image.service_batch_pose_transform import batch_ async def start_product_batch_generate(data): generate_clothes_task = batch_generate_product.delay(data.dict()) print(generate_clothes_task) - product_publish_status(data.tasks_id, f"0/{data.batch_size}", "") - return {"task_id": data.tasks_id, "state": generate_clothes_task.state} + product_publish_status(data.batch_tasks_id, f"0/{len(data.batch_data_list)}", "") + return {"task_id": data.batch_tasks_id, "state": generate_clothes_task.state} async def start_relight_batch_generate(data): generate_clothes_task = batch_generate_relight.delay(data.dict()) print(generate_clothes_task) - relight_publish_status(data.tasks_id, f"0/{data.batch_size}", "") - return {"task_id": data.tasks_id, "state": generate_clothes_task.state} + relight_publish_status(data.batch_tasks_id, f"0/{len(data.batch_data_list)}", "") + return {"task_id": data.batch_tasks_id, "state": generate_clothes_task.state} async def start_pose_transform_batch_generate(data): diff --git a/app/service/generate_batch_image/service_batch_generate_relight_image.py b/app/service/generate_batch_image/service_batch_generate_relight_image.py index 
d75b0a7..0a90646 100644 --- a/app/service/generate_batch_image/service_batch_generate_relight_image.py +++ b/app/service/generate_batch_image/service_batch_generate_relight_image.py @@ -184,5 +184,25 @@ if __name__ == '__main__': ) ] ) - batch_generate_relight(rd.dict()) + # X = { + # "batch_tasks_id": "abcd", + # "user_id": "89", + # "batch_data_list": [ + # { + # "tasks_id": "123-5464", + # "product_type": "overall", + # "image_url": "aida-users/89/product_image/02894523-19b5-46eb-a9c6-2f512f5fec84-0-89.png", + # "prompt": "Colorful black", + # "direction": "Right Light", + # }, + # { + # "tasks_id": "123-5464", + # "product_type": "overall", + # "image_url": "aida-users/89/product_image/02894523-19b5-46eb-a9c6-2f512f5fec84-0-89.png", + # "prompt": "Colorful black", + # "direction": "Right Light", + # } + # + # ] + # } From e8cbb8569ac890de9369273c3208f1d7cf1009cd Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Fri, 6 Jun 2025 17:04:27 +0800 Subject: [PATCH 073/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20batch=20generate=20product=20/=20relight=20mq?= =?UTF-8?q?=E6=B6=88=E6=81=AF=E7=BB=93=E6=9E=84=E6=9B=B4=E6=96=B0=20docs?= =?UTF-8?q?=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refac?= =?UTF-8?q?tor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../service_batch_generate_product_image.py | 4 ++-- .../service_batch_generate_relight_image.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/app/service/generate_batch_image/service_batch_generate_product_image.py b/app/service/generate_batch_image/service_batch_generate_product_image.py index 46a5695..570354a 100644 --- a/app/service/generate_batch_image/service_batch_generate_product_image.py 
+++ b/app/service/generate_batch_image/service_batch_generate_product_image.py @@ -108,8 +108,8 @@ def batch_generate_product(batch_request_data): logger.info(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") print(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") else: - publish_status(tasks_id, f"{i + 1}/{batch_size}", image_url) - logger.info(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | image_url:{image_url}") + publish_status(tasks_id, f"{i + 1}/{batch_size}", data) + logger.info(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") # 任务完成,发送所有数据结果 if DEBUG: diff --git a/app/service/generate_batch_image/service_batch_generate_relight_image.py b/app/service/generate_batch_image/service_batch_generate_relight_image.py index 0a90646..e75c0cc 100644 --- a/app/service/generate_batch_image/service_batch_generate_relight_image.py +++ b/app/service/generate_batch_image/service_batch_generate_relight_image.py @@ -137,8 +137,8 @@ def batch_generate_relight(batch_request_data): logger.info(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") print(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") else: - publish_status(tasks_id, f"{i + 1}/{batch_size}", image_url) - logger.info(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | image_url:{image_url}") + publish_status(tasks_id, f"{i + 1}/{batch_size}", data) + logger.info(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") # 任务完成,发送所有数据结果 if DEBUG: print(result_data_list) From d39dee851fcadebc118b87d2a73b224fb28050b8 Mon Sep 
17 00:00:00 2001 From: shahaibo <1023316923@qq.com> Date: Tue, 10 Jun 2025 10:54:20 +0800 Subject: [PATCH 074/101] =?UTF-8?q?TASK:=E5=86=B7=E5=90=AF=E5=8A=A8?= =?UTF-8?q?=E7=83=AD=E5=BA=A6=E6=8E=A8=E8=8D=90=EF=BC=9B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_brand_dna_initialize.py | 212 ++++++++++++++++++++++++ app/api/api_recommendation.py | 72 ++++++-- app/service/recommend/scheduled_task.py | 110 +++++++++++- app/service/recommend/service.py | 22 ++- 4 files changed, 400 insertions(+), 16 deletions(-) create mode 100644 app/api/api_brand_dna_initialize.py diff --git a/app/api/api_brand_dna_initialize.py b/app/api/api_brand_dna_initialize.py new file mode 100644 index 0000000..72c0a25 --- /dev/null +++ b/app/api/api_brand_dna_initialize.py @@ -0,0 +1,212 @@ +import io +import logging +import sys +import time +from typing import List +from collections import defaultdict +import numpy as np +from apscheduler.schedulers.background import BackgroundScheduler +from apscheduler.triggers.cron import CronTrigger +from fastapi import HTTPException, APIRouter + +from app.service.recommend.service import load_resources, matrix_data +import pymysql +from app.core.config import DB_CONFIG, TABLE_CATEGORIES, RECOMMEND_PATH_PREFIX +from minio import Minio +import torch +from torchvision import models, transforms +from PIL import Image +import os +from fastapi.responses import JSONResponse + +sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8') +logger = logging.getLogger() +router = APIRouter() + +# MinIO 配置 +minio_client = Minio( + "www.minio.aida.com.hk:12024", + access_key="admin", + secret_key="Aidlab123123!", + secure=True +) + +transform = transforms.Compose([ + transforms.Resize((224, 224)), + transforms.ToTensor(), + transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]), +]) + +# ResNet50(去掉最后全连接层) +resnet_model = models.resnet50(pretrained=True) +resnet_model = 
torch.nn.Sequential(*list(resnet_model.children())[:-1]) +resnet_model.eval() + + +def get_sketch_image_from_minio(sketch_path: str): + path_parts = sketch_path.split('/', 1) + if len(path_parts) != 2: + return None + bucket_name, file_name = path_parts + try: + obj = minio_client.get_object(bucket_name, file_name) + img = Image.open(io.BytesIO(obj.read())) + return transform(img).unsqueeze(0) + except Exception as e: + logger.warning(f"Fetch image failed [{sketch_path}]: {e}") + return None + + +def extract_feature_vector_from_resnet(sketch_path: str) -> np.ndarray: + img_tensor = get_sketch_image_from_minio(sketch_path) + if img_tensor is None: + return np.zeros(2048, dtype=np.float32) + with torch.no_grad(): + vec = resnet_model(img_tensor) # [1, 2048, 1, 1] + return vec.squeeze().cpu().numpy() + + +# 预加载 +BRAND_FEATURES = np.load(f'{RECOMMEND_PATH_PREFIX}brand_feature.npy', allow_pickle=True).item() +SYSTEM_FEATURES = np.load(f'{RECOMMEND_PATH_PREFIX}sketch_feature_dict.npy', allow_pickle=True).item() + + +def save_sketch_to_iid(): + sketch_to_iid = { + sketch_path: iid + for iid, sketch_path in enumerate(SYSTEM_FEATURES.keys(), start=1) + } + np.save(f"{RECOMMEND_PATH_PREFIX}sketch_to_iid.npy", sketch_to_iid) + + +def load_sketch_to_iid(): + path = f"{RECOMMEND_PATH_PREFIX}sketch_to_iid.npy" + if os.path.exists(path): + return np.load(path, allow_pickle=True).item() + save_sketch_to_iid() + return np.load(path, allow_pickle=True).item() + + +sketch_to_iid = load_sketch_to_iid() + + +def getNewCategory(gender: str, sketch_category: str) -> str: + return f"{gender.lower()}_{sketch_category.lower()}" + + +def get_category_from_path(path: str) -> str: + parts = path.split('/') + if len(parts) >= 4: + return f"{parts[2].lower()}_{parts[3].lower()}" + return "unknown_unknown" + + +def load_brand_matrix(): + """单独加载 brand_matrix 和 brand_index_map""" + mat_path = f"{RECOMMEND_PATH_PREFIX}brand_matrix.npy" + idx_path = f"{RECOMMEND_PATH_PREFIX}brand_index_map.npy" + 
try: + matrix = np.load(mat_path) + index_map = np.load(idx_path, allow_pickle=True).item() + except FileNotFoundError: + matrix = np.zeros((0, len(sketch_to_iid)), dtype=np.float32) + index_map = {} + return matrix, index_map + +def cosine_similarity(vec1, vec2): + """计算余弦相似度(增加零值处理)""" + norm = np.linalg.norm(vec1) * np.linalg.norm(vec2) + return np.dot(vec1, vec2) / (norm + 1e-10) if norm != 0 else 0.0 + +def calculate_brand_matrix(sketch_data, brand_id: int) -> np.ndarray: + # 1. 收集品牌-分类-特征 + brand_feature = defaultdict(lambda: defaultdict(list)) + for _id, sketch_path, gender, sketch_category in sketch_data: + cat = getNewCategory(gender, sketch_category) + feat = BRAND_FEATURES.get(_id) or extract_feature_vector_from_resnet(sketch_path) + brand_feature[(brand_id, cat)][_id].append(feat) + + # 2. 构建 sketch 索引 + sketch_list = sorted(sketch_to_iid.values()) + sketch_index = {iid: idx for idx, iid in enumerate(sketch_list)} + n_sketch = len(sketch_list) + + # 3. 加载或初始化矩阵 + brand_matrix, brand_index_map = load_brand_matrix() + + # 4. 增加/更新 行 + if brand_id in brand_index_map: + row_idx = brand_index_map[brand_id] + else: + row_idx = brand_matrix.shape[0] + brand_index_map[brand_id] = row_idx + brand_matrix = np.vstack([ + brand_matrix, + np.zeros((1, n_sketch), dtype=np.float32) + ]) + + # 5. 计算品牌-分类平均向量 + brand_avg = {} + for key, id_dict in brand_feature.items(): + all_feats = [v for feats in id_dict.values() for v in feats] + if all_feats: + brand_avg[key] = np.mean(all_feats, axis=0) + + # 6. 填充相似度 + for sketch_path, sys_vec in SYSTEM_FEATURES.items(): + iid = sketch_to_iid.get(sketch_path) + if not iid or iid not in sketch_index: + continue + cat_key = (brand_id, get_category_from_path(sketch_path)) + avg_vec = brand_avg.get(cat_key) + if avg_vec is not None: + cos_sim = cosine_similarity(avg_vec, sys_vec) + brand_matrix[row_idx, sketch_index[iid]] = cos_sim + + # 7. 
持久化 + np.save(f"{RECOMMEND_PATH_PREFIX}brand_feature_matrix.npy", brand_matrix) + np.save(f"{RECOMMEND_PATH_PREFIX}brand_index_map.npy", brand_index_map) + + # 返回该品牌对应行 + return brand_matrix[row_idx:row_idx+1] + + +@router.get("/brand_dna_initialize/{brand_id}") +async def brand_dna_initialize(brand_id: int): + conn = None + try: + conn = pymysql.connect(**DB_CONFIG) + cursor = conn.cursor() + cursor.execute(""" + SELECT id, img_url, gender, category + FROM product_image_attribute + WHERE library_id IN ( + SELECT library_id + FROM brand_rel_library + WHERE brand_id = %s + ) + """, (brand_id,)) + sketch_data = cursor.fetchall() + + # 触发计算并持久化,若内部出错会抛异常 + _ = calculate_brand_matrix(sketch_data, brand_id) + + # 返回成功 + return {"success": True} + + except HTTPException: + # 已经是明确的 HTTPException,直接抛出 + raise + + except Exception as e: + logger.error(f"品牌初始化失败 [{brand_id}]: {e}", exc_info=True) + # 返回失败的 JSON,同时设置 500 状态码 + return JSONResponse( + status_code=500, + content={"success": False, "message": "品牌初始化失败"} + ) + + finally: + if conn: + conn.close() diff --git a/app/api/api_recommendation.py b/app/api/api_recommendation.py index 93fb251..5f71d38 100644 --- a/app/api/api_recommendation.py +++ b/app/api/api_recommendation.py @@ -3,7 +3,10 @@ import logging import sys import time from typing import List - +import os +import json +import math +import random import numpy as np from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.triggers.cron import CronTrigger @@ -31,18 +34,44 @@ async def startup_event(): scheduler.start() logger.info("定时任务已启动") -def get_random_recommendations(category: str, num: int) -> List[str]: - """全品类随机推荐""" - all_iids = list(matrix_data["iid_to_sketch"].keys()) - # 优先从当前品类选择 - category_iids = matrix_data["category_to_iids"].get(category, all_iids) - # 确保不超出实际数量 - sample_size = min(num, len(category_iids)) - sampled = np.random.choice(category_iids, size=sample_size, replace=False) - return 
[matrix_data["iid_to_sketch"][iid] for iid in sampled] +def softmax(scores): + max_score = max(scores) + exp_scores = [math.exp(s - max_score) for s in scores] + sum_exp = sum(exp_scores) + return [s / sum_exp for s in exp_scores] -@router.get("/recommend/{user_id}/{category}/{num_recommendations}", response_model=List[str]) -async def get_recommendations(user_id: int, category: str, num_recommendations: int = 10): +def get_random_recommendations(category: str, num: int) -> List[str]: + """根据预加载热度向量推荐(冷启动)""" + try: + heat_data = matrix_data.get("heat_data", {}) + + if category not in heat_data: + raise ValueError(f"热度数据缺少类别 {category},使用随机推荐") + + heat_dict = heat_data[category] # {url: score} + urls = list(heat_dict.keys()) + scores = list(heat_dict.values()) + + if not urls: + raise ValueError("该类别下无热度记录,使用随机推荐") + + probs = softmax(scores) + sample_size = min(num, len(urls)) + sampled_urls = random.choices(urls, weights=probs, k=sample_size) + + return sampled_urls + + except Exception as e: + # 回退:完全随机推荐 + all_iids = list(matrix_data["iid_to_sketch"].keys()) + category_iids = matrix_data["category_to_iids"].get(category, all_iids) + sample_size = min(num, len(category_iids)) + sampled = np.random.choice(category_iids, size=sample_size, replace=False) + return [matrix_data["iid_to_sketch"][iid] for iid in sampled] + + +@router.get("/recommend/{user_id}/{category}/{num_recommendations}/{brand_id}/{brand_scale}", response_model=List[str]) +async def get_recommendations(user_id: int, category: str, brand_id: int, brand_scale: float, num_recommendations: int = 10): """ :param user_id: 4 :param category: female_skirt @@ -95,7 +124,7 @@ async def get_recommendations(user_id: int, category: str, num_recommendations: raw_feat_scores = matrix_data["feature_matrix"][user_idx_feature, valid_sketch_idxs_feature] raw_feat_scores = (raw_feat_scores - np.min(raw_feat_scores)) / ( np.max(raw_feat_scores) - np.min(raw_feat_scores) + 1e-8) - processed_feat = raw_feat_scores * 
0.3 + processed_feat = raw_feat_scores else: processed_feat = np.array([]) @@ -104,7 +133,22 @@ async def get_recommendations(user_id: int, category: str, num_recommendations: matrix_data["cached_valid_idxs"][cache_key] = valid_sketch_idxs_inter # 合并分数 - final_scores = processed_inter + processed_feat + if brand_id is not None: + if brand_id is not None: + brand_idx_feature = matrix_data["brand_index_map"].get(brand_id) + if brand_idx_feature is not None and valid_sketch_idxs_feature: + raw_brand_feat_scores = matrix_data["brand_feature_matrix"][ + brand_idx_feature, valid_sketch_idxs_feature] + raw_brand_feat_scores = (raw_brand_feat_scores - np.min(raw_brand_feat_scores)) / ( + np.max(raw_brand_feat_scores) - np.min(raw_brand_feat_scores) + 1e-8) + processed_brand_feat = raw_brand_feat_scores + final_scores = processed_inter + 0.3 * ((1 - brand_scale) * processed_feat + brand_scale * processed_brand_feat) + else: + final_scores = processed_inter + 0.3 * processed_feat + else: + final_scores = processed_inter + 0.3 * processed_feat + else: + final_scores = processed_inter + 0.3 * processed_feat valid_sketch_idxs = matrix_data["cached_valid_idxs"][cache_key] # 概率采样 diff --git a/app/service/recommend/scheduled_task.py b/app/service/recommend/scheduled_task.py index ec1e4aa..f6b52ef 100644 --- a/app/service/recommend/scheduled_task.py +++ b/app/service/recommend/scheduled_task.py @@ -14,6 +14,9 @@ import matplotlib.pyplot as plt from scipy.sparse import csr_matrix import matplotlib.font_manager as fm from scipy import sparse +import pandas as pd +from datetime import datetime, timedelta +import json from app.core.config import DB_CONFIG, TABLE_CATEGORIES, RECOMMEND_PATH_PREFIX @@ -50,6 +53,13 @@ minio_client = Minio( # 预加载系统sketch特征向量 SYSTEM_FEATURES = np.load(f'{RECOMMEND_PATH_PREFIX}sketch_feature_dict.npy', allow_pickle=True).item() +# 行为权重和衰减系数 +BEHAVIOR_CONFIG = { + 'portfolioClick': {'weight': 1, 'decay': 0.3}, + 'portfolioLike': {'weight': 2, 'decay': 0.2}, + 
'secondCreation': {'weight': 3, 'decay': 0.1}, + 'sketchLike': {'weight': 4, 'decay': 0} # 不衰减 +} # 保存sketch_to_iid到文件 def save_sketch_to_iid(): @@ -418,9 +428,107 @@ def cosine_similarity(vec1, vec2): return np.dot(vec1, vec2) / (norm + 1e-10) if norm != 0 else 0.0 +def fetch_user_behavior_data(days=30): + """从MySQL获取用户行为数据(整合旧查询和新需求)""" + conn = None + try: + conn = pymysql.connect(**DB_CONFIG) + + # 计算日期范围 + end_date = datetime.now() + start_date = end_date - timedelta(days=days) + + # 整合查询(获取完整行为数据) + query = f""" + SELECT + account_id, + behavior_type, + gender, + category, + url, + create_time + FROM user_behavior + WHERE create_time BETWEEN '{start_date}' AND '{end_date}' + """ + + df = pd.read_sql(query, conn) + logging.info(f"成功读取{len(df)}条用户行为记录") + return df + + except Exception as e: + logging.error(f"数据库查询失败: {str(e)}") + return pd.DataFrame() + + finally: + if conn: + conn.close() + + +def calculate_heat(row, current_date): + """计算单个行为的热度值(每次行为独立计算,不考虑聚合次数)""" + # 计算时间差(天) + days_passed = (current_date - row['create_time']).days + + # 获取行为配置(默认权重为0) + config = BEHAVIOR_CONFIG.get(row['behavior_type'], {'weight': 0, 'decay': 0}) + + # 计算热度值 = 权重 * e^(-衰减系数 * 天数) + return config['weight'] * np.exp(-config['decay'] * days_passed) + +def load_heat_matrix_as_array(file_path): + """ + 直接加载为二维numpy数组 + 返回: (data_array, row_labels, col_labels) + """ + with open(file_path) as f: + saved = json.load(f) + return ( + np.array(saved['data']), # 二维矩阵 + saved['row_labels'], # 行标签列表 + saved['col_labels'] # 列标签列表 + ) + +def update_heat_matrices(): + """每日计算并存储热度矩阵(gender_category × path)""" + current_date = datetime.now() + + # 获取数据 + df = fetch_user_behavior_data(30) + if df.empty: + logging.warning("无有效数据,跳过今日计算") + return None + + # 计算热度值 + df['heat'] = df.apply(calculate_heat, axis=1, current_date=current_date) + df['gender_category'] = df['gender'] + '_' + df['category'] + + # 构建热度向量 + heat_vectors = {} + grouped = df.groupby(['gender_category', 
'url'])['heat'].sum() + for (gender_category, url), heat in grouped.items(): + heat_vectors.setdefault(gender_category, {})[url] = heat + + # 存储结果 + save_path = 'heat_vectors_data' + os.makedirs(save_path, exist_ok=True) + date_str = current_date.strftime('%Y%m%d') + + # vectors_file = f"{save_path}/heat_vectors_{date_str}.json" + vectors_file = f"{save_path}/heat_vectors.json" + with open(vectors_file, 'w', encoding='utf-8') as f: + json.dump({ + 'update_time': current_date.strftime('%Y-%m-%d %H:%M:%S'), + 'data': heat_vectors + }, f, ensure_ascii=False, indent=2) + + logging.info(f"成功存储热度向量,共{len(heat_vectors)}个分组,日期: {date_str}") + return heat_vectors + + if __name__ == "__main__": try: - update_user_matrices() + # update_user_matrices() + update_heat_matrices() # scheduler = BlockingScheduler() # scheduler.add_job(update_user_matrices, 'cron', hour=12, timezone='Asia/Shanghai') # logging.info("定时任务已启动,每天12:00执行") diff --git a/app/service/recommend/service.py b/app/service/recommend/service.py index 1ff9336..b3545f2 100644 --- a/app/service/recommend/service.py +++ b/app/service/recommend/service.py @@ -2,7 +2,8 @@ import logging import time from collections import defaultdict - +import os +import json import numpy as np from app.core.config import DB_CONFIG, RECOMMEND_PATH_PREFIX @@ -11,6 +12,8 @@ logger = logging.getLogger() import pymysql from concurrent.futures import ThreadPoolExecutor +HEAT_VECTOR_FILE = 'heat_vectors_data/heat_vectors.json' # 可动态加载或配置 + matrix_data = { "interaction_matrix": None, "feature_matrix": None, @@ -26,6 +29,9 @@ matrix_data = { "category_sketch_idxs_feature": None, "user_inter_full": dict(), "user_feat_full": dict(), + "brand_feature_matrix": None, + "brand_index_map": None, + "heat_data": {}, } @@ -48,7 +54,13 @@ def load_resources(): allow_pickle=True).item() matrix_data["feature_matrix"] = np.load(f"{RECOMMEND_PATH_PREFIX}feature_matrix.npy", allow_pickle=True) + + matrix_data["brand_feature_matrix"] = 
np.load(f"{RECOMMEND_PATH_PREFIX}brand_feature_matrix.npy", allow_pickle=True) + + matrix_data["brand_index_map"] = np.load(f"{RECOMMEND_PATH_PREFIX}brand_index_map.npy",allow_pickle=True).item() + matrix_data["user_index_feature"] = np.load(f"{RECOMMEND_PATH_PREFIX}user_index_feature_matrix.npy", allow_pickle=True).item() + matrix_data["sketch_index_feature"] = np.load(f"{RECOMMEND_PATH_PREFIX}sketch_index_feature_matrix.npy", allow_pickle=True).item() category_to_iid_map = np.load(f"{RECOMMEND_PATH_PREFIX}iid_to_category_interaction_matrix.npy", allow_pickle=True).item() @@ -61,6 +73,14 @@ def load_resources(): # 触发预缓存 precache_user_category() + if os.path.exists(HEAT_VECTOR_FILE): + with open(HEAT_VECTOR_FILE, 'r', encoding='utf-8') as f: + heat_json = json.load(f) + matrix_data["heat_data"] = heat_json.get("data", {}) + logger.info(f"热度向量数据加载完成,共加载 {len(matrix_data['heat_data'])} 个类别") + else: + matrix_data["heat_data"] = {} + except Exception as e: logger.error(f"资源加载失败: {str(e)}") raise RuntimeError("初始化失败") From 18c95a88b9cae95978c7a7f32ddc3dc3079d1198 Mon Sep 17 00:00:00 2001 From: shahaibo <1023316923@qq.com> Date: Tue, 10 Jun 2025 12:02:14 +0800 Subject: [PATCH 075/101] =?UTF-8?q?TASK:=E5=86=B7=E5=90=AF=E5=8A=A8?= =?UTF-8?q?=E7=83=AD=E5=BA=A6=E6=8E=A8=E8=8D=90=EF=BC=9B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_recommendation.py | 41 +++++++++++++++++-------- app/service/recommend/scheduled_task.py | 2 +- app/service/recommend/service.py | 15 +++++++-- 3 files changed, 42 insertions(+), 16 deletions(-) diff --git a/app/api/api_recommendation.py b/app/api/api_recommendation.py index 5f71d38..faeb780 100644 --- a/app/api/api_recommendation.py +++ b/app/api/api_recommendation.py @@ -134,21 +134,36 @@ async def get_recommendations(user_id: int, category: str, brand_id: int, brand_ # 合并分数 if brand_id is not None: - if brand_id is not None: - brand_idx_feature = 
matrix_data["brand_index_map"].get(brand_id) - if brand_idx_feature is not None and valid_sketch_idxs_feature: - raw_brand_feat_scores = matrix_data["brand_feature_matrix"][ - brand_idx_feature, valid_sketch_idxs_feature] - raw_brand_feat_scores = (raw_brand_feat_scores - np.min(raw_brand_feat_scores)) / ( - np.max(raw_brand_feat_scores) - np.min(raw_brand_feat_scores) + 1e-8) - processed_brand_feat = raw_brand_feat_scores - final_scores = processed_inter + 0.3 * ((1 - brand_scale) * processed_feat + brand_scale * processed_brand_feat) - else: - final_scores = processed_inter + 0.3 * processed_feat + brand_idx_feature = matrix_data["brand_index_map"].get(brand_id) + + brand_feat_valid = ( + matrix_data["brand_feature_matrix"].size > 0 and # 矩阵非空 + brand_idx_feature is not None and + valid_sketch_idxs_feature # 有可用索引 + ) + + if brand_feat_valid: + raw_brand_feat_scores = matrix_data["brand_feature_matrix"][ + brand_idx_feature, valid_sketch_idxs_feature + ] + raw_brand_feat_scores = (raw_brand_feat_scores - np.min(raw_brand_feat_scores)) / ( + np.max(raw_brand_feat_scores) - np.min(raw_brand_feat_scores) + 1e-8 + ) + processed_brand_feat = raw_brand_feat_scores + + # 如果 processed_feat 是空的,替换为全 0,避免 shape 不一致 + if processed_feat.size == 0: + processed_feat = np.zeros_like(processed_brand_feat) + + final_scores = processed_inter + 0.3 * ( + (1 - brand_scale) * processed_feat + brand_scale * processed_brand_feat + ) else: - final_scores = processed_inter + 0.3 * processed_feat + # brand 信息不可用 + final_scores = processed_inter + 0.3 * processed_feat if processed_feat.size > 0 else processed_inter else: - final_scores = processed_inter + 0.3 * processed_feat + final_scores = processed_inter + 0.3 * processed_feat if processed_feat.size > 0 else processed_inter + valid_sketch_idxs = matrix_data["cached_valid_idxs"][cache_key] # 概率采样 diff --git a/app/service/recommend/scheduled_task.py b/app/service/recommend/scheduled_task.py index f6b52ef..6dc67e8 100644 --- 
a/app/service/recommend/scheduled_task.py +++ b/app/service/recommend/scheduled_task.py @@ -527,7 +527,7 @@ def update_heat_matrices(): if __name__ == "__main__": try: - # update_user_matrices() + update_user_matrices() update_heat_matrices() # scheduler = BlockingScheduler() # scheduler.add_job(update_user_matrices, 'cron', hour=12, timezone='Asia/Shanghai') diff --git a/app/service/recommend/service.py b/app/service/recommend/service.py index b3545f2..0db64dd 100644 --- a/app/service/recommend/service.py +++ b/app/service/recommend/service.py @@ -55,9 +55,20 @@ def load_resources(): matrix_data["feature_matrix"] = np.load(f"{RECOMMEND_PATH_PREFIX}feature_matrix.npy", allow_pickle=True) - matrix_data["brand_feature_matrix"] = np.load(f"{RECOMMEND_PATH_PREFIX}brand_feature_matrix.npy", allow_pickle=True) + brand_feature_path = f"{RECOMMEND_PATH_PREFIX}brand_feature_matrix.npy" + if os.path.exists(brand_feature_path): + matrix_data["brand_feature_matrix"] = np.load(brand_feature_path, allow_pickle=True) + else: + logger.warning("brand_feature_matrix 文件不存在,使用空数组") + matrix_data["brand_feature_matrix"] = np.array([]) - matrix_data["brand_index_map"] = np.load(f"{RECOMMEND_PATH_PREFIX}brand_index_map.npy",allow_pickle=True).item() + # brand_index_map + brand_index_path = f"{RECOMMEND_PATH_PREFIX}brand_index_map.npy" + if os.path.exists(brand_index_path): + matrix_data["brand_index_map"] = np.load(brand_index_path, allow_pickle=True).item() + else: + logger.warning("brand_index_map 文件不存在,使用空字典") + matrix_data["brand_index_map"] = {} matrix_data["user_index_feature"] = np.load(f"{RECOMMEND_PATH_PREFIX}user_index_feature_matrix.npy", allow_pickle=True).item() From cee4f033e55afb38ee63fc6a79195434465d074a Mon Sep 17 00:00:00 2001 From: shahaibo <1023316923@qq.com> Date: Tue, 10 Jun 2025 13:38:28 +0800 Subject: [PATCH 076/101] =?UTF-8?q?TASK:=E5=86=B7=E5=90=AF=E5=8A=A8?= =?UTF-8?q?=E7=83=AD=E5=BA=A6=E6=8E=A8=E8=8D=90=E5=9B=9E=E9=80=80=EF=BC=9B?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_recommendation.py | 64 ++++++++++++++----------- app/service/recommend/scheduled_task.py | 12 ++--- 2 files changed, 43 insertions(+), 33 deletions(-) diff --git a/app/api/api_recommendation.py b/app/api/api_recommendation.py index faeb780..4084e46 100644 --- a/app/api/api_recommendation.py +++ b/app/api/api_recommendation.py @@ -40,34 +40,44 @@ def softmax(scores): sum_exp = sum(exp_scores) return [s / sum_exp for s in exp_scores] +# def get_random_recommendations(category: str, num: int) -> List[str]: +# """根据预加载热度向量推荐(冷启动)""" +# try: +# heat_data = matrix_data.get("heat_data", {}) +# +# if category not in heat_data: +# raise ValueError(f"热度数据缺少类别 {category},使用随机推荐") +# +# heat_dict = heat_data[category] # {url: score} +# urls = list(heat_dict.keys()) +# scores = list(heat_dict.values()) +# +# if not urls: +# raise ValueError("该类别下无热度记录,使用随机推荐") +# +# probs = softmax(scores) +# sample_size = min(num, len(urls)) +# sampled_urls = random.choices(urls, weights=probs, k=sample_size) +# +# return sampled_urls +# +# except Exception as e: +# # 回退:完全随机推荐 +# all_iids = list(matrix_data["iid_to_sketch"].keys()) +# category_iids = matrix_data["category_to_iids"].get(category, all_iids) +# sample_size = min(num, len(category_iids)) +# sampled = np.random.choice(category_iids, size=sample_size, replace=False) +# return [matrix_data["iid_to_sketch"][iid] for iid in sampled] + def get_random_recommendations(category: str, num: int) -> List[str]: - """根据预加载热度向量推荐(冷启动)""" - try: - heat_data = matrix_data.get("heat_data", {}) - - if category not in heat_data: - raise ValueError(f"热度数据缺少类别 {category},使用随机推荐") - - heat_dict = heat_data[category] # {url: score} - urls = list(heat_dict.keys()) - scores = list(heat_dict.values()) - - if not urls: - raise ValueError("该类别下无热度记录,使用随机推荐") - - probs = softmax(scores) - sample_size = min(num, len(urls)) - sampled_urls = random.choices(urls, 
weights=probs, k=sample_size) - - return sampled_urls - - except Exception as e: - # 回退:完全随机推荐 - all_iids = list(matrix_data["iid_to_sketch"].keys()) - category_iids = matrix_data["category_to_iids"].get(category, all_iids) - sample_size = min(num, len(category_iids)) - sampled = np.random.choice(category_iids, size=sample_size, replace=False) - return [matrix_data["iid_to_sketch"][iid] for iid in sampled] + """全品类随机推荐""" + all_iids = list(matrix_data["iid_to_sketch"].keys()) + # 优先从当前品类选择 + category_iids = matrix_data["category_to_iids"].get(category, all_iids) + # 确保不超出实际数量 + sample_size = min(num, len(category_iids)) + sampled = np.random.choice(category_iids, size=sample_size, replace=False) + return [matrix_data["iid_to_sketch"][iid] for iid in sampled] @router.get("/recommend/{user_id}/{category}/{num_recommendations}/{brand_id}/{brand_scale}", response_model=List[str]) diff --git a/app/service/recommend/scheduled_task.py b/app/service/recommend/scheduled_task.py index 6dc67e8..d3174ed 100644 --- a/app/service/recommend/scheduled_task.py +++ b/app/service/recommend/scheduled_task.py @@ -527,12 +527,12 @@ def update_heat_matrices(): if __name__ == "__main__": try: - update_user_matrices() - update_heat_matrices() - # scheduler = BlockingScheduler() - # scheduler.add_job(update_user_matrices, 'cron', hour=12, timezone='Asia/Shanghai') - # logging.info("定时任务已启动,每天12:00执行") - # scheduler.start() + # update_user_matrices() + # update_heat_matrices() + scheduler = BlockingScheduler() + scheduler.add_job(update_user_matrices, 'cron', hour=12, timezone='Asia/Shanghai') + logging.info("定时任务已启动,每天12:00执行") + scheduler.start() except KeyboardInterrupt: logging.info("定时任务已停止") except Exception as e: From cde7fe09eef507f786de92ad7787883b5f752d30 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Tue, 10 Jun 2025 17:34:45 +0800 Subject: [PATCH 077/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= 
=?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20pose=20transform=20minio=E5=9C=B0=E5=9D=80?= =?UTF-8?q?=E6=9B=B4=E6=94=B9=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98?= =?UTF-8?q?=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84?= =?UTF-8?q?=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/generate_image/utils/pose_transform_upload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index 71d31ef..b1368d5 100644 --- a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -11,7 +11,7 @@ from app.core.config import * from app.service.utils.new_oss_client import oss_upload_image # minio 配置 -MINIO_URL = "www.minio.aida.com.hk:12024" +MINIO_URL = "www.minio-api.aida.com.hk" MINIO_ACCESS = 'vXKFLSJkYeEq2DrSZvkB' MINIO_SECRET = 'uKTZT3x7C43WvPN9QTc99DiRkwddWZrG9Uh3JVlR' MINIO_SECURE = True From 72f24c9d14ecbfe9fb66612730e90ba8afcb71f6 Mon Sep 17 00:00:00 2001 From: xupei Date: Tue, 10 Jun 2025 17:41:32 +0800 Subject: [PATCH 078/101] =?UTF-8?q?=E8=B0=83=E7=94=A8llama3.2-vision,?= =?UTF-8?q?=E8=87=AA=E5=8A=A8=E8=AF=86=E5=88=AB=E5=9B=BE=E7=89=87=EF=BC=8C?= =?UTF-8?q?=E8=BE=93=E5=87=BAprompt?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_prompt_generation.py | 22 +++++++++++-- app/schemas/prompt_generation.py | 4 +++ .../chatgpt_for_translation.py | 33 +++++++++++++++++++ .../prompt_generation/util/minio_util.py | 21 ++++++++++++ 4 files changed, 78 insertions(+), 2 deletions(-) create mode 100644 app/service/prompt_generation/util/minio_util.py diff --git a/app/api/api_prompt_generation.py b/app/api/api_prompt_generation.py index b731e33..4df957f 
100644 --- a/app/api/api_prompt_generation.py +++ b/app/api/api_prompt_generation.py @@ -4,9 +4,10 @@ import time from fastapi import APIRouter, HTTPException -from app.schemas.prompt_generation import PromptGenerationImageModel +from app.schemas.prompt_generation import PromptGenerationImageModel, ImageRequest from app.schemas.response_template import ResponseModel -from app.service.prompt_generation.chatgpt_for_translation import translate_to_en, get_translation_from_llama3 +from app.service.prompt_generation.chatgpt_for_translation import get_translation_from_llama3, \ + get_prompt_from_image router = APIRouter() logger = logging.getLogger() @@ -32,3 +33,20 @@ def prompt_generation(request_data: PromptGenerationImageModel): logger.warning(f"prompt_generation Run Exception @@@@@@:{e}") raise HTTPException(status_code=404, detail=str(e)) return ResponseModel(data=data) + + +@router.post("/img2prompt") +def get_prompt_from_img(img: ImageRequest): + """ + 自动识别图片并输出为prompt + + :param img: 图片的minio地址 + :return: 图片的文字描述 + """ + text = ("Please describe the clothing in the image and provide a line art description of the outfit. 
" + "The description should allow for the reconstruction of the corresponding line art based on the details " + "given.") + logger.info(f"get_prompt_from_img request item is : @@@@@@:{img}") + description = get_prompt_from_image(img, text) + logger.info(f"生成的图片描述 response @@@@@@:{description}") + return description diff --git a/app/schemas/prompt_generation.py b/app/schemas/prompt_generation.py index 195291b..83f72af 100644 --- a/app/schemas/prompt_generation.py +++ b/app/schemas/prompt_generation.py @@ -3,3 +3,7 @@ from pydantic import BaseModel class PromptGenerationImageModel(BaseModel): text: str + + +class ImageRequest(BaseModel): + img: str diff --git a/app/service/prompt_generation/chatgpt_for_translation.py b/app/service/prompt_generation/chatgpt_for_translation.py index b6d8692..4c50d0b 100644 --- a/app/service/prompt_generation/chatgpt_for_translation.py +++ b/app/service/prompt_generation/chatgpt_for_translation.py @@ -9,6 +9,7 @@ from retry import retry from app.core.config import QWEN_API_KEY from app.service.chat_robot.script.service.CallQWen import get_language +from app.service.prompt_generation.util import minio_util logger = logging.getLogger(__name__) @@ -143,6 +144,38 @@ def get_translation_from_llama3(text): # response = requests.post(url, data=json.dumps(payload), headers=headers) +def get_prompt_from_image(image_path, text): + start_time = time.time() + # url = "http://localhost:11434/api/generate" + url = "http://10.1.1.243:11434/api/generate" + + image_base64 = minio_util.minio_url_to_base64(image_path.img) + # image_base64 = minio_url_to_base64(image_path) + + # 创建请求的负载 translator是自定义的翻译模型 + payload = { + "model": "llama3.2-vision", + "images": [image_base64], + "prompt": f"{text}", + "stream": False + } + # 将负载转换为 JSON 格式 + headers = {'Content-Type': 'application/json'} + response = requests.post(url, data=json.dumps(payload), headers=headers) + # 处理响应 + if response.status_code == 200: + # print("Response from server:") + # 
print(response.json()) + resp = json.loads(response.content).get("response") + logger.info(f"sketch re-generate server runtime is {time.time() - start_time} \n, response is {resp}") + # print("input : {}, sketch re-generate result : {}".format(text, resp)) + return resp + else: + logger.info(f"sketch re-generate server runtime is {time.time() - start_time} , response is {response.content}") + print(f"Request failed with status code {response.status_code}") + print(response.text) + + def main(): """Main function""" text = get_translation_from_llama3("[火焰]") diff --git a/app/service/prompt_generation/util/minio_util.py b/app/service/prompt_generation/util/minio_util.py new file mode 100644 index 0000000..8708ae9 --- /dev/null +++ b/app/service/prompt_generation/util/minio_util.py @@ -0,0 +1,21 @@ +import base64 + +from minio import Minio + +from app.core.config import MINIO_URL, MINIO_ACCESS, MINIO_SECRET, MINIO_SECURE + +minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) + + +def minio_url_to_base64(minio_url: str) -> str: + bucket_name, object_name = minio_url.split("/", 1) + + try: + response = minio_client.get_object(bucket_name, object_name) + image_data = response.read() + return base64.b64encode(image_data).decode('utf-8') + except Exception as e: + raise RuntimeError(f"Failed to get object: {e}") + finally: + if 'response' in locals(): + response.close() \ No newline at end of file From 0214cdffa4cb9c04c8be79651a5d41c09d186818 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Wed, 11 Jun 2025 16:12:39 +0800 Subject: [PATCH 079/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20pose=20transform=20=E5=9B=BE=E7=89=87=E5=90=88?= =?UTF-8?q?=E6=88=90=E8=A7=86=E9=A2=91=E6=96=B9=E6=B3=95=E6=9B=B4=E6=8D=A2?= =?UTF-8?q?=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:?= 
=?UTF-8?q?=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../utils/pose_transform_upload.py | 21 ++++--------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index b1368d5..e7925d7 100644 --- a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -3,9 +3,9 @@ import logging import os.path import numpy as np -import skvideo.io # import boto3 from minio import Minio +from moviepy.video.io.ImageSequenceClip import ImageSequenceClip from app.core.config import * from app.service.utils.new_oss_client import oss_upload_image @@ -63,23 +63,10 @@ def upload_video(frames, user_id, category, file_name): def ndarray_to_video(images, output_path, frame_size=(512, 768), fps=9): - save_path = os.path.join(POSE_TRANSFORM_VIDEO_PATH, output_path) - # 初始化视频写入器 - writer = skvideo.io.FFmpegWriter( - save_path, - inputdict={'-r': str(fps)}, - outputdict={'-r': str(fps), '-vcodec': 'libx264'} - ) - # 逐帧写入 - for frame in images: - # 调整尺寸(可选) - # resized_frame = cv2.resize(frame, frame_size) - # 转换颜色通道(若需从 BGR 转 RGB) - # rgb_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2RGB) - writer.writeFrame(frame) + save_path = os.path.join(r"E:\workspace\trinity_client_aida\app\service\generate_image\pose_transform_video", output_path) + clip = ImageSequenceClip([frame for frame in images], fps=fps) + clip.write_videofile(save_path, codec='libx264') - # 关闭写入器 - writer.close() return save_path From 3fe549f3b19cbe6943b6c855c046a9600d5174be Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Wed, 11 Jun 2025 16:13:13 +0800 Subject: [PATCH 080/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= 
=?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20pose=20transform=20=E5=9B=BE=E7=89=87=E5=90=88?= =?UTF-8?q?=E6=88=90=E8=A7=86=E9=A2=91=E6=96=B9=E6=B3=95=E6=9B=B4=E6=8D=A2?= =?UTF-8?q?=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:?= =?UTF-8?q?=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- requirements.txt | Bin 1970 -> 1998 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/requirements.txt b/requirements.txt index 3e5cb55d1b7f913548f31024557f7d9071aa1bad..b7e6c28794103d291a567e6f877e7eabffdd7f8f 100644 GIT binary patch delta 40 scmdnQe~y2H0h@F#Lq0 Date: Wed, 11 Jun 2025 16:28:04 +0800 Subject: [PATCH 081/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20pose=20transform=20=E5=9B=BE=E7=89=87=E5=90=88?= =?UTF-8?q?=E6=88=90=E8=A7=86=E9=A2=91=E6=96=B9=E6=B3=95=E6=9B=B4=E6=8D=A2?= =?UTF-8?q?=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:?= =?UTF-8?q?=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/generate_image/utils/pose_transform_upload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index e7925d7..f5e5318 100644 --- a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -63,7 +63,7 @@ def upload_video(frames, user_id, category, file_name): def ndarray_to_video(images, output_path, frame_size=(512, 768), fps=9): - save_path = 
os.path.join(r"E:\workspace\trinity_client_aida\app\service\generate_image\pose_transform_video", output_path) + save_path = os.path.join(POSE_TRANSFORM_VIDEO_PATH, output_path) clip = ImageSequenceClip([frame for frame in images], fps=fps) clip.write_videofile(save_path, codec='libx264') From 41c09be7e93b31059ab21399e824f64a30a0f925 Mon Sep 17 00:00:00 2001 From: zchengrong Date: Mon, 23 Jun 2025 15:09:51 +0800 Subject: [PATCH 082/101] test --- app/core/config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/core/config.py b/app/core/config.py index 4930e97..4e83397 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -35,8 +35,8 @@ else: RECOMMEND_PATH_PREFIX = "app/service/recommend/" # RABBITMQ_ENV = "" # 生产环境 -RABBITMQ_ENV = "-dev" # 开发环境 -# RABBITMQ_ENV = "-local" # 本地测试环境 +# RABBITMQ_ENV = "-dev" # 开发环境 +RABBITMQ_ENV = "-local" # 本地测试环境 JAVA_STREAM_API_URL = os.getenv("JAVA_STREAM_API_URL", "https://api.aida.com.hk/api/third/party/receiveDesignResults") From d64ffa26260f8e58963c4c98f99117a07931f1ae Mon Sep 17 00:00:00 2001 From: zchengrong Date: Mon, 23 Jun 2025 15:11:20 +0800 Subject: [PATCH 083/101] test --- app/core/config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/core/config.py b/app/core/config.py index 4e83397..4930e97 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -35,8 +35,8 @@ else: RECOMMEND_PATH_PREFIX = "app/service/recommend/" # RABBITMQ_ENV = "" # 生产环境 -# RABBITMQ_ENV = "-dev" # 开发环境 -RABBITMQ_ENV = "-local" # 本地测试环境 +RABBITMQ_ENV = "-dev" # 开发环境 +# RABBITMQ_ENV = "-local" # 本地测试环境 JAVA_STREAM_API_URL = os.getenv("JAVA_STREAM_API_URL", "https://api.aida.com.hk/api/third/party/receiveDesignResults") From 87daf3b52dfddc23db9c9018d78181d8bf462f81 Mon Sep 17 00:00:00 2001 From: zchengrong Date: Mon, 23 Jun 2025 15:53:41 +0800 Subject: [PATCH 084/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= 
=?UTF-8?q?=EF=BC=89:=20=20clothing=20seg=20=E6=9C=8D=E5=8A=A1=E5=9B=BE?= =?UTF-8?q?=E7=89=87=E7=BC=BA=E5=B0=913=E9=80=9A=E9=81=93=E9=80=BB?= =?UTF-8?q?=E8=BE=91=E5=A2=9E=E5=8A=A0=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/search_image_with_text/service.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/service/search_image_with_text/service.py b/app/service/search_image_with_text/service.py index edd4d93..6d4f490 100644 --- a/app/service/search_image_with_text/service.py +++ b/app/service/search_image_with_text/service.py @@ -6,7 +6,7 @@ from chromadb.config import Settings from chromadb.utils.embedding_functions.ollama_embedding_function import OllamaEmbeddingFunction from tqdm import tqdm -from app.core.config import OLLAMA_URL +from app.core.config import OLLAMA_URL, CHROMADB_PATH # 读取 csv 文件 # csv_file_path = r'D:/Files/csv/output/output.csv' @@ -15,7 +15,7 @@ from app.core.config import OLLAMA_URL # df = pd.read_csv(csv_file_path, encoding='Windows-1252') # 创建 Chroma 客户端 -client = chromadb.Client(Settings(is_persistent=True, persist_directory="/vector_db")) +client = chromadb.Client(Settings(is_persistent=True, persist_directory=CHROMADB_PATH)) # client = chromadb.Client(Settings(is_persistent=True, persist_directory="./service/search_image_with_text/vector_db")) # client = chromadb.Client(Settings(is_persistent=True, persist_directory="D:/workspace/AiDLab/vector_db")) # 创建集合 From 58fbceef97434faeeb9686e3e5bcdb5e4f30fcdc Mon Sep 17 00:00:00 2001 From: zchengrong Date: Mon, 23 Jun 2025 15:54:32 +0800 Subject: [PATCH 085/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= 
=?UTF-8?q?=EF=BC=89:=20=20clothing=20seg=20=E6=9C=8D=E5=8A=A1=E5=9B=BE?= =?UTF-8?q?=E7=89=87=E7=BC=BA=E5=B0=913=E9=80=9A=E9=81=93=E9=80=BB?= =?UTF-8?q?=E8=BE=91=E5=A2=9E=E5=8A=A0=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/clothing_seg/service.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/app/service/clothing_seg/service.py b/app/service/clothing_seg/service.py index 46cc444..6d2b4d0 100644 --- a/app/service/clothing_seg/service.py +++ b/app/service/clothing_seg/service.py @@ -62,7 +62,11 @@ class ClothingSeg: image = data["image"] clothing_result = [] if image_type == "sketch": - seg_mask = get_seg_result(1, image[:, :, :3]) + if len(image.shape) == 2: + image = np.stack([image] * 3, axis=-1) + seg_mask = get_seg_result(1, image[:, :, :3]) + else: + seg_mask = get_seg_result(1, image[:, :, :3]) temp = seg_mask != 0.0 mask = (255 * (temp + 0).astype(np.uint8)) x_min, y_min, x_max, y_max = get_bounding_box(mask) From a8af4691b295ccdf1d389183829c00f46b69c0aa Mon Sep 17 00:00:00 2001 From: zchengrong Date: Mon, 23 Jun 2025 16:08:47 +0800 Subject: [PATCH 086/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20clothing=20seg=20=E6=9C=8D=E5=8A=A1=E5=9B=BE?= =?UTF-8?q?=E7=89=87=E7=BC=BA=E5=B0=913=E9=80=9A=E9=81=93=E9=80=BB?= =?UTF-8?q?=E8=BE=91=E5=A2=9E=E5=8A=A0=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/core/config.py | 3 +++ 1 file changed, 3 
insertions(+) diff --git a/app/core/config.py b/app/core/config.py index 4930e97..b4e26de 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -27,12 +27,15 @@ if DEBUG: SEG_CACHE_PATH = "../seg_cache/" POSE_TRANSFORM_VIDEO_PATH = "../pose_transform_video/" RECOMMEND_PATH_PREFIX = "service/recommend/" + CHROMADB_PATH = "./chromadb/" else: LOGS_PATH = "app/logs/" CATEGORY_PATH = "app/service/attribute/config/descriptor/category/category_dis.csv" SEG_CACHE_PATH = "/seg_cache/" POSE_TRANSFORM_VIDEO_PATH = "/pose_transform_video/" RECOMMEND_PATH_PREFIX = "app/service/recommend/" + CHROMADB_PATH = "/chromadb/" + # RABBITMQ_ENV = "" # 生产环境 RABBITMQ_ENV = "-dev" # 开发环境 From c77540678b0a4a6511e68b0a87c20f63df7e4ada Mon Sep 17 00:00:00 2001 From: zchengrong Date: Tue, 24 Jun 2025 11:12:02 +0800 Subject: [PATCH 087/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20clothing=20seg=20=E6=9C=8D=E5=8A=A1=E5=9B=BE?= =?UTF-8?q?=E7=89=87=E7=BC=BA=E5=B0=913=E9=80=9A=E9=81=93=E9=80=BB?= =?UTF-8?q?=E8=BE=91=E5=A2=9E=E5=8A=A0=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/clothing_seg/service.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/app/service/clothing_seg/service.py b/app/service/clothing_seg/service.py index 6d2b4d0..34ea7ee 100644 --- a/app/service/clothing_seg/service.py +++ b/app/service/clothing_seg/service.py @@ -63,7 +63,7 @@ class ClothingSeg: clothing_result = [] if image_type == "sketch": if len(image.shape) == 2: - image = np.stack([image] * 3, axis=-1) + image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) seg_mask = get_seg_result(1, image[:, :, :3]) else: seg_mask = get_seg_result(1, image[:, 
:, :3]) @@ -150,8 +150,8 @@ if __name__ == "__main__": # "image_type": "sketch" # }, { - "image_url": "test/clothing_seg/10144613.jpg", - "image_type": "product" + "image_url": "aida-collection-element/87/Sketchboard/ab40e035-547a-48c5-9f97-1db7bf56ad77.jpg", + "image_type": "sketch" } ] ) From 6203dde267214c5a099accb159a6ed923f78c16a Mon Sep 17 00:00:00 2001 From: zchengrong Date: Tue, 24 Jun 2025 16:58:05 +0800 Subject: [PATCH 088/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20=E5=9B=BE=E7=89=87=E7=94=9F=E6=88=90=E6=9C=8D?= =?UTF-8?q?=E5=8A=A1=E4=BC=98=E5=8C=96=EF=BC=8C=E9=81=BF=E5=85=8Dmq?= =?UTF-8?q?=E8=BF=9E=E6=8E=A5=E8=B6=85=E6=97=B6=20docs=EF=BC=88=E6=96=87?= =?UTF-8?q?=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88?= =?UTF-8?q?=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B?= =?UTF-8?q?=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_agent_generate_image.py | 19 - app/api/api_route.py | 2 - app/api/api_test.py | 12 +- app/service/generate_image/agent_generate.py | 72 - .../generate_image/service_generate_image.py | 16 +- .../service_generate_multi_view.py | 20 +- .../service_generate_product_image.py | 13 +- .../service_generate_relight_image.py | 11 +- .../service_generate_single_logo.py | 11 +- .../generate_image/service_pose_transform.py | 16 +- app/service/generate_image/utils/mq.py | 23 + pyproject.toml | 18 + uv.lock | 1193 +++++++++++++++++ 13 files changed, 1259 insertions(+), 167 deletions(-) delete mode 100644 app/api/api_agent_generate_image.py delete mode 100644 app/service/generate_image/agent_generate.py create mode 100644 app/service/generate_image/utils/mq.py create mode 100755 pyproject.toml create mode 100755 uv.lock diff --git a/app/api/api_agent_generate_image.py b/app/api/api_agent_generate_image.py deleted file mode 100644 index e4001ff..0000000 
--- a/app/api/api_agent_generate_image.py +++ /dev/null @@ -1,19 +0,0 @@ -import io -import logging - -from fastapi import APIRouter, HTTPException -from starlette.responses import StreamingResponse - -from app.schemas.response_template import ResponseModel -from app.service.generate_image.agent_generate import GenerateImage - -router = APIRouter() -logger = logging.getLogger() - - -@router.get("/agent_generate_image") -def generate_image(prompt: str): - server = GenerateImage() - byte_stream = server.get_result(prompt) - # 返回流式响应 - return StreamingResponse(byte_stream, media_type="image/png") diff --git a/app/api/api_route.py b/app/api/api_route.py index 9b48c5a..eedb6fb 100644 --- a/app/api/api_route.py +++ b/app/api/api_route.py @@ -1,6 +1,5 @@ from fastapi import APIRouter -from app.api import api_agent_generate_image from app.api import api_attribute_retrieve, api_query_image from app.api import api_brand_dna from app.api import api_brighten @@ -34,7 +33,6 @@ router.include_router(api_query_image.router, tags=['api_query_image'], prefix=" router.include_router(api_brand_dna.router, tags=['api_brand_dna'], prefix="/api") router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") router.include_router(api_mannequins_edit.router, tags=['api_mannequins_edit'], prefix="/api") -router.include_router(api_agent_generate_image.router, tags=['api_agent_generate_image'], prefix="/api") router.include_router(api_pose_transform.router, tags=['api_pose_transform'], prefix="/api") router.include_router(api_clothing_seg.router, tags=['api_clothing_seg'], prefix="/api") router.include_router(api_extraction_project_info.router, tags=['api_extraction_project_info'], prefix="/api") diff --git a/app/api/api_test.py b/app/api/api_test.py index a273b11..0f8b98f 100644 --- a/app/api/api_test.py +++ b/app/api/api_test.py @@ -4,7 +4,7 @@ import logging from fastapi import APIRouter from fastapi import HTTPException -from app.core.config import 
SR_RABBITMQ_QUEUES, GI_RABBITMQ_QUEUES, GPI_RABBITMQ_QUEUES, GRI_RABBITMQ_QUEUES, OSS, JAVA_STREAM_API_URL +from app.core.config import SR_RABBITMQ_QUEUES, GI_RABBITMQ_QUEUES, GPI_RABBITMQ_QUEUES, GRI_RABBITMQ_QUEUES, OSS, JAVA_STREAM_API_URL, GMV_RABBITMQ_QUEUES, SLOGAN_RABBITMQ_QUEUES, GEN_SINGLE_LOGO_RABBITMQ_QUEUES from app.schemas.response_template import ResponseModel logger = logging.getLogger() @@ -14,10 +14,12 @@ router = APIRouter() @router.get("{id}") def test(id: int): data = { - "SR_RABBITMQ_QUEUES message": SR_RABBITMQ_QUEUES, - "GI_RABBITMQ_QUEUES": GI_RABBITMQ_QUEUES, - "GPI_RABBITMQ_QUEUES": GPI_RABBITMQ_QUEUES, - "GRI_RABBITMQ_QUEUES": GRI_RABBITMQ_QUEUES, + "超分 SR_RABBITMQ_QUEUES": SR_RABBITMQ_QUEUES, + "多视角 GMV_RABBITMQ_QUEUES": GMV_RABBITMQ_QUEUES, + "logan SLOGAN_RABBITMQ_QUEUES": SLOGAN_RABBITMQ_QUEUES, + "image and single logo GI_RABBITMQ_QUEUES": GI_RABBITMQ_QUEUES, + "to product image GPI_RABBITMQ_QUEUES": GPI_RABBITMQ_QUEUES, + "relight GRI_RABBITMQ_QUEUES": GRI_RABBITMQ_QUEUES, "JAVA_STREAM_API_URL": JAVA_STREAM_API_URL, "local_oss_server": OSS } diff --git a/app/service/generate_image/agent_generate.py b/app/service/generate_image/agent_generate.py deleted file mode 100644 index 58ac869..0000000 --- a/app/service/generate_image/agent_generate.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python -# -*- coding: UTF-8 -*- -""" -@Project :trinity_client -@File :service_att_recognition.py -@Author :周成融 -@Date :2023/7/26 12:01:05 -@detail : -""" -import io -import logging -from datetime import timedelta - -import cv2 -import numpy as np -import tritonclient.grpc as grpcclient -from minio import Minio -from tritonclient.utils import np_to_triton_dtype - -from app.core.config import * -from app.service.utils.oss_client import oss_upload_image - -logger = logging.getLogger() - - -class GenerateImage: - def __init__(self): - self.grpc_client = grpcclient.InferenceServerClient(url=GI_MODEL_URL) - self.image = np.random.randint(0, 256, (1024, 
1024, 3), dtype=np.uint8) - self.batch_size = 1 - self.mode = 'txt2img' - self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) - - def get_result(self, prompt): - prompts = [prompt] * self.batch_size - modes = [self.mode] * self.batch_size - images = [self.image.astype(np.float16)] * self.batch_size - - text_obj = np.array(prompts, dtype="object").reshape((-1, 1)) - mode_obj = np.array(modes, dtype="object").reshape((-1, 1)) - image_obj = np.array(images, dtype=np.float16).reshape((-1, 1024, 1024, 3)) - - input_text = grpcclient.InferInput("prompt", text_obj.shape, np_to_triton_dtype(text_obj.dtype)) - input_image = grpcclient.InferInput("input_image", image_obj.shape, np_to_triton_dtype(image_obj.dtype)) - input_mode = grpcclient.InferInput("mode", mode_obj.shape, np_to_triton_dtype(mode_obj.dtype)) - - input_text.set_data_from_numpy(text_obj) - input_image.set_data_from_numpy(image_obj) - input_mode.set_data_from_numpy(mode_obj) - - inputs = [input_text, input_image, input_mode] - result = self.grpc_client.infer(model_name=GI_MODEL_NAME, inputs=inputs) - image = result.as_numpy("generated_image") - image_result = cv2.cvtColor(np.squeeze(image.astype(np.uint8)), cv2.COLOR_RGB2BGR) - _, img_byte_array = cv2.imencode('.jpg', image_result) - byte_stream = io.BytesIO(img_byte_array) - byte_stream.seek(0) - - # object_name = f'test.jpg' - # req = oss_upload_image(bucket='test', object_name=object_name, image_bytes=img_byte_array) - # url = self.minio_client.get_presigned_url( - # "GET", - # "test", - # object_name, - # expires=timedelta(hours=2), - # ) - return byte_stream - - -if __name__ == '__main__': - server = GenerateImage() - print(server.get_result("rabbit")) diff --git a/app/service/generate_image/service_generate_image.py b/app/service/generate_image/service_generate_image.py index cfd5fd5..c3ae2d7 100644 --- a/app/service/generate_image/service_generate_image.py +++ 
b/app/service/generate_image/service_generate_image.py @@ -21,6 +21,7 @@ from tritonclient.utils import np_to_triton_dtype from app.core.config import * from app.schemas.generate_image import GenerateImageModel from app.service.generate_image.utils.image_processing import remove_background, stain_detection, generate_category_recognition, autoLevels, luminance_adjust +from app.service.generate_image.utils.mq import publish_status from app.service.generate_image.utils.upload_sd_image import upload_png_sd from app.service.utils.oss_client import oss_get_image @@ -29,12 +30,6 @@ logger = logging.getLogger() class GenerateImage: def __init__(self, request_data): - if DEBUG is False: - self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) - self.channel = self.connection.channel() - # self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) - # self.channel = self.connection.channel() - # self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) self.version = request_data.version if request_data.version == "fast": self.grpc_client = grpcclient.InferenceServerClient(url=FAST_GI_MODEL_URL) @@ -161,7 +156,6 @@ class GenerateImage: generate_data = None while time_out > 0: generate_data, _ = self.read_tasks_status() - # logger.info(generate_data) if generate_data['status'] in ["REVOKED", "FAILURE"]: ctx.cancel() break @@ -169,7 +163,6 @@ class GenerateImage: break time_out -= 1 time.sleep(0.1) - # logger.info(time_out, generate_data) return generate_data except Exception as e: self.generate_data['status'] = "FAILURE" @@ -178,11 +171,8 @@ class GenerateImage: raise Exception(str(e)) finally: dict_generate_data, str_generate_data = self.read_tasks_status() - if DEBUG is False: - self.channel.basic_publish(exchange='', routing_key=GI_RABBITMQ_QUEUES, body=str_generate_data) - self.connection.close() - # self.channel.basic_publish(exchange='', 
routing_key=GI_RABBITMQ_QUEUES, body=str_generate_data) - logger.info(f" [x] Sent {json.dumps(dict_generate_data, indent=4)}") + if not DEBUG: + publish_status(str_generate_data, GI_RABBITMQ_QUEUES) def infer_cancel(tasks_id): diff --git a/app/service/generate_image/service_generate_multi_view.py b/app/service/generate_image/service_generate_multi_view.py index 248e604..5ac7819 100644 --- a/app/service/generate_image/service_generate_multi_view.py +++ b/app/service/generate_image/service_generate_multi_view.py @@ -17,6 +17,7 @@ import tritonclient.grpc as grpcclient from app.core.config import * from app.schemas.generate_image import GenerateMultiViewModel +from app.service.generate_image.utils.mq import publish_status from app.service.generate_image.utils.upload_sd_image import upload_png_sd from app.service.utils.oss_client import oss_get_image @@ -25,14 +26,7 @@ logger = logging.getLogger() class GenerateMultiView: def __init__(self, request_data): - if DEBUG is False: - self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) - self.channel = self.connection.channel() - # self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) - # self.channel = self.connection.channel() - # self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) self.grpc_client = grpcclient.InferenceServerClient(url=GMV_MODEL_URL) - self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) self.image = self.get_image(request_data.image_url) self.tasks_id = request_data.tasks_id @@ -52,16 +46,11 @@ class GenerateMultiView: if error: self.generate_data['status'] = "FAILURE" self.generate_data['message'] = str(error) - # self.generate_data['data'] = str(error) self.redis_client.set(self.tasks_id, json.dumps(self.generate_data)) else: # pil图像转成numpy数组 images = result.as_numpy("generated_image") - # for id, img in enumerate(images): - 
# cv2.imwrite(f"{id}.png", img) - # image_url = "" image_url = upload_png_sd(images[6], user_id=self.user_id, category="multi_view", file_name=f"{self.tasks_id}.png") - # logger.info(f"upload image SUCCESS : {image_url}") self.generate_data['status'] = "SUCCESS" self.generate_data['message'] = "success" self.generate_data['image_url'] = str(image_url) @@ -103,11 +92,8 @@ class GenerateMultiView: raise Exception(str(e)) finally: dict_generate_data, str_generate_data = self.read_tasks_status() - if DEBUG is False: - self.channel.basic_publish(exchange='', routing_key=GMV_RABBITMQ_QUEUES, body=str_generate_data) - self.connection.close() - # self.channel.basic_publish(exchange='', routing_key=GI_RABBITMQ_QUEUES, body=str_generate_data) - logger.info(f" [x] Sent {json.dumps(dict_generate_data, indent=4)}") + if not DEBUG: + publish_status(str_generate_data, GMV_RABBITMQ_QUEUES) def infer_cancel(tasks_id): diff --git a/app/service/generate_image/service_generate_product_image.py b/app/service/generate_image/service_generate_product_image.py index 5d67eef..1191352 100644 --- a/app/service/generate_image/service_generate_product_image.py +++ b/app/service/generate_image/service_generate_product_image.py @@ -212,6 +212,7 @@ from tritonclient.utils import np_to_triton_dtype from app.core.config import * from app.schemas.generate_image import GenerateProductImageModel +from app.service.generate_image.utils.mq import publish_status from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image from app.service.utils.oss_client import oss_get_image @@ -220,12 +221,6 @@ logger = logging.getLogger() class GenerateProductImage: def __init__(self, request_data): - if DEBUG is False: - self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) - self.channel = self.connection.channel() - # self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) - # self.channel = self.connection.channel() - # 
self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) self.grpc_client = grpcclient.InferenceServerClient(url=GPI_MODEL_URL) self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) self.category = "product_image" @@ -318,10 +313,8 @@ class GenerateProductImage: raise Exception(str(e)) finally: dict_gen_product_data, str_gen_product_data = self.read_tasks_status() - if DEBUG is False: - self.channel.basic_publish(exchange='', routing_key=GPI_RABBITMQ_QUEUES, body=str_gen_product_data) - self.connection.close() - logger.info(f" [x] Sent to: {GPI_RABBITMQ_QUEUES} data:@@@@ {json.dumps(dict_gen_product_data, indent=4)}") + if not DEBUG: + publish_status(str_gen_product_data, GPI_RABBITMQ_QUEUES) def infer_cancel(tasks_id): diff --git a/app/service/generate_image/service_generate_relight_image.py b/app/service/generate_image/service_generate_relight_image.py index 06a8954..3723ee9 100644 --- a/app/service/generate_image/service_generate_relight_image.py +++ b/app/service/generate_image/service_generate_relight_image.py @@ -20,6 +20,7 @@ from tritonclient.utils import np_to_triton_dtype from app.core.config import * from app.schemas.generate_image import GenerateRelightImageModel +from app.service.generate_image.utils.mq import publish_status from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image from app.service.utils.oss_client import oss_get_image @@ -28,10 +29,6 @@ logger = logging.getLogger() class GenerateRelightImage: def __init__(self, request_data): - if DEBUG is False: - self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) - self.channel = self.connection.channel() - # self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) self.grpc_client = grpcclient.InferenceServerClient(url=GRI_MODEL_URL) self.redis_client = redis.StrictRedis(host=REDIS_HOST, 
port=REDIS_PORT, db=REDIS_DB, decode_responses=True) self.category = "relight_image" @@ -137,10 +134,8 @@ class GenerateRelightImage: raise Exception(str(e)) finally: dict_gen_product_data, str_gen_product_data = self.read_tasks_status() - if DEBUG is False: - self.channel.basic_publish(exchange='', routing_key=GRI_RABBITMQ_QUEUES, body=str_gen_product_data) - self.connection.close() - logger.info(f" [x] Sent to: {GRI_RABBITMQ_QUEUES} data:@@@@ {json.dumps(dict_gen_product_data, indent=4)}") + if not DEBUG: + publish_status(str_gen_product_data, GRI_RABBITMQ_QUEUES) def infer_cancel(tasks_id): diff --git a/app/service/generate_image/service_generate_single_logo.py b/app/service/generate_image/service_generate_single_logo.py index af182b2..1e6b0d2 100644 --- a/app/service/generate_image/service_generate_single_logo.py +++ b/app/service/generate_image/service_generate_single_logo.py @@ -21,6 +21,7 @@ from tritonclient.utils import np_to_triton_dtype from app.core.config import * import tritonclient.grpc as grpcclient from app.schemas.generate_image import GenerateSingleLogoImageModel +from app.service.generate_image.utils.mq import publish_status from app.service.generate_image.utils.upload_sd_image import upload_png_sd, upload_SDXL_image logger = logging.getLogger() @@ -28,10 +29,6 @@ logger = logging.getLogger() class GenerateSingleLogoImage: def __init__(self, request_data): - if DEBUG is False: - self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) - self.channel = self.connection.channel() - self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) self.grpc_client = grpcclient.InferenceServerClient(url=GSL_MODEL_URL) self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) self.batch_size = 1 @@ -96,10 +93,8 @@ class GenerateSingleLogoImage: raise Exception(str(e)) finally: dict_generate_data, str_generate_data = 
self.read_tasks_status() - if DEBUG is False: - self.channel.basic_publish(exchange='', routing_key=GI_RABBITMQ_QUEUES, body=str_generate_data) - self.connection.close() - logger.info(f" [x] Sent {json.dumps(dict_generate_data, indent=4)}") + if not DEBUG: + publish_status(str_generate_data, GI_RABBITMQ_QUEUES) def infer_cancel(tasks_id): diff --git a/app/service/generate_image/service_pose_transform.py b/app/service/generate_image/service_pose_transform.py index 78ca227..1616d76 100644 --- a/app/service/generate_image/service_pose_transform.py +++ b/app/service/generate_image/service_pose_transform.py @@ -21,6 +21,7 @@ from tritonclient.utils import np_to_triton_dtype from app.core.config import * from app.schemas.pose_transform import PoseTransformModel +from app.service.generate_image.utils.mq import publish_status from app.service.generate_image.utils.pose_transform_upload import upload_gif, upload_video, upload_first_image from app.service.utils.oss_client import oss_get_image @@ -114,23 +115,12 @@ class PoseTransformService: raise Exception(str(e)) finally: dict_pose_transform_data, str_pose_transform_data = self.read_tasks_status() - if DEBUG is False: - publish_status(str_pose_transform_data) + if not DEBUG: + publish_status(json.dumps(str_pose_transform_data), PS_RABBITMQ_QUEUES) logger.info( f" [x] Sent to: {PS_RABBITMQ_QUEUES} data:@@@@ {json.dumps(dict_pose_transform_data, indent=4)}") -def publish_status(message): - connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) - channel = connection.channel() - channel.queue_declare(queue=PS_RABBITMQ_QUEUES, durable=True) - channel.basic_publish(exchange='', - routing_key=PS_RABBITMQ_QUEUES, - body=json.dumps(message), - properties=pika.BasicProperties( - delivery_mode=2, - )) - connection.close() def infer_cancel(tasks_id): diff --git a/app/service/generate_image/utils/mq.py b/app/service/generate_image/utils/mq.py new file mode 100644 index 0000000..86e1df6 --- /dev/null +++ 
b/app/service/generate_image/utils/mq.py @@ -0,0 +1,23 @@ +import json + +import pika +import logging + +from app.core.config import RABBITMQ_PARAMS + +logger = logging.getLogger(__name__) + + +def publish_status(message, queue_name): + connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) + channel = connection.channel() + channel.queue_declare(queue=queue_name, durable=True) + channel.basic_publish(exchange='', + routing_key=queue_name, + body=message, + properties=pika.BasicProperties( + delivery_mode=2, + )) + connection.close() + + logger.info(f" [x] Queue : {queue_name} | Sent message : {json.dumps(json.loads(message), indent=4)}") diff --git a/pyproject.toml b/pyproject.toml new file mode 100755 index 0000000..aa10a24 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,18 @@ +[project] +name = "trinity-client-aida" +version = "0.1.0" +description = "Add your description here" +requires-python = ">=3.12" +dependencies = [ + "apscheduler>=3.11.0", + "celery>=5.5.3", + "geventhttpclient>=2.3.4", + "google-search-results>=2.4.2", + "moviepy>=2.2.1", + "numpy==1.26.4", + "pandas-stubs==2.2.3.250527", + "pika-stubs==0.1.3", + "python-multipart>=0.0.20", + "tritonclient[all]>=2.58.0", + "types-urllib3==1.26.25.14", +] diff --git a/uv.lock b/uv.lock new file mode 100755 index 0000000..0c28838 --- /dev/null +++ b/uv.lock @@ -0,0 +1,1193 @@ +version = 1 +revision = 2 +requires-python = ">=3.12" + +[[package]] +name = "aiohappyeyeballs" +version = "2.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760, upload-time = "2025-03-12T01:42:48.764Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265, upload-time = "2025-03-12T01:42:47.083Z" }, +] + +[[package]] +name = "aiohttp" +version = "3.12.13" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohappyeyeballs" }, + { name = "aiosignal" }, + { name = "attrs" }, + { name = "frozenlist" }, + { name = "multidict" }, + { name = "propcache" }, + { name = "yarl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/6e/ab88e7cb2a4058bed2f7870276454f85a7c56cd6da79349eb314fc7bbcaa/aiohttp-3.12.13.tar.gz", hash = "sha256:47e2da578528264a12e4e3dd8dd72a7289e5f812758fe086473fab037a10fcce", size = 7819160, upload-time = "2025-06-14T15:15:41.354Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b4/6a/ce40e329788013cd190b1d62bbabb2b6a9673ecb6d836298635b939562ef/aiohttp-3.12.13-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0aa580cf80558557285b49452151b9c69f2fa3ad94c5c9e76e684719a8791b73", size = 700491, upload-time = "2025-06-14T15:14:00.048Z" }, + { url = "https://files.pythonhosted.org/packages/28/d9/7150d5cf9163e05081f1c5c64a0cdf3c32d2f56e2ac95db2a28fe90eca69/aiohttp-3.12.13-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b103a7e414b57e6939cc4dece8e282cfb22043efd0c7298044f6594cf83ab347", size = 475104, upload-time = "2025-06-14T15:14:01.691Z" }, + { url = "https://files.pythonhosted.org/packages/f8/91/d42ba4aed039ce6e449b3e2db694328756c152a79804e64e3da5bc19dffc/aiohttp-3.12.13-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78f64e748e9e741d2eccff9597d09fb3cd962210e5b5716047cbb646dc8fe06f", size = 467948, upload-time = "2025-06-14T15:14:03.561Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/3b/06f0a632775946981d7c4e5a865cddb6e8dfdbaed2f56f9ade7bb4a1039b/aiohttp-3.12.13-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29c955989bf4c696d2ededc6b0ccb85a73623ae6e112439398935362bacfaaf6", size = 1714742, upload-time = "2025-06-14T15:14:05.558Z" }, + { url = "https://files.pythonhosted.org/packages/92/a6/2552eebad9ec5e3581a89256276009e6a974dc0793632796af144df8b740/aiohttp-3.12.13-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d640191016763fab76072c87d8854a19e8e65d7a6fcfcbf017926bdbbb30a7e5", size = 1697393, upload-time = "2025-06-14T15:14:07.194Z" }, + { url = "https://files.pythonhosted.org/packages/d8/9f/bd08fdde114b3fec7a021381b537b21920cdd2aa29ad48c5dffd8ee314f1/aiohttp-3.12.13-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4dc507481266b410dede95dd9f26c8d6f5a14315372cc48a6e43eac652237d9b", size = 1752486, upload-time = "2025-06-14T15:14:08.808Z" }, + { url = "https://files.pythonhosted.org/packages/f7/e1/affdea8723aec5bd0959171b5490dccd9a91fcc505c8c26c9f1dca73474d/aiohttp-3.12.13-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8a94daa873465d518db073bd95d75f14302e0208a08e8c942b2f3f1c07288a75", size = 1798643, upload-time = "2025-06-14T15:14:10.767Z" }, + { url = "https://files.pythonhosted.org/packages/f3/9d/666d856cc3af3a62ae86393baa3074cc1d591a47d89dc3bf16f6eb2c8d32/aiohttp-3.12.13-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f52420cde4ce0bb9425a375d95577fe082cb5721ecb61da3049b55189e4e6", size = 1718082, upload-time = "2025-06-14T15:14:12.38Z" }, + { url = "https://files.pythonhosted.org/packages/f3/ce/3c185293843d17be063dada45efd2712bb6bf6370b37104b4eda908ffdbd/aiohttp-3.12.13-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:0f7df1f620ec40f1a7fbcb99ea17d7326ea6996715e78f71a1c9a021e31b96b8", size = 1633884, upload-time = "2025-06-14T15:14:14.415Z" }, + { url = "https://files.pythonhosted.org/packages/3a/5b/f3413f4b238113be35dfd6794e65029250d4b93caa0974ca572217745bdb/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3062d4ad53b36e17796dce1c0d6da0ad27a015c321e663657ba1cc7659cfc710", size = 1694943, upload-time = "2025-06-14T15:14:16.48Z" }, + { url = "https://files.pythonhosted.org/packages/82/c8/0e56e8bf12081faca85d14a6929ad5c1263c146149cd66caa7bc12255b6d/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:8605e22d2a86b8e51ffb5253d9045ea73683d92d47c0b1438e11a359bdb94462", size = 1716398, upload-time = "2025-06-14T15:14:18.589Z" }, + { url = "https://files.pythonhosted.org/packages/ea/f3/33192b4761f7f9b2f7f4281365d925d663629cfaea093a64b658b94fc8e1/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:54fbbe6beafc2820de71ece2198458a711e224e116efefa01b7969f3e2b3ddae", size = 1657051, upload-time = "2025-06-14T15:14:20.223Z" }, + { url = "https://files.pythonhosted.org/packages/5e/0b/26ddd91ca8f84c48452431cb4c5dd9523b13bc0c9766bda468e072ac9e29/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:050bd277dfc3768b606fd4eae79dd58ceda67d8b0b3c565656a89ae34525d15e", size = 1736611, upload-time = "2025-06-14T15:14:21.988Z" }, + { url = "https://files.pythonhosted.org/packages/c3/8d/e04569aae853302648e2c138a680a6a2f02e374c5b6711732b29f1e129cc/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2637a60910b58f50f22379b6797466c3aa6ae28a6ab6404e09175ce4955b4e6a", size = 1764586, upload-time = "2025-06-14T15:14:23.979Z" }, + { url = "https://files.pythonhosted.org/packages/ac/98/c193c1d1198571d988454e4ed75adc21c55af247a9fda08236602921c8c8/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e986067357550d1aaa21cfe9897fa19e680110551518a5a7cf44e6c5638cb8b5", size = 1724197, upload-time = 
"2025-06-14T15:14:25.692Z" }, + { url = "https://files.pythonhosted.org/packages/e7/9e/07bb8aa11eec762c6b1ff61575eeeb2657df11ab3d3abfa528d95f3e9337/aiohttp-3.12.13-cp312-cp312-win32.whl", hash = "sha256:ac941a80aeea2aaae2875c9500861a3ba356f9ff17b9cb2dbfb5cbf91baaf5bf", size = 421771, upload-time = "2025-06-14T15:14:27.364Z" }, + { url = "https://files.pythonhosted.org/packages/52/66/3ce877e56ec0813069cdc9607cd979575859c597b6fb9b4182c6d5f31886/aiohttp-3.12.13-cp312-cp312-win_amd64.whl", hash = "sha256:671f41e6146a749b6c81cb7fd07f5a8356d46febdaaaf07b0e774ff04830461e", size = 447869, upload-time = "2025-06-14T15:14:29.05Z" }, + { url = "https://files.pythonhosted.org/packages/11/0f/db19abdf2d86aa1deec3c1e0e5ea46a587b97c07a16516b6438428b3a3f8/aiohttp-3.12.13-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d4a18e61f271127465bdb0e8ff36e8f02ac4a32a80d8927aa52371e93cd87938", size = 694910, upload-time = "2025-06-14T15:14:30.604Z" }, + { url = "https://files.pythonhosted.org/packages/d5/81/0ab551e1b5d7f1339e2d6eb482456ccbe9025605b28eed2b1c0203aaaade/aiohttp-3.12.13-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:532542cb48691179455fab429cdb0d558b5e5290b033b87478f2aa6af5d20ace", size = 472566, upload-time = "2025-06-14T15:14:32.275Z" }, + { url = "https://files.pythonhosted.org/packages/34/3f/6b7d336663337672d29b1f82d1f252ec1a040fe2d548f709d3f90fa2218a/aiohttp-3.12.13-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d7eea18b52f23c050ae9db5d01f3d264ab08f09e7356d6f68e3f3ac2de9dfabb", size = 464856, upload-time = "2025-06-14T15:14:34.132Z" }, + { url = "https://files.pythonhosted.org/packages/26/7f/32ca0f170496aa2ab9b812630fac0c2372c531b797e1deb3deb4cea904bd/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad7c8e5c25f2a26842a7c239de3f7b6bfb92304593ef997c04ac49fb703ff4d7", size = 1703683, upload-time = "2025-06-14T15:14:36.034Z" }, + { url = 
"https://files.pythonhosted.org/packages/ec/53/d5513624b33a811c0abea8461e30a732294112318276ce3dbf047dbd9d8b/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6af355b483e3fe9d7336d84539fef460120c2f6e50e06c658fe2907c69262d6b", size = 1684946, upload-time = "2025-06-14T15:14:38Z" }, + { url = "https://files.pythonhosted.org/packages/37/72/4c237dd127827b0247dc138d3ebd49c2ded6114c6991bbe969058575f25f/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a95cf9f097498f35c88e3609f55bb47b28a5ef67f6888f4390b3d73e2bac6177", size = 1737017, upload-time = "2025-06-14T15:14:39.951Z" }, + { url = "https://files.pythonhosted.org/packages/0d/67/8a7eb3afa01e9d0acc26e1ef847c1a9111f8b42b82955fcd9faeb84edeb4/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8ed8c38a1c584fe99a475a8f60eefc0b682ea413a84c6ce769bb19a7ff1c5ef", size = 1786390, upload-time = "2025-06-14T15:14:42.151Z" }, + { url = "https://files.pythonhosted.org/packages/48/19/0377df97dd0176ad23cd8cad4fd4232cfeadcec6c1b7f036315305c98e3f/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0b9170d5d800126b5bc89d3053a2363406d6e327afb6afaeda2d19ee8bb103", size = 1708719, upload-time = "2025-06-14T15:14:44.039Z" }, + { url = "https://files.pythonhosted.org/packages/61/97/ade1982a5c642b45f3622255173e40c3eed289c169f89d00eeac29a89906/aiohttp-3.12.13-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:372feeace612ef8eb41f05ae014a92121a512bd5067db8f25101dd88a8db11da", size = 1622424, upload-time = "2025-06-14T15:14:45.945Z" }, + { url = "https://files.pythonhosted.org/packages/99/ab/00ad3eea004e1d07ccc406e44cfe2b8da5acb72f8c66aeeb11a096798868/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a946d3702f7965d81f7af7ea8fb03bb33fe53d311df48a46eeca17e9e0beed2d", size = 1675447, 
upload-time = "2025-06-14T15:14:47.911Z" }, + { url = "https://files.pythonhosted.org/packages/3f/fe/74e5ce8b2ccaba445fe0087abc201bfd7259431d92ae608f684fcac5d143/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a0c4725fae86555bbb1d4082129e21de7264f4ab14baf735278c974785cd2041", size = 1707110, upload-time = "2025-06-14T15:14:50.334Z" }, + { url = "https://files.pythonhosted.org/packages/ef/c4/39af17807f694f7a267bd8ab1fbacf16ad66740862192a6c8abac2bff813/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9b28ea2f708234f0a5c44eb6c7d9eb63a148ce3252ba0140d050b091b6e842d1", size = 1649706, upload-time = "2025-06-14T15:14:52.378Z" }, + { url = "https://files.pythonhosted.org/packages/38/e8/f5a0a5f44f19f171d8477059aa5f28a158d7d57fe1a46c553e231f698435/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d4f5becd2a5791829f79608c6f3dc745388162376f310eb9c142c985f9441cc1", size = 1725839, upload-time = "2025-06-14T15:14:54.617Z" }, + { url = "https://files.pythonhosted.org/packages/fd/ac/81acc594c7f529ef4419d3866913f628cd4fa9cab17f7bf410a5c3c04c53/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:60f2ce6b944e97649051d5f5cc0f439360690b73909230e107fd45a359d3e911", size = 1759311, upload-time = "2025-06-14T15:14:56.597Z" }, + { url = "https://files.pythonhosted.org/packages/38/0d/aabe636bd25c6ab7b18825e5a97d40024da75152bec39aa6ac8b7a677630/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:69fc1909857401b67bf599c793f2183fbc4804717388b0b888f27f9929aa41f3", size = 1708202, upload-time = "2025-06-14T15:14:58.598Z" }, + { url = "https://files.pythonhosted.org/packages/1f/ab/561ef2d8a223261683fb95a6283ad0d36cb66c87503f3a7dde7afe208bb2/aiohttp-3.12.13-cp313-cp313-win32.whl", hash = "sha256:7d7e68787a2046b0e44ba5587aa723ce05d711e3a3665b6b7545328ac8e3c0dd", size = 420794, upload-time = "2025-06-14T15:15:00.939Z" }, + { url = 
"https://files.pythonhosted.org/packages/9d/47/b11d0089875a23bff0abd3edb5516bcd454db3fefab8604f5e4b07bd6210/aiohttp-3.12.13-cp313-cp313-win_amd64.whl", hash = "sha256:5a178390ca90419bfd41419a809688c368e63c86bd725e1186dd97f6b89c2706", size = 446735, upload-time = "2025-06-14T15:15:02.858Z" }, +] + +[[package]] +name = "aiosignal" +version = "1.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "frozenlist" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ba/b5/6d55e80f6d8a08ce22b982eafa278d823b541c925f11ee774b0b9c43473d/aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54", size = 19424, upload-time = "2024-12-13T17:10:40.86Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/6a/bc7e17a3e87a2985d3e8f4da4cd0f481060eb78fb08596c42be62c90a4d9/aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5", size = 7597, upload-time = "2024-12-13T17:10:38.469Z" }, +] + +[[package]] +name = "amqp" +version = "5.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "vine" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/79/fc/ec94a357dfc6683d8c86f8b4cfa5416a4c36b28052ec8260c77aca96a443/amqp-5.3.1.tar.gz", hash = "sha256:cddc00c725449522023bad949f70fff7b48f0b1ade74d170a6f10ab044739432", size = 129013, upload-time = "2024-11-12T19:55:44.051Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/99/fc813cd978842c26c82534010ea849eee9ab3a13ea2b74e95cb9c99e747b/amqp-5.3.1-py3-none-any.whl", hash = "sha256:43b3319e1b4e7d1251833a93d672b4af1e40f3d632d479b98661a95f117880a2", size = 50944, upload-time = "2024-11-12T19:55:41.782Z" }, +] + +[[package]] +name = "apscheduler" +version = "3.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "tzlocal" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/4e/00/6d6814ddc19be2df62c8c898c4df6b5b1914f3bd024b780028caa392d186/apscheduler-3.11.0.tar.gz", hash = "sha256:4c622d250b0955a65d5d0eb91c33e6d43fd879834bf541e0a18661ae60460133", size = 107347, upload-time = "2024-11-24T19:39:26.463Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/ae/9a053dd9229c0fde6b1f1f33f609ccff1ee79ddda364c756a924c6d8563b/APScheduler-3.11.0-py3-none-any.whl", hash = "sha256:fc134ca32e50f5eadcc4938e3a4545ab19131435e851abb40b34d63d5141c6da", size = 64004, upload-time = "2024-11-24T19:39:24.442Z" }, +] + +[[package]] +name = "attrs" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, +] + +[[package]] +name = "billiard" +version = "4.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7c/58/1546c970afcd2a2428b1bfafecf2371d8951cc34b46701bea73f4280989e/billiard-4.2.1.tar.gz", hash = "sha256:12b641b0c539073fc8d3f5b8b7be998956665c4233c7c1fcd66a7e677c4fb36f", size = 155031, upload-time = "2024-09-21T13:40:22.491Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/30/da/43b15f28fe5f9e027b41c539abc5469052e9d48fd75f8ff094ba2a0ae767/billiard-4.2.1-py3-none-any.whl", hash = "sha256:40b59a4ac8806ba2c2369ea98d876bc6108b051c227baffd928c644d15d8f3cb", size = 86766, upload-time = "2024-09-21T13:40:20.188Z" }, +] + +[[package]] 
+name = "brotli" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2f/c2/f9e977608bdf958650638c3f1e28f85a1b075f075ebbe77db8555463787b/Brotli-1.1.0.tar.gz", hash = "sha256:81de08ac11bcb85841e440c13611c00b67d3bf82698314928d0b676362546724", size = 7372270, upload-time = "2023-09-07T14:05:41.643Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/d0/5373ae13b93fe00095a58efcbce837fd470ca39f703a235d2a999baadfbc/Brotli-1.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:32d95b80260d79926f5fab3c41701dbb818fde1c9da590e77e571eefd14abe28", size = 815693, upload-time = "2024-10-18T12:32:23.824Z" }, + { url = "https://files.pythonhosted.org/packages/8e/48/f6e1cdf86751300c288c1459724bfa6917a80e30dbfc326f92cea5d3683a/Brotli-1.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b760c65308ff1e462f65d69c12e4ae085cff3b332d894637f6273a12a482d09f", size = 422489, upload-time = "2024-10-18T12:32:25.641Z" }, + { url = "https://files.pythonhosted.org/packages/06/88/564958cedce636d0f1bed313381dfc4b4e3d3f6015a63dae6146e1b8c65c/Brotli-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:316cc9b17edf613ac76b1f1f305d2a748f1b976b033b049a6ecdfd5612c70409", size = 873081, upload-time = "2023-09-07T14:03:57.967Z" }, + { url = "https://files.pythonhosted.org/packages/58/79/b7026a8bb65da9a6bb7d14329fd2bd48d2b7f86d7329d5cc8ddc6a90526f/Brotli-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:caf9ee9a5775f3111642d33b86237b05808dafcd6268faa492250e9b78046eb2", size = 446244, upload-time = "2023-09-07T14:03:59.319Z" }, + { url = "https://files.pythonhosted.org/packages/e5/18/c18c32ecea41b6c0004e15606e274006366fe19436b6adccc1ae7b2e50c2/Brotli-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70051525001750221daa10907c77830bc889cb6d865cc0b813d9db7fefc21451", size = 2906505, upload-time = "2023-09-07T14:04:01.327Z" }, + { url = 
"https://files.pythonhosted.org/packages/08/c8/69ec0496b1ada7569b62d85893d928e865df29b90736558d6c98c2031208/Brotli-1.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f4bf76817c14aa98cc6697ac02f3972cb8c3da93e9ef16b9c66573a68014f91", size = 2944152, upload-time = "2023-09-07T14:04:03.033Z" }, + { url = "https://files.pythonhosted.org/packages/ab/fb/0517cea182219d6768113a38167ef6d4eb157a033178cc938033a552ed6d/Brotli-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0c5516f0aed654134a2fc936325cc2e642f8a0e096d075209672eb321cff408", size = 2919252, upload-time = "2023-09-07T14:04:04.675Z" }, + { url = "https://files.pythonhosted.org/packages/c7/53/73a3431662e33ae61a5c80b1b9d2d18f58dfa910ae8dd696e57d39f1a2f5/Brotli-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c3020404e0b5eefd7c9485ccf8393cfb75ec38ce75586e046573c9dc29967a0", size = 2845955, upload-time = "2023-09-07T14:04:06.585Z" }, + { url = "https://files.pythonhosted.org/packages/55/ac/bd280708d9c5ebdbf9de01459e625a3e3803cce0784f47d633562cf40e83/Brotli-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ed11165dd45ce798d99a136808a794a748d5dc38511303239d4e2363c0695dc", size = 2914304, upload-time = "2023-09-07T14:04:08.668Z" }, + { url = "https://files.pythonhosted.org/packages/76/58/5c391b41ecfc4527d2cc3350719b02e87cb424ef8ba2023fb662f9bf743c/Brotli-1.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4093c631e96fdd49e0377a9c167bfd75b6d0bad2ace734c6eb20b348bc3ea180", size = 2814452, upload-time = "2023-09-07T14:04:10.736Z" }, + { url = "https://files.pythonhosted.org/packages/c7/4e/91b8256dfe99c407f174924b65a01f5305e303f486cc7a2e8a5d43c8bec3/Brotli-1.1.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e4c4629ddad63006efa0ef968c8e4751c5868ff0b1c5c40f76524e894c50248", size = 2938751, upload-time = "2023-09-07T14:04:12.875Z" }, + { url = 
"https://files.pythonhosted.org/packages/5a/a6/e2a39a5d3b412938362bbbeba5af904092bf3f95b867b4a3eb856104074e/Brotli-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:861bf317735688269936f755fa136a99d1ed526883859f86e41a5d43c61d8966", size = 2933757, upload-time = "2023-09-07T14:04:14.551Z" }, + { url = "https://files.pythonhosted.org/packages/13/f0/358354786280a509482e0e77c1a5459e439766597d280f28cb097642fc26/Brotli-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:87a3044c3a35055527ac75e419dfa9f4f3667a1e887ee80360589eb8c90aabb9", size = 2936146, upload-time = "2024-10-18T12:32:27.257Z" }, + { url = "https://files.pythonhosted.org/packages/80/f7/daf538c1060d3a88266b80ecc1d1c98b79553b3f117a485653f17070ea2a/Brotli-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c5529b34c1c9d937168297f2c1fde7ebe9ebdd5e121297ff9c043bdb2ae3d6fb", size = 2848055, upload-time = "2024-10-18T12:32:29.376Z" }, + { url = "https://files.pythonhosted.org/packages/ad/cf/0eaa0585c4077d3c2d1edf322d8e97aabf317941d3a72d7b3ad8bce004b0/Brotli-1.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ca63e1890ede90b2e4454f9a65135a4d387a4585ff8282bb72964fab893f2111", size = 3035102, upload-time = "2024-10-18T12:32:31.371Z" }, + { url = "https://files.pythonhosted.org/packages/d8/63/1c1585b2aa554fe6dbce30f0c18bdbc877fa9a1bf5ff17677d9cca0ac122/Brotli-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e79e6520141d792237c70bcd7a3b122d00f2613769ae0cb61c52e89fd3443839", size = 2930029, upload-time = "2024-10-18T12:32:33.293Z" }, + { url = "https://files.pythonhosted.org/packages/5f/3b/4e3fd1893eb3bbfef8e5a80d4508bec17a57bb92d586c85c12d28666bb13/Brotli-1.1.0-cp312-cp312-win32.whl", hash = "sha256:5f4d5ea15c9382135076d2fb28dde923352fe02951e66935a9efaac8f10e81b0", size = 333276, upload-time = "2023-09-07T14:04:16.49Z" }, + { url = 
"https://files.pythonhosted.org/packages/3d/d5/942051b45a9e883b5b6e98c041698b1eb2012d25e5948c58d6bf85b1bb43/Brotli-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:906bc3a79de8c4ae5b86d3d75a8b77e44404b0f4261714306e3ad248d8ab0951", size = 357255, upload-time = "2023-09-07T14:04:17.83Z" }, + { url = "https://files.pythonhosted.org/packages/0a/9f/fb37bb8ffc52a8da37b1c03c459a8cd55df7a57bdccd8831d500e994a0ca/Brotli-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8bf32b98b75c13ec7cf774164172683d6e7891088f6316e54425fde1efc276d5", size = 815681, upload-time = "2024-10-18T12:32:34.942Z" }, + { url = "https://files.pythonhosted.org/packages/06/b3/dbd332a988586fefb0aa49c779f59f47cae76855c2d00f450364bb574cac/Brotli-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7bc37c4d6b87fb1017ea28c9508b36bbcb0c3d18b4260fcdf08b200c74a6aee8", size = 422475, upload-time = "2024-10-18T12:32:36.485Z" }, + { url = "https://files.pythonhosted.org/packages/bb/80/6aaddc2f63dbcf2d93c2d204e49c11a9ec93a8c7c63261e2b4bd35198283/Brotli-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c0ef38c7a7014ffac184db9e04debe495d317cc9c6fb10071f7fefd93100a4f", size = 2906173, upload-time = "2024-10-18T12:32:37.978Z" }, + { url = "https://files.pythonhosted.org/packages/ea/1d/e6ca79c96ff5b641df6097d299347507d39a9604bde8915e76bf026d6c77/Brotli-1.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91d7cc2a76b5567591d12c01f019dd7afce6ba8cba6571187e21e2fc418ae648", size = 2943803, upload-time = "2024-10-18T12:32:39.606Z" }, + { url = "https://files.pythonhosted.org/packages/ac/a3/d98d2472e0130b7dd3acdbb7f390d478123dbf62b7d32bda5c830a96116d/Brotli-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a93dde851926f4f2678e704fadeb39e16c35d8baebd5252c9fd94ce8ce68c4a0", size = 2918946, upload-time = "2024-10-18T12:32:41.679Z" }, + { url = 
"https://files.pythonhosted.org/packages/c4/a5/c69e6d272aee3e1423ed005d8915a7eaa0384c7de503da987f2d224d0721/Brotli-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0db75f47be8b8abc8d9e31bc7aad0547ca26f24a54e6fd10231d623f183d089", size = 2845707, upload-time = "2024-10-18T12:32:43.478Z" }, + { url = "https://files.pythonhosted.org/packages/58/9f/4149d38b52725afa39067350696c09526de0125ebfbaab5acc5af28b42ea/Brotli-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6967ced6730aed543b8673008b5a391c3b1076d834ca438bbd70635c73775368", size = 2936231, upload-time = "2024-10-18T12:32:45.224Z" }, + { url = "https://files.pythonhosted.org/packages/5a/5a/145de884285611838a16bebfdb060c231c52b8f84dfbe52b852a15780386/Brotli-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7eedaa5d036d9336c95915035fb57422054014ebdeb6f3b42eac809928e40d0c", size = 2848157, upload-time = "2024-10-18T12:32:46.894Z" }, + { url = "https://files.pythonhosted.org/packages/50/ae/408b6bfb8525dadebd3b3dd5b19d631da4f7d46420321db44cd99dcf2f2c/Brotli-1.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d487f5432bf35b60ed625d7e1b448e2dc855422e87469e3f450aa5552b0eb284", size = 3035122, upload-time = "2024-10-18T12:32:48.844Z" }, + { url = "https://files.pythonhosted.org/packages/af/85/a94e5cfaa0ca449d8f91c3d6f78313ebf919a0dbd55a100c711c6e9655bc/Brotli-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:832436e59afb93e1836081a20f324cb185836c617659b07b129141a8426973c7", size = 2930206, upload-time = "2024-10-18T12:32:51.198Z" }, + { url = "https://files.pythonhosted.org/packages/c2/f0/a61d9262cd01351df22e57ad7c34f66794709acab13f34be2675f45bf89d/Brotli-1.1.0-cp313-cp313-win32.whl", hash = "sha256:43395e90523f9c23a3d5bdf004733246fba087f2948f87ab28015f12359ca6a0", size = 333804, upload-time = "2024-10-18T12:32:52.661Z" }, + { url = 
"https://files.pythonhosted.org/packages/7e/c1/ec214e9c94000d1c1974ec67ced1c970c148aa6b8d8373066123fc3dbf06/Brotli-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:9011560a466d2eb3f5a6e4929cf4a09be405c64154e12df0dd72713f6500e32b", size = 358517, upload-time = "2024-10-18T12:32:54.066Z" }, +] + +[[package]] +name = "celery" +version = "5.5.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "billiard" }, + { name = "click" }, + { name = "click-didyoumean" }, + { name = "click-plugins" }, + { name = "click-repl" }, + { name = "kombu" }, + { name = "python-dateutil" }, + { name = "vine" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bb/7d/6c289f407d219ba36d8b384b42489ebdd0c84ce9c413875a8aae0c85f35b/celery-5.5.3.tar.gz", hash = "sha256:6c972ae7968c2b5281227f01c3a3f984037d21c5129d07bf3550cc2afc6b10a5", size = 1667144, upload-time = "2025-06-01T11:08:12.563Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/af/0dcccc7fdcdf170f9a1585e5e96b6fb0ba1749ef6be8c89a6202284759bd/celery-5.5.3-py3-none-any.whl", hash = "sha256:0b5761a07057acee94694464ca482416b959568904c9dfa41ce8413a7d65d525", size = 438775, upload-time = "2025-06-01T11:08:09.94Z" }, +] + +[[package]] +name = "certifi" +version = "2025.6.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/73/f7/f14b46d4bcd21092d7d3ccef689615220d8a08fb25e564b65d20738e672e/certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b", size = 158753, upload-time = "2025-06-15T02:45:51.329Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/ae/320161bd181fc06471eed047ecce67b693fd7515b16d495d8932db763426/certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057", size = 157650, upload-time = "2025-06-15T02:45:49.977Z" }, +] + +[[package]] +name = "cffi" +version = "1.17.1" +source = { 
registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621, upload-time = "2024-09-04T20:45:21.852Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448, upload-time = "2024-09-04T20:44:26.208Z" }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976, upload-time = "2024-09-04T20:44:27.578Z" }, + { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475, upload-time = "2024-09-04T20:44:43.733Z" }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009, upload-time = "2024-09-04T20:44:45.309Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367, upload-time = "2025-05-02T08:34:42.01Z" } +wheels = [ 
+ { url = "https://files.pythonhosted.org/packages/d7/a4/37f4d6035c89cac7930395a35cc0f1b872e652eaafb76a6075943754f095/charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7", size = 199936, upload-time = "2025-05-02T08:32:33.712Z" }, + { url = "https://files.pythonhosted.org/packages/ee/8a/1a5e33b73e0d9287274f899d967907cd0bf9c343e651755d9307e0dbf2b3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3", size = 143790, upload-time = "2025-05-02T08:32:35.768Z" }, + { url = "https://files.pythonhosted.org/packages/66/52/59521f1d8e6ab1482164fa21409c5ef44da3e9f653c13ba71becdd98dec3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a", size = 153924, upload-time = "2025-05-02T08:32:37.284Z" }, + { url = "https://files.pythonhosted.org/packages/86/2d/fb55fdf41964ec782febbf33cb64be480a6b8f16ded2dbe8db27a405c09f/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214", size = 146626, upload-time = "2025-05-02T08:32:38.803Z" }, + { url = "https://files.pythonhosted.org/packages/8c/73/6ede2ec59bce19b3edf4209d70004253ec5f4e319f9a2e3f2f15601ed5f7/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a", size = 148567, upload-time = "2025-05-02T08:32:40.251Z" }, + { url = "https://files.pythonhosted.org/packages/09/14/957d03c6dc343c04904530b6bef4e5efae5ec7d7990a7cbb868e4595ee30/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd", size = 150957, upload-time = "2025-05-02T08:32:41.705Z" }, + { url = "https://files.pythonhosted.org/packages/0d/c8/8174d0e5c10ccebdcb1b53cc959591c4c722a3ad92461a273e86b9f5a302/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981", size = 145408, upload-time = "2025-05-02T08:32:43.709Z" }, + { url = "https://files.pythonhosted.org/packages/58/aa/8904b84bc8084ac19dc52feb4f5952c6df03ffb460a887b42615ee1382e8/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c", size = 153399, upload-time = "2025-05-02T08:32:46.197Z" }, + { url = "https://files.pythonhosted.org/packages/c2/26/89ee1f0e264d201cb65cf054aca6038c03b1a0c6b4ae998070392a3ce605/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b", size = 156815, upload-time = "2025-05-02T08:32:48.105Z" }, + { url = "https://files.pythonhosted.org/packages/fd/07/68e95b4b345bad3dbbd3a8681737b4338ff2c9df29856a6d6d23ac4c73cb/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d", size = 154537, upload-time = "2025-05-02T08:32:49.719Z" }, + { url = "https://files.pythonhosted.org/packages/77/1a/5eefc0ce04affb98af07bc05f3bac9094513c0e23b0562d64af46a06aae4/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f", size = 149565, upload-time = "2025-05-02T08:32:51.404Z" }, + { url = "https://files.pythonhosted.org/packages/37/a0/2410e5e6032a174c95e0806b1a6585eb21e12f445ebe239fac441995226a/charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c", 
size = 98357, upload-time = "2025-05-02T08:32:53.079Z" }, + { url = "https://files.pythonhosted.org/packages/6c/4f/c02d5c493967af3eda9c771ad4d2bbc8df6f99ddbeb37ceea6e8716a32bc/charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e", size = 105776, upload-time = "2025-05-02T08:32:54.573Z" }, + { url = "https://files.pythonhosted.org/packages/ea/12/a93df3366ed32db1d907d7593a94f1fe6293903e3e92967bebd6950ed12c/charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0", size = 199622, upload-time = "2025-05-02T08:32:56.363Z" }, + { url = "https://files.pythonhosted.org/packages/04/93/bf204e6f344c39d9937d3c13c8cd5bbfc266472e51fc8c07cb7f64fcd2de/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf", size = 143435, upload-time = "2025-05-02T08:32:58.551Z" }, + { url = "https://files.pythonhosted.org/packages/22/2a/ea8a2095b0bafa6c5b5a55ffdc2f924455233ee7b91c69b7edfcc9e02284/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e", size = 153653, upload-time = "2025-05-02T08:33:00.342Z" }, + { url = "https://files.pythonhosted.org/packages/b6/57/1b090ff183d13cef485dfbe272e2fe57622a76694061353c59da52c9a659/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1", size = 146231, upload-time = "2025-05-02T08:33:02.081Z" }, + { url = "https://files.pythonhosted.org/packages/e2/28/ffc026b26f441fc67bd21ab7f03b313ab3fe46714a14b516f931abe1a2d8/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c", size = 148243, upload-time = "2025-05-02T08:33:04.063Z" }, + { url = "https://files.pythonhosted.org/packages/c0/0f/9abe9bd191629c33e69e47c6ef45ef99773320e9ad8e9cb08b8ab4a8d4cb/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691", size = 150442, upload-time = "2025-05-02T08:33:06.418Z" }, + { url = "https://files.pythonhosted.org/packages/67/7c/a123bbcedca91d5916c056407f89a7f5e8fdfce12ba825d7d6b9954a1a3c/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0", size = 145147, upload-time = "2025-05-02T08:33:08.183Z" }, + { url = "https://files.pythonhosted.org/packages/ec/fe/1ac556fa4899d967b83e9893788e86b6af4d83e4726511eaaad035e36595/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b", size = 153057, upload-time = "2025-05-02T08:33:09.986Z" }, + { url = "https://files.pythonhosted.org/packages/2b/ff/acfc0b0a70b19e3e54febdd5301a98b72fa07635e56f24f60502e954c461/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff", size = 156454, upload-time = "2025-05-02T08:33:11.814Z" }, + { url = "https://files.pythonhosted.org/packages/92/08/95b458ce9c740d0645feb0e96cea1f5ec946ea9c580a94adfe0b617f3573/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b", size = 154174, upload-time = "2025-05-02T08:33:13.707Z" }, + { url = "https://files.pythonhosted.org/packages/78/be/8392efc43487ac051eee6c36d5fbd63032d78f7728cb37aebcc98191f1ff/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148", size = 149166, upload-time = "2025-05-02T08:33:15.458Z" }, + { url = "https://files.pythonhosted.org/packages/44/96/392abd49b094d30b91d9fbda6a69519e95802250b777841cf3bda8fe136c/charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7", size = 98064, upload-time = "2025-05-02T08:33:17.06Z" }, + { url = "https://files.pythonhosted.org/packages/e9/b0/0200da600134e001d91851ddc797809e2fe0ea72de90e09bec5a2fbdaccb/charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980", size = 105641, upload-time = "2025-05-02T08:33:18.753Z" }, + { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626, upload-time = "2025-05-02T08:34:40.053Z" }, +] + +[[package]] +name = "click" +version = "8.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" }, +] + +[[package]] +name = "click-didyoumean" +version = "0.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, +] +sdist = { 
url = "https://files.pythonhosted.org/packages/30/ce/217289b77c590ea1e7c24242d9ddd6e249e52c795ff10fac2c50062c48cb/click_didyoumean-0.3.1.tar.gz", hash = "sha256:4f82fdff0dbe64ef8ab2279bd6aa3f6a99c3b28c05aa09cbfc07c9d7fbb5a463", size = 3089, upload-time = "2024-03-24T08:22:07.499Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1b/5b/974430b5ffdb7a4f1941d13d83c64a0395114503cc357c6b9ae4ce5047ed/click_didyoumean-0.3.1-py3-none-any.whl", hash = "sha256:5c4bb6007cfea5f2fd6583a2fb6701a22a41eb98957e63d0fac41c10e7c3117c", size = 3631, upload-time = "2024-03-24T08:22:06.356Z" }, +] + +[[package]] +name = "click-plugins" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5f/1d/45434f64ed749540af821fd7e42b8e4d23ac04b1eda7c26613288d6cd8a8/click-plugins-1.1.1.tar.gz", hash = "sha256:46ab999744a9d831159c3411bb0c79346d94a444df9a3a3742e9ed63645f264b", size = 8164, upload-time = "2019-04-04T04:27:04.82Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/da/824b92d9942f4e472702488857914bdd50f73021efea15b4cad9aca8ecef/click_plugins-1.1.1-py2.py3-none-any.whl", hash = "sha256:5d262006d3222f5057fd81e1623d4443e41dcda5dc815c06b442aa3c02889fc8", size = 7497, upload-time = "2019-04-04T04:27:03.36Z" }, +] + +[[package]] +name = "click-repl" +version = "0.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "prompt-toolkit" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cb/a2/57f4ac79838cfae6912f997b4d1a64a858fb0c86d7fcaae6f7b58d267fca/click-repl-0.3.0.tar.gz", hash = "sha256:17849c23dba3d667247dc4defe1757fff98694e90fe37474f3feebb69ced26a9", size = 10449, upload-time = "2023-06-15T12:43:51.141Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/40/9d857001228658f0d59e97ebd4c346fe73e138c6de1bce61dc568a57c7f8/click_repl-0.3.0-py3-none-any.whl", hash = 
"sha256:fb7e06deb8da8de86180a33a9da97ac316751c094c6899382da7feeeeb51b812", size = 10289, upload-time = "2023-06-15T12:43:48.626Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "cuda-bindings" +version = "12.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pywin32", marker = "sys_platform == 'win32'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/bf/23a583b8453f580bb1c7749c7abf57017176e0053197384ce81e73977ab3/cuda_bindings-12.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34318c3a1b442854f072f5bb410aea6834172fd1ee7a5ecf49f1d125ea7498a0", size = 11820737, upload-time = "2025-05-06T19:10:38.601Z" }, + { url = "https://files.pythonhosted.org/packages/e3/03/40fc1488727a8d72ecc35f58f9df4939277892a837614339c3366d520426/cuda_bindings-12.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff0e28d1e34758654b9c961e1f55e4786e49aee6a4dbceaf3cc24c46c672df7e", size = 12154006, upload-time = "2025-05-06T19:10:41.642Z" }, + { url = "https://files.pythonhosted.org/packages/2c/6a/2808871d0b519364db2b460dc1b17d4fff3e340d5875144a303254f996e5/cuda_bindings-12.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:57bdaa778116ee50fdcdd31810e0f345c23549ffb045452dc88d5c63601d35d4", size = 12223544, 
upload-time = "2025-05-06T19:10:43.928Z" }, + { url = "https://files.pythonhosted.org/packages/a0/29/7b9e64e3078e31516dad683d6a23f5e5a0d5c2b642c58fb23786ec4bfac6/cuda_bindings-12.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b9d039981412575c1713915a889934ec750b8c2ed3dbfaa739292e0478a3f6f", size = 11810588, upload-time = "2025-05-06T19:10:46.653Z" }, + { url = "https://files.pythonhosted.org/packages/01/fd/1c30778265488c6797c6c17a69c09ba5636df6dc6b0ebfc96d950be2f9e7/cuda_bindings-12.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6d7314b2e5db025bb88ddba4df6db2127cc39610ccf4f74c0e1ead05241da29", size = 12149149, upload-time = "2025-05-06T19:10:49.334Z" }, + { url = "https://files.pythonhosted.org/packages/d0/86/fdf309b334db8c6555f303c0f6a1538db53135103d13a78d8445b4981f15/cuda_bindings-12.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:44eae9c854a55c7f464f08fa895a1fe0846e36097697d8c255051789d59bf55b", size = 12188603, upload-time = "2025-05-06T19:10:52.066Z" }, +] + +[[package]] +name = "cuda-python" +version = "12.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cuda-bindings" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/24/3c/4475aebeaab9651f2e61000fbe76f91a476d371dbfbf0a1cf46e689af253/cuda_python-12.9.0-py3-none-any.whl", hash = "sha256:926acba49b2c0a0374c61b7c98f337c085199cf51cdfe4d6423c4129c20547a7", size = 7532, upload-time = "2025-05-06T19:14:07.771Z" }, +] + +[[package]] +name = "decorator" +version = "5.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711, upload-time = "2025-02-24T04:41:34.073Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, +] + +[[package]] +name = "frozenlist" +version = "1.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/79/b1/b64018016eeb087db503b038296fd782586432b9c077fc5c7839e9cb6ef6/frozenlist-1.7.0.tar.gz", hash = "sha256:2e310d81923c2437ea8670467121cc3e9b0f76d3043cc1d2331d56c7fb7a3a8f", size = 45078, upload-time = "2025-06-09T23:02:35.538Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a2/c8131383f1e66adad5f6ecfcce383d584ca94055a34d683bbb24ac5f2f1c/frozenlist-1.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3dbf9952c4bb0e90e98aec1bd992b3318685005702656bc6f67c1a32b76787f2", size = 81424, upload-time = "2025-06-09T23:00:42.24Z" }, + { url = "https://files.pythonhosted.org/packages/4c/9d/02754159955088cb52567337d1113f945b9e444c4960771ea90eb73de8db/frozenlist-1.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1f5906d3359300b8a9bb194239491122e6cf1444c2efb88865426f170c262cdb", size = 47952, upload-time = "2025-06-09T23:00:43.481Z" }, + { url = "https://files.pythonhosted.org/packages/01/7a/0046ef1bd6699b40acd2067ed6d6670b4db2f425c56980fa21c982c2a9db/frozenlist-1.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3dabd5a8f84573c8d10d8859a50ea2dec01eea372031929871368c09fa103478", size = 46688, upload-time = "2025-06-09T23:00:44.793Z" }, + { url = "https://files.pythonhosted.org/packages/d6/a2/a910bafe29c86997363fb4c02069df4ff0b5bc39d33c5198b4e9dd42d8f8/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa57daa5917f1738064f302bf2626281a1cb01920c32f711fbc7bc36111058a8", size = 243084, upload-time = "2025-06-09T23:00:46.125Z" }, + { url = 
"https://files.pythonhosted.org/packages/64/3e/5036af9d5031374c64c387469bfcc3af537fc0f5b1187d83a1cf6fab1639/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c193dda2b6d49f4c4398962810fa7d7c78f032bf45572b3e04dd5249dff27e08", size = 233524, upload-time = "2025-06-09T23:00:47.73Z" }, + { url = "https://files.pythonhosted.org/packages/06/39/6a17b7c107a2887e781a48ecf20ad20f1c39d94b2a548c83615b5b879f28/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe2b675cf0aaa6d61bf8fbffd3c274b3c9b7b1623beb3809df8a81399a4a9c4", size = 248493, upload-time = "2025-06-09T23:00:49.742Z" }, + { url = "https://files.pythonhosted.org/packages/be/00/711d1337c7327d88c44d91dd0f556a1c47fb99afc060ae0ef66b4d24793d/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fc5d5cda37f62b262405cf9652cf0856839c4be8ee41be0afe8858f17f4c94b", size = 244116, upload-time = "2025-06-09T23:00:51.352Z" }, + { url = "https://files.pythonhosted.org/packages/24/fe/74e6ec0639c115df13d5850e75722750adabdc7de24e37e05a40527ca539/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0d5ce521d1dd7d620198829b87ea002956e4319002ef0bc8d3e6d045cb4646e", size = 224557, upload-time = "2025-06-09T23:00:52.855Z" }, + { url = "https://files.pythonhosted.org/packages/8d/db/48421f62a6f77c553575201e89048e97198046b793f4a089c79a6e3268bd/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:488d0a7d6a0008ca0db273c542098a0fa9e7dfaa7e57f70acef43f32b3f69dca", size = 241820, upload-time = "2025-06-09T23:00:54.43Z" }, + { url = "https://files.pythonhosted.org/packages/1d/fa/cb4a76bea23047c8462976ea7b7a2bf53997a0ca171302deae9d6dd12096/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:15a7eaba63983d22c54d255b854e8108e7e5f3e89f647fc854bd77a237e767df", size = 236542, upload-time = "2025-06-09T23:00:56.409Z" }, + { url = "https://files.pythonhosted.org/packages/5d/32/476a4b5cfaa0ec94d3f808f193301debff2ea42288a099afe60757ef6282/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1eaa7e9c6d15df825bf255649e05bd8a74b04a4d2baa1ae46d9c2d00b2ca2cb5", size = 249350, upload-time = "2025-06-09T23:00:58.468Z" }, + { url = "https://files.pythonhosted.org/packages/8d/ba/9a28042f84a6bf8ea5dbc81cfff8eaef18d78b2a1ad9d51c7bc5b029ad16/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4389e06714cfa9d47ab87f784a7c5be91d3934cd6e9a7b85beef808297cc025", size = 225093, upload-time = "2025-06-09T23:01:00.015Z" }, + { url = "https://files.pythonhosted.org/packages/bc/29/3a32959e68f9cf000b04e79ba574527c17e8842e38c91d68214a37455786/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:73bd45e1488c40b63fe5a7df892baf9e2a4d4bb6409a2b3b78ac1c6236178e01", size = 245482, upload-time = "2025-06-09T23:01:01.474Z" }, + { url = "https://files.pythonhosted.org/packages/80/e8/edf2f9e00da553f07f5fa165325cfc302dead715cab6ac8336a5f3d0adc2/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:99886d98e1643269760e5fe0df31e5ae7050788dd288947f7f007209b8c33f08", size = 249590, upload-time = "2025-06-09T23:01:02.961Z" }, + { url = "https://files.pythonhosted.org/packages/1c/80/9a0eb48b944050f94cc51ee1c413eb14a39543cc4f760ed12657a5a3c45a/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:290a172aae5a4c278c6da8a96222e6337744cd9c77313efe33d5670b9f65fc43", size = 237785, upload-time = "2025-06-09T23:01:05.095Z" }, + { url = "https://files.pythonhosted.org/packages/f3/74/87601e0fb0369b7a2baf404ea921769c53b7ae00dee7dcfe5162c8c6dbf0/frozenlist-1.7.0-cp312-cp312-win32.whl", hash = "sha256:426c7bc70e07cfebc178bc4c2bf2d861d720c4fff172181eeb4a4c41d4ca2ad3", size = 39487, upload-time = 
"2025-06-09T23:01:06.54Z" }, + { url = "https://files.pythonhosted.org/packages/0b/15/c026e9a9fc17585a9d461f65d8593d281fedf55fbf7eb53f16c6df2392f9/frozenlist-1.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:563b72efe5da92e02eb68c59cb37205457c977aa7a449ed1b37e6939e5c47c6a", size = 43874, upload-time = "2025-06-09T23:01:07.752Z" }, + { url = "https://files.pythonhosted.org/packages/24/90/6b2cebdabdbd50367273c20ff6b57a3dfa89bd0762de02c3a1eb42cb6462/frozenlist-1.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee80eeda5e2a4e660651370ebffd1286542b67e268aa1ac8d6dbe973120ef7ee", size = 79791, upload-time = "2025-06-09T23:01:09.368Z" }, + { url = "https://files.pythonhosted.org/packages/83/2e/5b70b6a3325363293fe5fc3ae74cdcbc3e996c2a11dde2fd9f1fb0776d19/frozenlist-1.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d1a81c85417b914139e3a9b995d4a1c84559afc839a93cf2cb7f15e6e5f6ed2d", size = 47165, upload-time = "2025-06-09T23:01:10.653Z" }, + { url = "https://files.pythonhosted.org/packages/f4/25/a0895c99270ca6966110f4ad98e87e5662eab416a17e7fd53c364bf8b954/frozenlist-1.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cbb65198a9132ebc334f237d7b0df163e4de83fb4f2bdfe46c1e654bdb0c5d43", size = 45881, upload-time = "2025-06-09T23:01:12.296Z" }, + { url = "https://files.pythonhosted.org/packages/19/7c/71bb0bbe0832793c601fff68cd0cf6143753d0c667f9aec93d3c323f4b55/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dab46c723eeb2c255a64f9dc05b8dd601fde66d6b19cdb82b2e09cc6ff8d8b5d", size = 232409, upload-time = "2025-06-09T23:01:13.641Z" }, + { url = "https://files.pythonhosted.org/packages/c0/45/ed2798718910fe6eb3ba574082aaceff4528e6323f9a8570be0f7028d8e9/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6aeac207a759d0dedd2e40745575ae32ab30926ff4fa49b1635def65806fddee", size = 225132, upload-time = "2025-06-09T23:01:15.264Z" }, + { url = 
"https://files.pythonhosted.org/packages/ba/e2/8417ae0f8eacb1d071d4950f32f229aa6bf68ab69aab797b72a07ea68d4f/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bd8c4e58ad14b4fa7802b8be49d47993182fdd4023393899632c88fd8cd994eb", size = 237638, upload-time = "2025-06-09T23:01:16.752Z" }, + { url = "https://files.pythonhosted.org/packages/f8/b7/2ace5450ce85f2af05a871b8c8719b341294775a0a6c5585d5e6170f2ce7/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04fb24d104f425da3540ed83cbfc31388a586a7696142004c577fa61c6298c3f", size = 233539, upload-time = "2025-06-09T23:01:18.202Z" }, + { url = "https://files.pythonhosted.org/packages/46/b9/6989292c5539553dba63f3c83dc4598186ab2888f67c0dc1d917e6887db6/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a5c505156368e4ea6b53b5ac23c92d7edc864537ff911d2fb24c140bb175e60", size = 215646, upload-time = "2025-06-09T23:01:19.649Z" }, + { url = "https://files.pythonhosted.org/packages/72/31/bc8c5c99c7818293458fe745dab4fd5730ff49697ccc82b554eb69f16a24/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bd7eb96a675f18aa5c553eb7ddc24a43c8c18f22e1f9925528128c052cdbe00", size = 232233, upload-time = "2025-06-09T23:01:21.175Z" }, + { url = "https://files.pythonhosted.org/packages/59/52/460db4d7ba0811b9ccb85af996019f5d70831f2f5f255f7cc61f86199795/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:05579bf020096fe05a764f1f84cd104a12f78eaab68842d036772dc6d4870b4b", size = 227996, upload-time = "2025-06-09T23:01:23.098Z" }, + { url = "https://files.pythonhosted.org/packages/ba/c9/f4b39e904c03927b7ecf891804fd3b4df3db29b9e487c6418e37988d6e9d/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:376b6222d114e97eeec13d46c486facd41d4f43bab626b7c3f6a8b4e81a5192c", size = 242280, 
upload-time = "2025-06-09T23:01:24.808Z" }, + { url = "https://files.pythonhosted.org/packages/b8/33/3f8d6ced42f162d743e3517781566b8481322be321b486d9d262adf70bfb/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0aa7e176ebe115379b5b1c95b4096fb1c17cce0847402e227e712c27bdb5a949", size = 217717, upload-time = "2025-06-09T23:01:26.28Z" }, + { url = "https://files.pythonhosted.org/packages/3e/e8/ad683e75da6ccef50d0ab0c2b2324b32f84fc88ceee778ed79b8e2d2fe2e/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3fbba20e662b9c2130dc771e332a99eff5da078b2b2648153a40669a6d0e36ca", size = 236644, upload-time = "2025-06-09T23:01:27.887Z" }, + { url = "https://files.pythonhosted.org/packages/b2/14/8d19ccdd3799310722195a72ac94ddc677541fb4bef4091d8e7775752360/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f3f4410a0a601d349dd406b5713fec59b4cee7e71678d5b17edda7f4655a940b", size = 238879, upload-time = "2025-06-09T23:01:29.524Z" }, + { url = "https://files.pythonhosted.org/packages/ce/13/c12bf657494c2fd1079a48b2db49fa4196325909249a52d8f09bc9123fd7/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e2cdfaaec6a2f9327bf43c933c0319a7c429058e8537c508964a133dffee412e", size = 232502, upload-time = "2025-06-09T23:01:31.287Z" }, + { url = "https://files.pythonhosted.org/packages/d7/8b/e7f9dfde869825489382bc0d512c15e96d3964180c9499efcec72e85db7e/frozenlist-1.7.0-cp313-cp313-win32.whl", hash = "sha256:5fc4df05a6591c7768459caba1b342d9ec23fa16195e744939ba5914596ae3e1", size = 39169, upload-time = "2025-06-09T23:01:35.503Z" }, + { url = "https://files.pythonhosted.org/packages/35/89/a487a98d94205d85745080a37860ff5744b9820a2c9acbcdd9440bfddf98/frozenlist-1.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:52109052b9791a3e6b5d1b65f4b909703984b770694d3eb64fad124c835d7cba", size = 43219, upload-time = "2025-06-09T23:01:36.784Z" }, + { url = 
"https://files.pythonhosted.org/packages/56/d5/5c4cf2319a49eddd9dd7145e66c4866bdc6f3dbc67ca3d59685149c11e0d/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:a6f86e4193bb0e235ef6ce3dde5cbabed887e0b11f516ce8a0f4d3b33078ec2d", size = 84345, upload-time = "2025-06-09T23:01:38.295Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7d/ec2c1e1dc16b85bc9d526009961953df9cec8481b6886debb36ec9107799/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:82d664628865abeb32d90ae497fb93df398a69bb3434463d172b80fc25b0dd7d", size = 48880, upload-time = "2025-06-09T23:01:39.887Z" }, + { url = "https://files.pythonhosted.org/packages/69/86/f9596807b03de126e11e7d42ac91e3d0b19a6599c714a1989a4e85eeefc4/frozenlist-1.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:912a7e8375a1c9a68325a902f3953191b7b292aa3c3fb0d71a216221deca460b", size = 48498, upload-time = "2025-06-09T23:01:41.318Z" }, + { url = "https://files.pythonhosted.org/packages/5e/cb/df6de220f5036001005f2d726b789b2c0b65f2363b104bbc16f5be8084f8/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9537c2777167488d539bc5de2ad262efc44388230e5118868e172dd4a552b146", size = 292296, upload-time = "2025-06-09T23:01:42.685Z" }, + { url = "https://files.pythonhosted.org/packages/83/1f/de84c642f17c8f851a2905cee2dae401e5e0daca9b5ef121e120e19aa825/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f34560fb1b4c3e30ba35fa9a13894ba39e5acfc5f60f57d8accde65f46cc5e74", size = 273103, upload-time = "2025-06-09T23:01:44.166Z" }, + { url = "https://files.pythonhosted.org/packages/88/3c/c840bfa474ba3fa13c772b93070893c6e9d5c0350885760376cbe3b6c1b3/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:acd03d224b0175f5a850edc104ac19040d35419eddad04e7cf2d5986d98427f1", size = 292869, upload-time = "2025-06-09T23:01:45.681Z" }, + { url = 
"https://files.pythonhosted.org/packages/a6/1c/3efa6e7d5a39a1d5ef0abeb51c48fb657765794a46cf124e5aca2c7a592c/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2038310bc582f3d6a09b3816ab01737d60bf7b1ec70f5356b09e84fb7408ab1", size = 291467, upload-time = "2025-06-09T23:01:47.234Z" }, + { url = "https://files.pythonhosted.org/packages/4f/00/d5c5e09d4922c395e2f2f6b79b9a20dab4b67daaf78ab92e7729341f61f6/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8c05e4c8e5f36e5e088caa1bf78a687528f83c043706640a92cb76cd6999384", size = 266028, upload-time = "2025-06-09T23:01:48.819Z" }, + { url = "https://files.pythonhosted.org/packages/4e/27/72765be905619dfde25a7f33813ac0341eb6b076abede17a2e3fbfade0cb/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:765bb588c86e47d0b68f23c1bee323d4b703218037765dcf3f25c838c6fecceb", size = 284294, upload-time = "2025-06-09T23:01:50.394Z" }, + { url = "https://files.pythonhosted.org/packages/88/67/c94103a23001b17808eb7dd1200c156bb69fb68e63fcf0693dde4cd6228c/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:32dc2e08c67d86d0969714dd484fd60ff08ff81d1a1e40a77dd34a387e6ebc0c", size = 281898, upload-time = "2025-06-09T23:01:52.234Z" }, + { url = "https://files.pythonhosted.org/packages/42/34/a3e2c00c00f9e2a9db5653bca3fec306349e71aff14ae45ecc6d0951dd24/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:c0303e597eb5a5321b4de9c68e9845ac8f290d2ab3f3e2c864437d3c5a30cd65", size = 290465, upload-time = "2025-06-09T23:01:53.788Z" }, + { url = "https://files.pythonhosted.org/packages/bb/73/f89b7fbce8b0b0c095d82b008afd0590f71ccb3dee6eee41791cf8cd25fd/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:a47f2abb4e29b3a8d0b530f7c3598badc6b134562b1a5caee867f7c62fee51e3", size = 266385, upload-time = 
"2025-06-09T23:01:55.769Z" }, + { url = "https://files.pythonhosted.org/packages/cd/45/e365fdb554159462ca12df54bc59bfa7a9a273ecc21e99e72e597564d1ae/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:3d688126c242a6fabbd92e02633414d40f50bb6002fa4cf995a1d18051525657", size = 288771, upload-time = "2025-06-09T23:01:57.4Z" }, + { url = "https://files.pythonhosted.org/packages/00/11/47b6117002a0e904f004d70ec5194fe9144f117c33c851e3d51c765962d0/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:4e7e9652b3d367c7bd449a727dc79d5043f48b88d0cbfd4f9f1060cf2b414104", size = 288206, upload-time = "2025-06-09T23:01:58.936Z" }, + { url = "https://files.pythonhosted.org/packages/40/37/5f9f3c3fd7f7746082ec67bcdc204db72dad081f4f83a503d33220a92973/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1a85e345b4c43db8b842cab1feb41be5cc0b10a1830e6295b69d7310f99becaf", size = 282620, upload-time = "2025-06-09T23:02:00.493Z" }, + { url = "https://files.pythonhosted.org/packages/0b/31/8fbc5af2d183bff20f21aa743b4088eac4445d2bb1cdece449ae80e4e2d1/frozenlist-1.7.0-cp313-cp313t-win32.whl", hash = "sha256:3a14027124ddb70dfcee5148979998066897e79f89f64b13328595c4bdf77c81", size = 43059, upload-time = "2025-06-09T23:02:02.072Z" }, + { url = "https://files.pythonhosted.org/packages/bb/ed/41956f52105b8dbc26e457c5705340c67c8cc2b79f394b79bffc09d0e938/frozenlist-1.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3bf8010d71d4507775f658e9823210b7427be36625b387221642725b515dcf3e", size = 47516, upload-time = "2025-06-09T23:02:03.779Z" }, + { url = "https://files.pythonhosted.org/packages/ee/45/b82e3c16be2182bff01179db177fe144d58b5dc787a7d4492c6ed8b9317f/frozenlist-1.7.0-py3-none-any.whl", hash = "sha256:9a5af342e34f7e97caf8c995864c7a396418ae2859cc6fdf1b1073020d516a7e", size = 13106, upload-time = "2025-06-09T23:02:34.204Z" }, +] + +[[package]] +name = "gevent" +version = "25.5.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + 
{ name = "cffi", marker = "platform_python_implementation == 'CPython' and sys_platform == 'win32'" }, + { name = "greenlet", marker = "platform_python_implementation == 'CPython'" }, + { name = "zope-event" }, + { name = "zope-interface" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f1/58/267e8160aea00ab00acd2de97197eecfe307064a376fb5c892870a8a6159/gevent-25.5.1.tar.gz", hash = "sha256:582c948fa9a23188b890d0bc130734a506d039a2e5ad87dae276a456cc683e61", size = 6388207, upload-time = "2025-05-12T12:57:59.833Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/58/c5/cf71423666a0b83db3d7e3f85788bc47d573fca5fe62b798fe2c4273de7c/gevent-25.5.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d87c0a1bd809d8f70f96b9b229779ec6647339830b8888a192beed33ac8d129f", size = 2909333, upload-time = "2025-05-12T11:11:34.883Z" }, + { url = "https://files.pythonhosted.org/packages/26/7e/d2f174ee8bec6eb85d961ca203bc599d059c857b8412e367b8fa206603a5/gevent-25.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b87a4b66edb3808d4d07bbdb0deed5a710cf3d3c531e082759afd283758bb649", size = 1788420, upload-time = "2025-05-12T11:52:30.306Z" }, + { url = "https://files.pythonhosted.org/packages/fe/f3/3aba8c147b9108e62ba348c726fe38ae69735a233db425565227336e8ce6/gevent-25.5.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f076779050029a82feb0cb1462021d3404d22f80fa76a181b1a7889cd4d6b519", size = 1868854, upload-time = "2025-05-12T11:54:21.564Z" }, + { url = "https://files.pythonhosted.org/packages/c6/b1/11a5453f8fcebe90a456471fad48bd154c6a62fcb96e3475a5e408d05fc8/gevent-25.5.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bb673eb291c19370f69295f7a881a536451408481e2e3deec3f41dedb7c281ec", size = 1833946, upload-time = "2025-05-12T12:00:05.514Z" }, + { url = 
"https://files.pythonhosted.org/packages/70/1c/37d4a62303f86e6af67660a8df38c1171b7290df61b358e618c6fea79567/gevent-25.5.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1325ed44225c8309c0dd188bdbbbee79e1df8c11ceccac226b861c7d52e4837", size = 2070583, upload-time = "2025-05-12T11:33:02.803Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8f/3b14929ff28263aba1d268ea97bcf104be1a86ba6f6bb4633838e7a1905e/gevent-25.5.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fcd5bcad3102bde686d0adcc341fade6245186050ce14386d547ccab4bd54310", size = 1808341, upload-time = "2025-05-12T11:59:59.154Z" }, + { url = "https://files.pythonhosted.org/packages/2f/fc/674ec819fb8a96e482e4d21f8baa43d34602dba09dfce7bbdc8700899d1b/gevent-25.5.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1a93062609e8fa67ec97cd5fb9206886774b2a09b24887f40148c9c37e6fb71c", size = 2137974, upload-time = "2025-05-12T11:40:54.78Z" }, + { url = "https://files.pythonhosted.org/packages/05/9a/048b7f5e28c54e4595ad4a8ad3c338fa89560e558db2bbe8273f44f030de/gevent-25.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:2534c23dc32bed62b659ed4fd9e198906179e68b26c9276a897e04163bdde806", size = 1638344, upload-time = "2025-05-12T12:08:31.776Z" }, + { url = "https://files.pythonhosted.org/packages/10/25/2162b38d7b48e08865db6772d632bd1648136ce2bb50e340565e45607cad/gevent-25.5.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:a022a9de9275ce0b390b7315595454258c525dc8287a03f1a6cacc5878ab7cbc", size = 2928044, upload-time = "2025-05-12T11:11:36.33Z" }, + { url = "https://files.pythonhosted.org/packages/1b/e0/dbd597a964ed00176da122ea759bf2a6c1504f1e9f08e185379f92dc355f/gevent-25.5.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fae8533f9d0ef3348a1f503edcfb531ef7a0236b57da1e24339aceb0ce52922", size = 1788751, upload-time = "2025-05-12T11:52:32.643Z" }, + { url = 
"https://files.pythonhosted.org/packages/f1/74/960cc4cf4c9c90eafbe0efc238cdf588862e8e278d0b8c0d15a0da4ed480/gevent-25.5.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c7b32d9c3b5294b39ea9060e20c582e49e1ec81edbfeae6cf05f8ad0829cb13d", size = 1869766, upload-time = "2025-05-12T11:54:23.903Z" }, + { url = "https://files.pythonhosted.org/packages/56/78/fa84b1c7db79b156929685db09a7c18c3127361dca18a09e998e98118506/gevent-25.5.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b95815fe44f318ebbfd733b6428b4cb18cc5e68f1c40e8501dd69cc1f42a83d", size = 1835358, upload-time = "2025-05-12T12:00:06.794Z" }, + { url = "https://files.pythonhosted.org/packages/00/5c/bfefe3822bbca5b83bfad256c82251b3f5be13d52d14e17a786847b9b625/gevent-25.5.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d316529b70d325b183b2f3f5cde958911ff7be12eb2b532b5c301f915dbbf1e", size = 2073071, upload-time = "2025-05-12T11:33:04.2Z" }, + { url = "https://files.pythonhosted.org/packages/20/e4/08a77a3839a37db96393dea952e992d5846a881b887986dde62ead6b48a1/gevent-25.5.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f6ba33c13db91ffdbb489a4f3d177a261ea1843923e1d68a5636c53fe98fa5ce", size = 1809805, upload-time = "2025-05-12T12:00:00.537Z" }, + { url = "https://files.pythonhosted.org/packages/2b/ac/28848348f790c1283df74b0fc0a554271d0606676470f848eccf84eae42a/gevent-25.5.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37ee34b77c7553777c0b8379915f75934c3f9c8cd32f7cd098ea43c9323c2276", size = 2138305, upload-time = "2025-05-12T11:40:56.566Z" }, + { url = "https://files.pythonhosted.org/packages/52/9e/0e9e40facd2d714bfb00f71fc6dacaacc82c24c1c2e097bf6461e00dec9f/gevent-25.5.1-cp313-cp313-win_amd64.whl", hash = "sha256:9fa6aa0da224ed807d3b76cdb4ee8b54d4d4d5e018aed2478098e685baae7896", size = 1637444, upload-time = "2025-05-12T12:17:45.995Z" }, + { url = 
"https://files.pythonhosted.org/packages/60/16/b71171e97ec7b4ded8669542f4369d88d5a289e2704efbbde51e858e062a/gevent-25.5.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:0bacf89a65489d26c7087669af89938d5bfd9f7afb12a07b57855b9fad6ccbd0", size = 2937113, upload-time = "2025-05-12T11:12:03.191Z" }, +] + +[[package]] +name = "geventhttpclient" +version = "2.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "brotli" }, + { name = "certifi" }, + { name = "gevent" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/89/19/1ca8de73dcc0596d3df01be299e940d7fc3bccbeb6f62bb8dd2d427a3a50/geventhttpclient-2.3.4.tar.gz", hash = "sha256:1749f75810435a001fc6d4d7526c92cf02b39b30ab6217a886102f941c874222", size = 83545, upload-time = "2025-06-11T13:18:14.144Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4f/72/dcbc6dbf838549b7b0c2c18c1365d2580eb7456939e4b608c3ab213fce78/geventhttpclient-2.3.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9ac30c38d86d888b42bb2ab2738ab9881199609e9fa9a153eb0c66fc9188c6cb", size = 71984, upload-time = "2025-06-11T13:17:09.126Z" }, + { url = "https://files.pythonhosted.org/packages/4c/f9/74aa8c556364ad39b238919c954a0da01a6154ad5e85a1d1ab5f9f5ac186/geventhttpclient-2.3.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4b802000a4fad80fa57e895009671d6e8af56777e3adf0d8aee0807e96188fd9", size = 52631, upload-time = "2025-06-11T13:17:10.061Z" }, + { url = "https://files.pythonhosted.org/packages/11/1a/bc4b70cba8b46be8b2c6ca5b8067c4f086f8c90915eb68086ab40ff6243d/geventhttpclient-2.3.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:461e4d9f4caee481788ec95ac64e0a4a087c1964ddbfae9b6f2dc51715ba706c", size = 51991, upload-time = "2025-06-11T13:17:11.049Z" }, + { url = 
"https://files.pythonhosted.org/packages/b0/f5/8d0f1e998f6d933c251b51ef92d11f7eb5211e3cd579018973a2b455f7c5/geventhttpclient-2.3.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41f2dcc0805551ea9d49f9392c3b9296505a89b9387417b148655d0d8251b36e", size = 119012, upload-time = "2025-06-11T13:17:11.956Z" }, + { url = "https://files.pythonhosted.org/packages/ea/0e/59e4ab506b3c19fc72e88ca344d150a9028a00c400b1099637100bec26fc/geventhttpclient-2.3.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:62f3a29bf242ecca6360d497304900683fd8f42cbf1de8d0546c871819251dad", size = 124565, upload-time = "2025-06-11T13:17:12.896Z" }, + { url = "https://files.pythonhosted.org/packages/39/5d/dcbd34dfcda0c016b4970bd583cb260cc5ebfc35b33d0ec9ccdb2293587a/geventhttpclient-2.3.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8714a3f2c093aeda3ffdb14c03571d349cb3ed1b8b461d9f321890659f4a5dbf", size = 115573, upload-time = "2025-06-11T13:17:13.937Z" }, + { url = "https://files.pythonhosted.org/packages/03/51/89af99e4805e9ce7f95562dfbd23c0b0391830831e43d58f940ec74489ac/geventhttpclient-2.3.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b11f38b74bab75282db66226197024a731250dcbe25542fd4e85ac5313547332", size = 114260, upload-time = "2025-06-11T13:17:14.913Z" }, + { url = "https://files.pythonhosted.org/packages/b3/ec/3a3000bda432953abcc6f51d008166fa7abc1eeddd1f0246933d83854f73/geventhttpclient-2.3.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fccc2023a89dfbce2e1b1409b967011e45d41808df81b7fa0259397db79ba647", size = 111592, upload-time = "2025-06-11T13:17:15.879Z" }, + { url = "https://files.pythonhosted.org/packages/d8/a3/88fd71fe6bbe1315a2d161cbe2cc7810c357d99bced113bea1668ede8bcf/geventhttpclient-2.3.4-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:9d54b8e9a44890159ae36ba4ae44efd8bb79ff519055137a340d357538a68aa3", size = 113216, upload-time = "2025-06-11T13:17:16.883Z" }, + { url = "https://files.pythonhosted.org/packages/52/eb/20435585a6911b26e65f901a827ef13551c053133926f8c28a7cca0fb08e/geventhttpclient-2.3.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:407cb68a3c3a2c4f5d503930298f2b26ae68137d520e8846d8e230a9981d9334", size = 118450, upload-time = "2025-06-11T13:17:17.968Z" }, + { url = "https://files.pythonhosted.org/packages/2f/79/82782283d613570373990b676a0966c1062a38ca8f41a0f20843c5808e01/geventhttpclient-2.3.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:54fbbcca2dcf06f12a337dd8f98417a09a49aa9d9706aa530fc93acb59b7d83c", size = 112226, upload-time = "2025-06-11T13:17:18.942Z" }, + { url = "https://files.pythonhosted.org/packages/9c/c4/417d12fc2a31ad93172b03309c7f8c3a8bbd0cf25b95eb7835de26b24453/geventhttpclient-2.3.4-cp312-cp312-win32.whl", hash = "sha256:83143b41bde2eb010c7056f142cb764cfbf77f16bf78bda2323a160767455cf5", size = 48365, upload-time = "2025-06-11T13:17:20.096Z" }, + { url = "https://files.pythonhosted.org/packages/cf/f4/7e5ee2f460bbbd09cb5d90ff63a1cf80d60f1c60c29dac20326324242377/geventhttpclient-2.3.4-cp312-cp312-win_amd64.whl", hash = "sha256:46eda9a9137b0ca7886369b40995d2a43a5dff033d0a839a54241015d1845d41", size = 48961, upload-time = "2025-06-11T13:17:21.111Z" }, + { url = "https://files.pythonhosted.org/packages/ff/ad/132fddde6e2dca46d6a86316962437acd2bfaeb264db4e0fae83c529eb04/geventhttpclient-2.3.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:be64c5583884c407fc748dedbcb083475d5b138afb23c6bc0836cbad228402cc", size = 71967, upload-time = "2025-06-11T13:17:22.121Z" }, + { url = "https://files.pythonhosted.org/packages/f4/34/5e77d9a31d93409a8519cf573843288565272ae5a016be9c9293f56c50a1/geventhttpclient-2.3.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:15b2567137734183efda18e4d6245b18772e648b6a25adea0eba8b3a8b0d17e8", size = 52632, 
upload-time = "2025-06-11T13:17:23.016Z" }, + { url = "https://files.pythonhosted.org/packages/47/d2/cf0dbc333304700e68cee9347f654b56e8b0f93a341b8b0d027ee96800d6/geventhttpclient-2.3.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a4bca1151b8cd207eef6d5cb3c720c562b2aa7293cf113a68874e235cfa19c31", size = 51980, upload-time = "2025-06-11T13:17:23.933Z" }, + { url = "https://files.pythonhosted.org/packages/ec/5b/c0c30ccd9d06c603add3f2d6abd68bd98430ee9730dc5478815759cf07f7/geventhttpclient-2.3.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b50d9daded5d36193d67e2fc30e59752262fcbbdc86e8222c7df6b93af0346a", size = 118987, upload-time = "2025-06-11T13:17:24.97Z" }, + { url = "https://files.pythonhosted.org/packages/4f/56/095a46af86476372064128162eccbd2ba4a7721503759890d32ea701d5fd/geventhttpclient-2.3.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fe705e7656bc6982a463a4ed7f9b1db8c78c08323f1d45d0d1d77063efa0ce96", size = 124519, upload-time = "2025-06-11T13:17:25.933Z" }, + { url = "https://files.pythonhosted.org/packages/ae/12/7c9ba94b58f7954a83d33183152ce6bf5bda10c08ebe47d79a314cd33e29/geventhttpclient-2.3.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69668589359db4cbb9efa327dda5735d1e74145e6f0a9ffa50236d15cf904053", size = 115574, upload-time = "2025-06-11T13:17:27.331Z" }, + { url = "https://files.pythonhosted.org/packages/73/77/c4e7c5bce0199428fdb811d6adf6e347180d89eaa1b9b723f711f6bbc830/geventhttpclient-2.3.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9ba526e07ccaf4f1c2cd3395dda221139f01468b6eee1190d4a616f187a0378", size = 114222, upload-time = "2025-06-11T13:17:28.289Z" }, + { url = "https://files.pythonhosted.org/packages/a3/79/58802d300950dbd7d4e31eb24afd7c270fc7900ff3923fd266cc915bb086/geventhttpclient-2.3.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:525bd192705b5cb41a7cc3fe41fca194bfd6b5b59997ab9fe68fe0a82dab6140", size = 111682, upload-time = "2025-06-11T13:17:29.291Z" }, + { url = "https://files.pythonhosted.org/packages/d3/9c/ae04e4033459b8142788dad80d8d0b42d460bc6db9150e0815c2d0a02cb4/geventhttpclient-2.3.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:42b6f6afb0d3aab6a013c9cdb97e19bf4fe08695975670d0a018113d24cb344c", size = 113252, upload-time = "2025-06-11T13:17:30.357Z" }, + { url = "https://files.pythonhosted.org/packages/d3/67/5ae5d5878b06397a7b54334d1d31bb78cefc950ae890c2b8f5c917eb271e/geventhttpclient-2.3.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:227579b703085c4e5c6d5217ad6565b19ac8d1164404133e5874efaae1905114", size = 118426, upload-time = "2025-06-11T13:17:31.363Z" }, + { url = "https://files.pythonhosted.org/packages/ca/36/9065bb51f261950c42eddf8718e01a9ff344d8082e31317a8b6677be9bd6/geventhttpclient-2.3.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d1d0db89c1c8f3282eac9a22fda2b4082e1ed62a2107f70e3f1de1872c7919f", size = 112245, upload-time = "2025-06-11T13:17:32.331Z" }, + { url = "https://files.pythonhosted.org/packages/21/7e/08a615bec095c288f997951e42e48b262d43c6081bef33cfbfad96ab9658/geventhttpclient-2.3.4-cp313-cp313-win32.whl", hash = "sha256:4e492b9ab880f98f8a9cc143b96ea72e860946eae8ad5fb2837cede2a8f45154", size = 48360, upload-time = "2025-06-11T13:17:33.349Z" }, + { url = "https://files.pythonhosted.org/packages/ec/19/ef3cb21e7e95b14cfcd21e3ba7fe3d696e171682dfa43ab8c0a727cac601/geventhttpclient-2.3.4-cp313-cp313-win_amd64.whl", hash = "sha256:72575c5b502bf26ececccb905e4e028bb922f542946be701923e726acf305eb6", size = 48956, upload-time = "2025-06-11T13:17:34.956Z" }, +] + +[[package]] +name = "google-search-results" +version = "2.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "requests" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/77/30/b3a6f6a2e00f8153549c2fa345c58ae1ce8e5f3153c2fe0484d444c3abcb/google_search_results-2.4.2.tar.gz", hash = "sha256:603a30ecae2af8e600b22635757a6df275dad4b934f975e67878ccd640b78245", size = 18818, upload-time = "2023-03-10T11:13:09.953Z" } + +[[package]] +name = "greenlet" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c9/92/bb85bd6e80148a4d2e0c59f7c0c2891029f8fd510183afc7d8d2feeed9b6/greenlet-3.2.3.tar.gz", hash = "sha256:8b0dd8ae4c0d6f5e54ee55ba935eeb3d735a9b58a8a1e5b5cbab64e01a39f365", size = 185752, upload-time = "2025-06-05T16:16:09.955Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/94/ad0d435f7c48debe960c53b8f60fb41c2026b1d0fa4a99a1cb17c3461e09/greenlet-3.2.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:25ad29caed5783d4bd7a85c9251c651696164622494c00802a139c00d639242d", size = 271992, upload-time = "2025-06-05T16:11:23.467Z" }, + { url = "https://files.pythonhosted.org/packages/93/5d/7c27cf4d003d6e77749d299c7c8f5fd50b4f251647b5c2e97e1f20da0ab5/greenlet-3.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:88cd97bf37fe24a6710ec6a3a7799f3f81d9cd33317dcf565ff9950c83f55e0b", size = 638820, upload-time = "2025-06-05T16:38:52.882Z" }, + { url = "https://files.pythonhosted.org/packages/c6/7e/807e1e9be07a125bb4c169144937910bf59b9d2f6d931578e57f0bce0ae2/greenlet-3.2.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:baeedccca94880d2f5666b4fa16fc20ef50ba1ee353ee2d7092b383a243b0b0d", size = 653046, upload-time = "2025-06-05T16:41:36.343Z" }, + { url = "https://files.pythonhosted.org/packages/9d/ab/158c1a4ea1068bdbc78dba5a3de57e4c7aeb4e7fa034320ea94c688bfb61/greenlet-3.2.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:be52af4b6292baecfa0f397f3edb3c6092ce071b499dd6fe292c9ac9f2c8f264", size = 647701, upload-time = 
"2025-06-05T16:48:19.604Z" }, + { url = "https://files.pythonhosted.org/packages/cc/0d/93729068259b550d6a0288da4ff72b86ed05626eaf1eb7c0d3466a2571de/greenlet-3.2.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0cc73378150b8b78b0c9fe2ce56e166695e67478550769536a6742dca3651688", size = 649747, upload-time = "2025-06-05T16:13:04.628Z" }, + { url = "https://files.pythonhosted.org/packages/f6/f6/c82ac1851c60851302d8581680573245c8fc300253fc1ff741ae74a6c24d/greenlet-3.2.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:706d016a03e78df129f68c4c9b4c4f963f7d73534e48a24f5f5a7101ed13dbbb", size = 605461, upload-time = "2025-06-05T16:12:50.792Z" }, + { url = "https://files.pythonhosted.org/packages/98/82/d022cf25ca39cf1200650fc58c52af32c90f80479c25d1cbf57980ec3065/greenlet-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:419e60f80709510c343c57b4bb5a339d8767bf9aef9b8ce43f4f143240f88b7c", size = 1121190, upload-time = "2025-06-05T16:36:48.59Z" }, + { url = "https://files.pythonhosted.org/packages/f5/e1/25297f70717abe8104c20ecf7af0a5b82d2f5a980eb1ac79f65654799f9f/greenlet-3.2.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:93d48533fade144203816783373f27a97e4193177ebaaf0fc396db19e5d61163", size = 1149055, upload-time = "2025-06-05T16:12:40.457Z" }, + { url = "https://files.pythonhosted.org/packages/1f/8f/8f9e56c5e82eb2c26e8cde787962e66494312dc8cb261c460e1f3a9c88bc/greenlet-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:7454d37c740bb27bdeddfc3f358f26956a07d5220818ceb467a483197d84f849", size = 297817, upload-time = "2025-06-05T16:29:49.244Z" }, + { url = "https://files.pythonhosted.org/packages/b1/cf/f5c0b23309070ae93de75c90d29300751a5aacefc0a3ed1b1d8edb28f08b/greenlet-3.2.3-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:500b8689aa9dd1ab26872a34084503aeddefcb438e2e7317b89b11eaea1901ad", size = 270732, upload-time = "2025-06-05T16:10:08.26Z" }, + { url = 
"https://files.pythonhosted.org/packages/48/ae/91a957ba60482d3fecf9be49bc3948f341d706b52ddb9d83a70d42abd498/greenlet-3.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a07d3472c2a93117af3b0136f246b2833fdc0b542d4a9799ae5f41c28323faef", size = 639033, upload-time = "2025-06-05T16:38:53.983Z" }, + { url = "https://files.pythonhosted.org/packages/6f/df/20ffa66dd5a7a7beffa6451bdb7400d66251374ab40b99981478c69a67a8/greenlet-3.2.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:8704b3768d2f51150626962f4b9a9e4a17d2e37c8a8d9867bbd9fa4eb938d3b3", size = 652999, upload-time = "2025-06-05T16:41:37.89Z" }, + { url = "https://files.pythonhosted.org/packages/51/b4/ebb2c8cb41e521f1d72bf0465f2f9a2fd803f674a88db228887e6847077e/greenlet-3.2.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5035d77a27b7c62db6cf41cf786cfe2242644a7a337a0e155c80960598baab95", size = 647368, upload-time = "2025-06-05T16:48:21.467Z" }, + { url = "https://files.pythonhosted.org/packages/8e/6a/1e1b5aa10dced4ae876a322155705257748108b7fd2e4fae3f2a091fe81a/greenlet-3.2.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2d8aa5423cd4a396792f6d4580f88bdc6efcb9205891c9d40d20f6e670992efb", size = 650037, upload-time = "2025-06-05T16:13:06.402Z" }, + { url = "https://files.pythonhosted.org/packages/26/f2/ad51331a157c7015c675702e2d5230c243695c788f8f75feba1af32b3617/greenlet-3.2.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2c724620a101f8170065d7dded3f962a2aea7a7dae133a009cada42847e04a7b", size = 608402, upload-time = "2025-06-05T16:12:51.91Z" }, + { url = "https://files.pythonhosted.org/packages/26/bc/862bd2083e6b3aff23300900a956f4ea9a4059de337f5c8734346b9b34fc/greenlet-3.2.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:873abe55f134c48e1f2a6f53f7d1419192a3d1a4e873bace00499a4e45ea6af0", size = 1119577, upload-time = "2025-06-05T16:36:49.787Z" }, + { url = 
"https://files.pythonhosted.org/packages/86/94/1fc0cc068cfde885170e01de40a619b00eaa8f2916bf3541744730ffb4c3/greenlet-3.2.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:024571bbce5f2c1cfff08bf3fbaa43bbc7444f580ae13b0099e95d0e6e67ed36", size = 1147121, upload-time = "2025-06-05T16:12:42.527Z" }, + { url = "https://files.pythonhosted.org/packages/27/1a/199f9587e8cb08a0658f9c30f3799244307614148ffe8b1e3aa22f324dea/greenlet-3.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:5195fb1e75e592dd04ce79881c8a22becdfa3e6f500e7feb059b1e6fdd54d3e3", size = 297603, upload-time = "2025-06-05T16:20:12.651Z" }, + { url = "https://files.pythonhosted.org/packages/d8/ca/accd7aa5280eb92b70ed9e8f7fd79dc50a2c21d8c73b9a0856f5b564e222/greenlet-3.2.3-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:3d04332dddb10b4a211b68111dabaee2e1a073663d117dc10247b5b1642bac86", size = 271479, upload-time = "2025-06-05T16:10:47.525Z" }, + { url = "https://files.pythonhosted.org/packages/55/71/01ed9895d9eb49223280ecc98a557585edfa56b3d0e965b9fa9f7f06b6d9/greenlet-3.2.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8186162dffde068a465deab08fc72c767196895c39db26ab1c17c0b77a6d8b97", size = 683952, upload-time = "2025-06-05T16:38:55.125Z" }, + { url = "https://files.pythonhosted.org/packages/ea/61/638c4bdf460c3c678a0a1ef4c200f347dff80719597e53b5edb2fb27ab54/greenlet-3.2.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f4bfbaa6096b1b7a200024784217defedf46a07c2eee1a498e94a1b5f8ec5728", size = 696917, upload-time = "2025-06-05T16:41:38.959Z" }, + { url = "https://files.pythonhosted.org/packages/22/cc/0bd1a7eb759d1f3e3cc2d1bc0f0b487ad3cc9f34d74da4b80f226fde4ec3/greenlet-3.2.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:ed6cfa9200484d234d8394c70f5492f144b20d4533f69262d530a1a082f6ee9a", size = 692443, upload-time = "2025-06-05T16:48:23.113Z" }, + { url = 
"https://files.pythonhosted.org/packages/67/10/b2a4b63d3f08362662e89c103f7fe28894a51ae0bc890fabf37d1d780e52/greenlet-3.2.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:02b0df6f63cd15012bed5401b47829cfd2e97052dc89da3cfaf2c779124eb892", size = 692995, upload-time = "2025-06-05T16:13:07.972Z" }, + { url = "https://files.pythonhosted.org/packages/5a/c6/ad82f148a4e3ce9564056453a71529732baf5448ad53fc323e37efe34f66/greenlet-3.2.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:86c2d68e87107c1792e2e8d5399acec2487a4e993ab76c792408e59394d52141", size = 655320, upload-time = "2025-06-05T16:12:53.453Z" }, + { url = "https://files.pythonhosted.org/packages/5c/4f/aab73ecaa6b3086a4c89863d94cf26fa84cbff63f52ce9bc4342b3087a06/greenlet-3.2.3-cp314-cp314-win_amd64.whl", hash = "sha256:8c47aae8fbbfcf82cc13327ae802ba13c9c36753b67e760023fd116bc124a62a", size = 301236, upload-time = "2025-06-05T16:15:20.111Z" }, +] + +[[package]] +name = "grpcio" +version = "1.67.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/53/d9282a66a5db45981499190b77790570617a604a38f3d103d0400974aeb5/grpcio-1.67.1.tar.gz", hash = "sha256:3dc2ed4cabea4dc14d5e708c2b426205956077cc5de419b4d4079315017e9732", size = 12580022, upload-time = "2024-10-29T06:30:07.787Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6e/25/6f95bd18d5f506364379eabc0d5874873cc7dbdaf0757df8d1e82bc07a88/grpcio-1.67.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:267d1745894200e4c604958da5f856da6293f063327cb049a51fe67348e4f953", size = 5089809, upload-time = "2024-10-29T06:24:31.24Z" }, + { url = "https://files.pythonhosted.org/packages/10/3f/d79e32e5d0354be33a12db2267c66d3cfeff700dd5ccdd09fd44a3ff4fb6/grpcio-1.67.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:85f69fdc1d28ce7cff8de3f9c67db2b0ca9ba4449644488c1e0303c146135ddb", size = 10981985, upload-time = "2024-10-29T06:24:34.942Z" }, + { url 
= "https://files.pythonhosted.org/packages/21/f2/36fbc14b3542e3a1c20fb98bd60c4732c55a44e374a4eb68f91f28f14aab/grpcio-1.67.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:f26b0b547eb8d00e195274cdfc63ce64c8fc2d3e2d00b12bf468ece41a0423a0", size = 5588770, upload-time = "2024-10-29T06:24:38.145Z" }, + { url = "https://files.pythonhosted.org/packages/0d/af/bbc1305df60c4e65de8c12820a942b5e37f9cf684ef5e49a63fbb1476a73/grpcio-1.67.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4422581cdc628f77302270ff839a44f4c24fdc57887dc2a45b7e53d8fc2376af", size = 6214476, upload-time = "2024-10-29T06:24:41.006Z" }, + { url = "https://files.pythonhosted.org/packages/92/cf/1d4c3e93efa93223e06a5c83ac27e32935f998bc368e276ef858b8883154/grpcio-1.67.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d7616d2ded471231c701489190379e0c311ee0a6c756f3c03e6a62b95a7146e", size = 5850129, upload-time = "2024-10-29T06:24:43.553Z" }, + { url = "https://files.pythonhosted.org/packages/ae/ca/26195b66cb253ac4d5ef59846e354d335c9581dba891624011da0e95d67b/grpcio-1.67.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8a00efecde9d6fcc3ab00c13f816313c040a28450e5e25739c24f432fc6d3c75", size = 6568489, upload-time = "2024-10-29T06:24:46.453Z" }, + { url = "https://files.pythonhosted.org/packages/d1/94/16550ad6b3f13b96f0856ee5dfc2554efac28539ee84a51d7b14526da985/grpcio-1.67.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:699e964923b70f3101393710793289e42845791ea07565654ada0969522d0a38", size = 6149369, upload-time = "2024-10-29T06:24:49.112Z" }, + { url = "https://files.pythonhosted.org/packages/33/0d/4c3b2587e8ad7f121b597329e6c2620374fccbc2e4e1aa3c73ccc670fde4/grpcio-1.67.1-cp312-cp312-win32.whl", hash = "sha256:4e7b904484a634a0fff132958dabdb10d63e0927398273917da3ee103e8d1f78", size = 3599176, upload-time = "2024-10-29T06:24:51.443Z" }, + { url = 
"https://files.pythonhosted.org/packages/7d/36/0c03e2d80db69e2472cf81c6123aa7d14741de7cf790117291a703ae6ae1/grpcio-1.67.1-cp312-cp312-win_amd64.whl", hash = "sha256:5721e66a594a6c4204458004852719b38f3d5522082be9061d6510b455c90afc", size = 4346574, upload-time = "2024-10-29T06:24:54.587Z" }, + { url = "https://files.pythonhosted.org/packages/12/d2/2f032b7a153c7723ea3dea08bffa4bcaca9e0e5bdf643ce565b76da87461/grpcio-1.67.1-cp313-cp313-linux_armv7l.whl", hash = "sha256:aa0162e56fd10a5547fac8774c4899fc3e18c1aa4a4759d0ce2cd00d3696ea6b", size = 5091487, upload-time = "2024-10-29T06:24:57.416Z" }, + { url = "https://files.pythonhosted.org/packages/d0/ae/ea2ff6bd2475a082eb97db1104a903cf5fc57c88c87c10b3c3f41a184fc0/grpcio-1.67.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:beee96c8c0b1a75d556fe57b92b58b4347c77a65781ee2ac749d550f2a365dc1", size = 10943530, upload-time = "2024-10-29T06:25:01.062Z" }, + { url = "https://files.pythonhosted.org/packages/07/62/646be83d1a78edf8d69b56647327c9afc223e3140a744c59b25fbb279c3b/grpcio-1.67.1-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:a93deda571a1bf94ec1f6fcda2872dad3ae538700d94dc283c672a3b508ba3af", size = 5589079, upload-time = "2024-10-29T06:25:04.254Z" }, + { url = "https://files.pythonhosted.org/packages/d0/25/71513d0a1b2072ce80d7f5909a93596b7ed10348b2ea4fdcbad23f6017bf/grpcio-1.67.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e6f255980afef598a9e64a24efce87b625e3e3c80a45162d111a461a9f92955", size = 6213542, upload-time = "2024-10-29T06:25:06.824Z" }, + { url = "https://files.pythonhosted.org/packages/76/9a/d21236297111052dcb5dc85cd77dc7bf25ba67a0f55ae028b2af19a704bc/grpcio-1.67.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e838cad2176ebd5d4a8bb03955138d6589ce9e2ce5d51c3ada34396dbd2dba8", size = 5850211, upload-time = "2024-10-29T06:25:10.149Z" }, + { url = 
"https://files.pythonhosted.org/packages/2d/fe/70b1da9037f5055be14f359026c238821b9bcf6ca38a8d760f59a589aacd/grpcio-1.67.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a6703916c43b1d468d0756c8077b12017a9fcb6a1ef13faf49e67d20d7ebda62", size = 6572129, upload-time = "2024-10-29T06:25:12.853Z" }, + { url = "https://files.pythonhosted.org/packages/74/0d/7df509a2cd2a54814598caf2fb759f3e0b93764431ff410f2175a6efb9e4/grpcio-1.67.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:917e8d8994eed1d86b907ba2a61b9f0aef27a2155bca6cbb322430fc7135b7bb", size = 6149819, upload-time = "2024-10-29T06:25:15.803Z" }, + { url = "https://files.pythonhosted.org/packages/0a/08/bc3b0155600898fd10f16b79054e1cca6cb644fa3c250c0fe59385df5e6f/grpcio-1.67.1-cp313-cp313-win32.whl", hash = "sha256:e279330bef1744040db8fc432becc8a727b84f456ab62b744d3fdb83f327e121", size = 3596561, upload-time = "2024-10-29T06:25:19.348Z" }, + { url = "https://files.pythonhosted.org/packages/5a/96/44759eca966720d0f3e1b105c43f8ad4590c97bf8eb3cd489656e9590baa/grpcio-1.67.1-cp313-cp313-win_amd64.whl", hash = "sha256:fa0c739ad8b1996bd24823950e3cb5152ae91fca1c09cc791190bf1627ffefba", size = 4346042, upload-time = "2024-10-29T06:25:21.939Z" }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, +] + +[[package]] +name = "imageio" +version = "2.37.0" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "pillow" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0c/47/57e897fb7094afb2d26e8b2e4af9a45c7cf1a405acdeeca001fdf2c98501/imageio-2.37.0.tar.gz", hash = "sha256:71b57b3669666272c818497aebba2b4c5f20d5b37c81720e5e1a56d59c492996", size = 389963, upload-time = "2025-01-20T02:42:37.089Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/bd/b394387b598ed84d8d0fa90611a90bee0adc2021820ad5729f7ced74a8e2/imageio-2.37.0-py3-none-any.whl", hash = "sha256:11efa15b87bc7871b61590326b2d635439acc321cf7f8ce996f812543ce10eed", size = 315796, upload-time = "2025-01-20T02:42:34.931Z" }, +] + +[[package]] +name = "imageio-ffmpeg" +version = "0.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/44/bd/c3343c721f2a1b0c9fc71c1aebf1966a3b7f08c2eea8ed5437a2865611d6/imageio_ffmpeg-0.6.0.tar.gz", hash = "sha256:e2556bed8e005564a9f925bb7afa4002d82770d6b08825078b7697ab88ba1755", size = 25210, upload-time = "2025-01-16T21:34:32.747Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/da/58/87ef68ac83f4c7690961bce288fd8e382bc5f1513860fc7f90a9c1c1c6bf/imageio_ffmpeg-0.6.0-py3-none-macosx_10_9_intel.macosx_10_9_x86_64.whl", hash = "sha256:9d2baaf867088508d4a3458e61eeb30e945c4ad8016025545f66c4b5aaef0a61", size = 24932969, upload-time = "2025-01-16T21:34:20.464Z" }, + { url = "https://files.pythonhosted.org/packages/40/5c/f3d8a657d362cc93b81aab8feda487317da5b5d31c0e1fdfd5e986e55d17/imageio_ffmpeg-0.6.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:b1ae3173414b5fc5f538a726c4e48ea97edc0d2cdc11f103afee655c463fa742", size = 21113891, upload-time = "2025-01-16T21:34:00.277Z" }, + { url = "https://files.pythonhosted.org/packages/33/e7/1925bfbc563c39c1d2e82501d8372734a5c725e53ac3b31b4c2d081e895b/imageio_ffmpeg-0.6.0-py3-none-manylinux2014_aarch64.whl", hash = 
"sha256:1d47bebd83d2c5fc770720d211855f208af8a596c82d17730aa51e815cdee6dc", size = 25632706, upload-time = "2025-01-16T21:33:53.475Z" }, + { url = "https://files.pythonhosted.org/packages/a0/2d/43c8522a2038e9d0e7dbdf3a61195ecc31ca576fb1527a528c877e87d973/imageio_ffmpeg-0.6.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c7e46fcec401dd990405049d2e2f475e2b397779df2519b544b8aab515195282", size = 29498237, upload-time = "2025-01-16T21:34:13.726Z" }, + { url = "https://files.pythonhosted.org/packages/a0/13/59da54728351883c3c1d9fca1710ab8eee82c7beba585df8f25ca925f08f/imageio_ffmpeg-0.6.0-py3-none-win32.whl", hash = "sha256:196faa79366b4a82f95c0f4053191d2013f4714a715780f0ad2a68ff37483cc2", size = 19652251, upload-time = "2025-01-16T21:34:06.812Z" }, + { url = "https://files.pythonhosted.org/packages/2c/c6/fa760e12a2483469e2bf5058c5faff664acf66cadb4df2ad6205b016a73d/imageio_ffmpeg-0.6.0-py3-none-win_amd64.whl", hash = "sha256:02fa47c83703c37df6bfe4896aab339013f62bf02c5ebf2dce6da56af04ffc0a", size = 31246824, upload-time = "2025-01-16T21:34:28.6Z" }, +] + +[[package]] +name = "kombu" +version = "5.5.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "amqp" }, + { name = "packaging" }, + { name = "tzdata" }, + { name = "vine" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0f/d3/5ff936d8319ac86b9c409f1501b07c426e6ad41966fedace9ef1b966e23f/kombu-5.5.4.tar.gz", hash = "sha256:886600168275ebeada93b888e831352fe578168342f0d1d5833d88ba0d847363", size = 461992, upload-time = "2025-06-01T10:19:22.281Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/70/a07dcf4f62598c8ad579df241af55ced65bed76e42e45d3c368a6d82dbc1/kombu-5.5.4-py3-none-any.whl", hash = "sha256:a12ed0557c238897d8e518f1d1fdf84bd1516c5e305af2dacd85c2015115feb8", size = 210034, upload-time = "2025-06-01T10:19:20.436Z" }, +] + +[[package]] +name = "moviepy" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name 
= "decorator" }, + { name = "imageio" }, + { name = "imageio-ffmpeg" }, + { name = "numpy" }, + { name = "pillow" }, + { name = "proglog" }, + { name = "python-dotenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/61/15f9476e270f64c78a834e7459ca045d669f869cec24eed26807b8cd479d/moviepy-2.2.1.tar.gz", hash = "sha256:c80cb56815ece94e5e3e2d361aa40070eeb30a09d23a24c4e684d03e16deacb1", size = 58431438, upload-time = "2025-05-21T19:31:52.601Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/73/7d3b2010baa0b5eb1e4dfa9e4385e89b6716be76f2fa21a6c0fe34b68e5a/moviepy-2.2.1-py3-none-any.whl", hash = "sha256:6b56803fec2ac54b557404126ac1160e65448e03798fa282bd23e8fab3795060", size = 129871, upload-time = "2025-05-21T19:31:50.11Z" }, +] + +[[package]] +name = "multidict" +version = "6.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/46/b5/59f27b4ce9951a4bce56b88ba5ff5159486797ab18863f2b4c1c5e8465bd/multidict-6.5.0.tar.gz", hash = "sha256:942bd8002492ba819426a8d7aefde3189c1b87099cdf18aaaefefcf7f3f7b6d2", size = 98512, upload-time = "2025-06-17T14:15:56.556Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/fa/18f4950e00924f7e84c8195f4fc303295e14df23f713d64e778b8fa8b903/multidict-6.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1bb986c8ea9d49947bc325c51eced1ada6d8d9b4c5b15fd3fcdc3c93edef5a74", size = 73474, upload-time = "2025-06-17T14:14:13.528Z" }, + { url = "https://files.pythonhosted.org/packages/6c/66/0392a2a8948bccff57e4793c9dde3e5c088f01e8b7f8867ee58a2f187fc5/multidict-6.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:03c0923da300120830fc467e23805d63bbb4e98b94032bd863bc7797ea5fa653", size = 43741, upload-time = "2025-06-17T14:14:15.188Z" }, + { url = "https://files.pythonhosted.org/packages/98/3e/f48487c91b2a070566cfbab876d7e1ebe7deb0a8002e4e896a97998ae066/multidict-6.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:4c78d5ec00fdd35c91680ab5cf58368faad4bd1a8721f87127326270248de9bc", size = 42143, upload-time = "2025-06-17T14:14:16.612Z" }, + { url = "https://files.pythonhosted.org/packages/3f/49/439c6cc1cd00365cf561bdd3579cc3fa1a0d38effb3a59b8d9562839197f/multidict-6.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aadc3cb78be90a887f8f6b73945b840da44b4a483d1c9750459ae69687940c97", size = 239303, upload-time = "2025-06-17T14:14:17.707Z" }, + { url = "https://files.pythonhosted.org/packages/c4/24/491786269e90081cb536e4d7429508725bc92ece176d1204a4449de7c41c/multidict-6.5.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:5b02e1ca495d71e07e652e4cef91adae3bf7ae4493507a263f56e617de65dafc", size = 236913, upload-time = "2025-06-17T14:14:18.981Z" }, + { url = "https://files.pythonhosted.org/packages/e8/76/bbe2558b820ebeca8a317ab034541790e8160ca4b1e450415383ac69b339/multidict-6.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7fe92a62326eef351668eec4e2dfc494927764a0840a1895cff16707fceffcd3", size = 250752, upload-time = "2025-06-17T14:14:20.297Z" }, + { url = "https://files.pythonhosted.org/packages/3e/e3/3977f2c1123f553ceff9f53cd4de04be2c1912333c6fabbcd51531655476/multidict-6.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7673ee4f63879ecd526488deb1989041abcb101b2d30a9165e1e90c489f3f7fb", size = 243937, upload-time = "2025-06-17T14:14:21.935Z" }, + { url = "https://files.pythonhosted.org/packages/b6/b8/7a6e9c13c79709cdd2f22ee849f058e6da76892d141a67acc0e6c30d845c/multidict-6.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa097ae2a29f573de7e2d86620cbdda5676d27772d4ed2669cfa9961a0d73955", size = 237419, upload-time = "2025-06-17T14:14:23.215Z" }, + { url = 
"https://files.pythonhosted.org/packages/84/9d/8557f5e88da71bc7e7a8ace1ada4c28197f3bfdc2dd6e51d3b88f2e16e8e/multidict-6.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:300da0fa4f8457d9c4bd579695496116563409e676ac79b5e4dca18e49d1c308", size = 237222, upload-time = "2025-06-17T14:14:24.516Z" }, + { url = "https://files.pythonhosted.org/packages/a3/3b/8f023ad60e7969cb6bc0683738d0e1618f5ff5723d6d2d7818dc6df6ad3d/multidict-6.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9a19bd108c35877b57393243d392d024cfbfdefe759fd137abb98f6fc910b64c", size = 247861, upload-time = "2025-06-17T14:14:25.839Z" }, + { url = "https://files.pythonhosted.org/packages/af/1c/9cf5a099ce7e3189906cf5daa72c44ee962dcb4c1983659f3a6f8a7446ab/multidict-6.5.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:0f32a1777465a35c35ddbbd7fc1293077938a69402fcc59e40b2846d04a120dd", size = 243917, upload-time = "2025-06-17T14:14:27.164Z" }, + { url = "https://files.pythonhosted.org/packages/6c/bb/88ee66ebeef56868044bac58feb1cc25658bff27b20e3cfc464edc181287/multidict-6.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9cc1e10c14ce8112d1e6d8971fe3cdbe13e314f68bea0e727429249d4a6ce164", size = 249214, upload-time = "2025-06-17T14:14:28.795Z" }, + { url = "https://files.pythonhosted.org/packages/3e/ec/a90e88cc4a1309f33088ab1cdd5c0487718f49dfb82c5ffc845bb17c1973/multidict-6.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:e95c5e07a06594bdc288117ca90e89156aee8cb2d7c330b920d9c3dd19c05414", size = 258682, upload-time = "2025-06-17T14:14:30.066Z" }, + { url = "https://files.pythonhosted.org/packages/d2/d8/16dd69a6811920a31f4e06114ebe67b1cd922c8b05c9c82b050706d0b6fe/multidict-6.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:40ff26f58323795f5cd2855e2718a1720a1123fb90df4553426f0efd76135462", size = 254254, upload-time = "2025-06-17T14:14:31.323Z" }, + { url = 
"https://files.pythonhosted.org/packages/ac/a8/90193a5f5ca1bdbf92633d69a25a2ef9bcac7b412b8d48c84d01a2732518/multidict-6.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:76803a29fd71869a8b59c2118c9dcfb3b8f9c8723e2cce6baeb20705459505cf", size = 247741, upload-time = "2025-06-17T14:14:32.717Z" }, + { url = "https://files.pythonhosted.org/packages/cd/43/29c7a747153c05b41d1f67455426af39ed88d6de3f21c232b8f2724bde13/multidict-6.5.0-cp312-cp312-win32.whl", hash = "sha256:df7ecbc65a53a2ce1b3a0c82e6ad1a43dcfe7c6137733f9176a92516b9f5b851", size = 41049, upload-time = "2025-06-17T14:14:33.941Z" }, + { url = "https://files.pythonhosted.org/packages/1e/e8/8f3fc32b7e901f3a2719764d64aeaf6ae77b4ba961f1c3a3cf3867766636/multidict-6.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:0ec1c3fbbb0b655a6540bce408f48b9a7474fd94ed657dcd2e890671fefa7743", size = 44700, upload-time = "2025-06-17T14:14:35.016Z" }, + { url = "https://files.pythonhosted.org/packages/24/e4/e250806adc98d524d41e69c8d4a42bc3513464adb88cb96224df12928617/multidict-6.5.0-cp312-cp312-win_arm64.whl", hash = "sha256:2d24a00d34808b22c1f15902899b9d82d0faeca9f56281641c791d8605eacd35", size = 41703, upload-time = "2025-06-17T14:14:36.168Z" }, + { url = "https://files.pythonhosted.org/packages/1a/c9/092c4e9402b6d16de761cff88cb842a5c8cc50ccecaf9c4481ba53264b9e/multidict-6.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:53d92df1752df67a928fa7f884aa51edae6f1cf00eeb38cbcf318cf841c17456", size = 73486, upload-time = "2025-06-17T14:14:37.238Z" }, + { url = "https://files.pythonhosted.org/packages/08/f9/6f7ddb8213f5fdf4db48d1d640b78e8aef89b63a5de8a2313286db709250/multidict-6.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:680210de2c38eef17ce46b8df8bf2c1ece489261a14a6e43c997d49843a27c99", size = 43745, upload-time = "2025-06-17T14:14:38.32Z" }, + { url = 
"https://files.pythonhosted.org/packages/f3/a7/b9be0163bfeee3bb08a77a1705e24eb7e651d594ea554107fac8a1ca6a4d/multidict-6.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e279259bcb936732bfa1a8eec82b5d2352b3df69d2fa90d25808cfc403cee90a", size = 42135, upload-time = "2025-06-17T14:14:39.897Z" }, + { url = "https://files.pythonhosted.org/packages/8e/30/93c8203f943a417bda3c573a34d5db0cf733afdfffb0ca78545c7716dbd8/multidict-6.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1c185fc1069781e3fc8b622c4331fb3b433979850392daa5efbb97f7f9959bb", size = 238585, upload-time = "2025-06-17T14:14:41.332Z" }, + { url = "https://files.pythonhosted.org/packages/9d/fe/2582b56a1807604774f566eeef183b0d6b148f4b89d1612cd077567b2e1e/multidict-6.5.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6bb5f65ff91daf19ce97f48f63585e51595539a8a523258b34f7cef2ec7e0617", size = 236174, upload-time = "2025-06-17T14:14:42.602Z" }, + { url = "https://files.pythonhosted.org/packages/9b/c4/d8b66d42d385bd4f974cbd1eaa8b265e6b8d297249009f312081d5ded5c7/multidict-6.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8646b4259450c59b9286db280dd57745897897284f6308edbdf437166d93855", size = 250145, upload-time = "2025-06-17T14:14:43.944Z" }, + { url = "https://files.pythonhosted.org/packages/bc/64/62feda5093ee852426aae3df86fab079f8bf1cdbe403e1078c94672ad3ec/multidict-6.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d245973d4ecc04eea0a8e5ebec7882cf515480036e1b48e65dffcfbdf86d00be", size = 243470, upload-time = "2025-06-17T14:14:45.343Z" }, + { url = "https://files.pythonhosted.org/packages/67/dc/9f6fa6e854625cf289c0e9f4464b40212a01f76b2f3edfe89b6779b4fb93/multidict-6.5.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a133e7ddc9bc7fb053733d0ff697ce78c7bf39b5aec4ac12857b6116324c8d75", size = 236968, upload-time = 
"2025-06-17T14:14:46.609Z" }, + { url = "https://files.pythonhosted.org/packages/46/ae/4b81c6e3745faee81a156f3f87402315bdccf04236f75c03e37be19c94ff/multidict-6.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80d696fa38d738fcebfd53eec4d2e3aeb86a67679fd5e53c325756682f152826", size = 236575, upload-time = "2025-06-17T14:14:47.929Z" }, + { url = "https://files.pythonhosted.org/packages/8a/fa/4089d7642ea344226e1bfab60dd588761d4791754f8072e911836a39bedf/multidict-6.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:20d30c9410ac3908abbaa52ee5967a754c62142043cf2ba091e39681bd51d21a", size = 247632, upload-time = "2025-06-17T14:14:49.525Z" }, + { url = "https://files.pythonhosted.org/packages/16/ee/a353dac797de0f28fb7f078cc181c5f2eefe8dd16aa11a7100cbdc234037/multidict-6.5.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:6c65068cc026f217e815fa519d8e959a7188e94ec163ffa029c94ca3ef9d4a73", size = 243520, upload-time = "2025-06-17T14:14:50.83Z" }, + { url = "https://files.pythonhosted.org/packages/50/ec/560deb3d2d95822d6eb1bcb1f1cb728f8f0197ec25be7c936d5d6a5d133c/multidict-6.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e355ac668a8c3e49c2ca8daa4c92f0ad5b705d26da3d5af6f7d971e46c096da7", size = 248551, upload-time = "2025-06-17T14:14:52.229Z" }, + { url = "https://files.pythonhosted.org/packages/10/85/ddf277e67c78205f6695f2a7639be459bca9cc353b962fd8085a492a262f/multidict-6.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:08db204213d0375a91a381cae0677ab95dd8c67a465eb370549daf6dbbf8ba10", size = 258362, upload-time = "2025-06-17T14:14:53.934Z" }, + { url = "https://files.pythonhosted.org/packages/02/fc/d64ee1df9b87c5210f2d4c419cab07f28589c81b4e5711eda05a122d0614/multidict-6.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:ffa58e3e215af8f6536dc837a990e456129857bb6fd546b3991be470abd9597a", size = 253862, upload-time = "2025-06-17T14:14:55.323Z" }, + { url = 
"https://files.pythonhosted.org/packages/c9/7c/a2743c00d9e25f4826d3a77cc13d4746398872cf21c843eef96bb9945665/multidict-6.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3e86eb90015c6f21658dbd257bb8e6aa18bdb365b92dd1fba27ec04e58cdc31b", size = 247391, upload-time = "2025-06-17T14:14:57.293Z" }, + { url = "https://files.pythonhosted.org/packages/9b/03/7773518db74c442904dbd349074f1e7f2a854cee4d9529fc59e623d3949e/multidict-6.5.0-cp313-cp313-win32.whl", hash = "sha256:f34a90fbd9959d0f857323bd3c52b3e6011ed48f78d7d7b9e04980b8a41da3af", size = 41115, upload-time = "2025-06-17T14:14:59.33Z" }, + { url = "https://files.pythonhosted.org/packages/eb/9a/6fc51b1dc11a7baa944bc101a92167d8b0f5929d376a8c65168fc0d35917/multidict-6.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:fcb2aa79ac6aef8d5b709bbfc2fdb1d75210ba43038d70fbb595b35af470ce06", size = 44768, upload-time = "2025-06-17T14:15:00.427Z" }, + { url = "https://files.pythonhosted.org/packages/82/2d/0d010be24b663b3c16e3d3307bbba2de5ae8eec496f6027d5c0515b371a8/multidict-6.5.0-cp313-cp313-win_arm64.whl", hash = "sha256:6dcee5e7e92060b4bb9bb6f01efcbb78c13d0e17d9bc6eec71660dd71dc7b0c2", size = 41770, upload-time = "2025-06-17T14:15:01.854Z" }, + { url = "https://files.pythonhosted.org/packages/aa/d1/a71711a5f32f84b7b036e82182e3250b949a0ce70d51a2c6a4079e665449/multidict-6.5.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:cbbc88abea2388fde41dd574159dec2cda005cb61aa84950828610cb5010f21a", size = 80450, upload-time = "2025-06-17T14:15:02.968Z" }, + { url = "https://files.pythonhosted.org/packages/0f/a2/953a9eede63a98fcec2c1a2c1a0d88de120056219931013b871884f51b43/multidict-6.5.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:70b599f70ae6536e5976364d3c3cf36f40334708bd6cebdd1e2438395d5e7676", size = 46971, upload-time = "2025-06-17T14:15:04.149Z" }, + { url = 
"https://files.pythonhosted.org/packages/44/61/60250212953459edda2c729e1d85130912f23c67bd4f585546fe4bdb1578/multidict-6.5.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:828bab777aa8d29d59700018178061854e3a47727e0611cb9bec579d3882de3b", size = 45548, upload-time = "2025-06-17T14:15:05.666Z" }, + { url = "https://files.pythonhosted.org/packages/11/b6/e78ee82e96c495bc2582b303f68bed176b481c8d81a441fec07404fce2ca/multidict-6.5.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9695fc1462f17b131c111cf0856a22ff154b0480f86f539d24b2778571ff94d", size = 238545, upload-time = "2025-06-17T14:15:06.88Z" }, + { url = "https://files.pythonhosted.org/packages/5a/0f/6132ca06670c8d7b374c3a4fd1ba896fc37fbb66b0de903f61db7d1020ec/multidict-6.5.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0b5ac6ebaf5d9814b15f399337ebc6d3a7f4ce9331edd404e76c49a01620b68d", size = 229931, upload-time = "2025-06-17T14:15:08.24Z" }, + { url = "https://files.pythonhosted.org/packages/c0/63/d9957c506e6df6b3e7a194f0eea62955c12875e454b978f18262a65d017b/multidict-6.5.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84a51e3baa77ded07be4766a9e41d977987b97e49884d4c94f6d30ab6acaee14", size = 248181, upload-time = "2025-06-17T14:15:09.907Z" }, + { url = "https://files.pythonhosted.org/packages/43/3f/7d5490579640db5999a948e2c41d4a0efd91a75989bda3e0a03a79c92be2/multidict-6.5.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8de67f79314d24179e9b1869ed15e88d6ba5452a73fc9891ac142e0ee018b5d6", size = 241846, upload-time = "2025-06-17T14:15:11.596Z" }, + { url = "https://files.pythonhosted.org/packages/e1/f7/252b1ce949ece52bba4c0de7aa2e3a3d5964e800bce71fb778c2e6c66f7c/multidict-6.5.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17f78a52c214481d30550ec18208e287dfc4736f0c0148208334b105fd9e0887", size = 232893, upload-time = 
"2025-06-17T14:15:12.946Z" }, + { url = "https://files.pythonhosted.org/packages/45/7e/0070bfd48c16afc26e056f2acce49e853c0d604a69c7124bc0bbdb1bcc0a/multidict-6.5.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2966d0099cb2e2039f9b0e73e7fd5eb9c85805681aa2a7f867f9d95b35356921", size = 228567, upload-time = "2025-06-17T14:15:14.267Z" }, + { url = "https://files.pythonhosted.org/packages/2a/31/90551c75322113ebf5fd9c5422e8641d6952f6edaf6b6c07fdc49b1bebdd/multidict-6.5.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:86fb42ed5ed1971c642cc52acc82491af97567534a8e381a8d50c02169c4e684", size = 246188, upload-time = "2025-06-17T14:15:15.985Z" }, + { url = "https://files.pythonhosted.org/packages/cc/e2/aa4b02a55e7767ff292871023817fe4db83668d514dab7ccbce25eaf7659/multidict-6.5.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:4e990cbcb6382f9eae4ec720bcac6a1351509e6fc4a5bb70e4984b27973934e6", size = 235178, upload-time = "2025-06-17T14:15:17.395Z" }, + { url = "https://files.pythonhosted.org/packages/7d/5c/f67e726717c4b138b166be1700e2b56e06fbbcb84643d15f9a9d7335ff41/multidict-6.5.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:d99a59d64bb1f7f2117bec837d9e534c5aeb5dcedf4c2b16b9753ed28fdc20a3", size = 243422, upload-time = "2025-06-17T14:15:18.939Z" }, + { url = "https://files.pythonhosted.org/packages/e5/1c/15fa318285e26a50aa3fa979bbcffb90f9b4d5ec58882d0590eda067d0da/multidict-6.5.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:e8ef15cc97c9890212e1caf90f0d63f6560e1e101cf83aeaf63a57556689fb34", size = 254898, upload-time = "2025-06-17T14:15:20.31Z" }, + { url = "https://files.pythonhosted.org/packages/ad/3d/d6c6d1c2e9b61ca80313912d30bb90d4179335405e421ef0a164eac2c0f9/multidict-6.5.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:b8a09aec921b34bd8b9f842f0bcfd76c6a8c033dc5773511e15f2d517e7e1068", size = 247129, upload-time = "2025-06-17T14:15:21.665Z" }, + { url = 
"https://files.pythonhosted.org/packages/29/15/1568258cf0090bfa78d44be66247cfdb16e27dfd935c8136a1e8632d3057/multidict-6.5.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ff07b504c23b67f2044533244c230808a1258b3493aaf3ea2a0785f70b7be461", size = 243841, upload-time = "2025-06-17T14:15:23.38Z" }, + { url = "https://files.pythonhosted.org/packages/65/57/64af5dbcfd61427056e840c8e520b502879d480f9632fbe210929fd87393/multidict-6.5.0-cp313-cp313t-win32.whl", hash = "sha256:9232a117341e7e979d210e41c04e18f1dc3a1d251268df6c818f5334301274e1", size = 46761, upload-time = "2025-06-17T14:15:24.733Z" }, + { url = "https://files.pythonhosted.org/packages/26/a8/cac7f7d61e188ff44f28e46cb98f9cc21762e671c96e031f06c84a60556e/multidict-6.5.0-cp313-cp313t-win_amd64.whl", hash = "sha256:44cb5c53fb2d4cbcee70a768d796052b75d89b827643788a75ea68189f0980a1", size = 52112, upload-time = "2025-06-17T14:15:25.906Z" }, + { url = "https://files.pythonhosted.org/packages/51/9f/076533feb1b5488d22936da98b9c217205cfbf9f56f7174e8c5c86d86fe6/multidict-6.5.0-cp313-cp313t-win_arm64.whl", hash = "sha256:51d33fafa82640c0217391d4ce895d32b7e84a832b8aee0dcc1b04d8981ec7f4", size = 44358, upload-time = "2025-06-17T14:15:27.117Z" }, + { url = "https://files.pythonhosted.org/packages/44/d8/45e8fc9892a7386d074941429e033adb4640e59ff0780d96a8cf46fe788e/multidict-6.5.0-py3-none-any.whl", hash = "sha256:5634b35f225977605385f56153bd95a7133faffc0ffe12ad26e10517537e8dfc", size = 12181, upload-time = "2025-06-17T14:15:55.156Z" }, +] + +[[package]] +name = "numpy" +version = "1.26.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/65/6e/09db70a523a96d25e115e71cc56a6f9031e7b8cd166c1ac8438307c14058/numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010", size = 15786129, upload-time = "2024-02-06T00:26:44.495Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/95/12/8f2020a8e8b8383ac0177dc9570aad031a3beb12e38847f7129bacd96228/numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218", size = 20335901, upload-time = "2024-02-05T23:55:32.801Z" }, + { url = "https://files.pythonhosted.org/packages/75/5b/ca6c8bd14007e5ca171c7c03102d17b4f4e0ceb53957e8c44343a9546dcc/numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b", size = 13685868, upload-time = "2024-02-05T23:55:56.28Z" }, + { url = "https://files.pythonhosted.org/packages/79/f8/97f10e6755e2a7d027ca783f63044d5b1bc1ae7acb12afe6a9b4286eac17/numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b", size = 13925109, upload-time = "2024-02-05T23:56:20.368Z" }, + { url = "https://files.pythonhosted.org/packages/0f/50/de23fde84e45f5c4fda2488c759b69990fd4512387a8632860f3ac9cd225/numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed", size = 17950613, upload-time = "2024-02-05T23:56:56.054Z" }, + { url = "https://files.pythonhosted.org/packages/4c/0c/9c603826b6465e82591e05ca230dfc13376da512b25ccd0894709b054ed0/numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a", size = 13572172, upload-time = "2024-02-05T23:57:21.56Z" }, + { url = "https://files.pythonhosted.org/packages/76/8c/2ba3902e1a0fc1c74962ea9bb33a534bb05984ad7ff9515bf8d07527cadd/numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0", size = 17786643, upload-time = "2024-02-05T23:57:56.585Z" }, + { url = 
"https://files.pythonhosted.org/packages/28/4a/46d9e65106879492374999e76eb85f87b15328e06bd1550668f79f7b18c6/numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110", size = 5677803, upload-time = "2024-02-05T23:58:08.963Z" }, + { url = "https://files.pythonhosted.org/packages/16/2e/86f24451c2d530c88daf997cb8d6ac622c1d40d19f5a031ed68a4b73a374/numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818", size = 15517754, upload-time = "2024-02-05T23:58:36.364Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pandas-stubs" +version = "2.2.3.250527" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "types-pytz" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5f/0d/5fe7f7f3596eb1c2526fea151e9470f86b379183d8b9debe44b2098651ca/pandas_stubs-2.2.3.250527.tar.gz", hash = "sha256:e2d694c4e72106055295ad143664e5c99e5815b07190d1ff85b73b13ff019e63", size = 106312, upload-time = "2025-05-27T15:24:29.716Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/f8/46141ba8c9d7064dc5008bfb4a6ae5bd3c30e4c61c28b5c5ed485bf358ba/pandas_stubs-2.2.3.250527-py3-none-any.whl", hash = 
"sha256:cd0a49a95b8c5f944e605be711042a4dd8550e2c559b43d70ba2c4b524b66163", size = 159683, upload-time = "2025-05-27T15:24:28.4Z" }, +] + +[[package]] +name = "pika-stubs" +version = "0.1.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c6/7a/0ce91b1507e1a88e104bddd2b64d47cc80a9eda53b7e74bb5a6038c926ae/pika-stubs-0.1.3.tar.gz", hash = "sha256:aaa78fa9f52eb3591b6073fbbe2607567405d1857be268d447bea252e22dd6cf", size = 15155, upload-time = "2020-06-10T02:19:59.923Z" } + +[[package]] +name = "pillow" +version = "11.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/cb/bb5c01fcd2a69335b86c22142b2bccfc3464087efb7fd382eee5ffc7fdf7/pillow-11.2.1.tar.gz", hash = "sha256:a64dd61998416367b7ef979b73d3a85853ba9bec4c2925f74e588879a58716b6", size = 47026707, upload-time = "2025-04-12T17:50:03.289Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/40/052610b15a1b8961f52537cc8326ca6a881408bc2bdad0d852edeb6ed33b/pillow-11.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:78afba22027b4accef10dbd5eed84425930ba41b3ea0a86fa8d20baaf19d807f", size = 3190185, upload-time = "2025-04-12T17:48:00.417Z" }, + { url = "https://files.pythonhosted.org/packages/e5/7e/b86dbd35a5f938632093dc40d1682874c33dcfe832558fc80ca56bfcb774/pillow-11.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78092232a4ab376a35d68c4e6d5e00dfd73454bd12b230420025fbe178ee3b0b", size = 3030306, upload-time = "2025-04-12T17:48:02.391Z" }, + { url = "https://files.pythonhosted.org/packages/a4/5c/467a161f9ed53e5eab51a42923c33051bf8d1a2af4626ac04f5166e58e0c/pillow-11.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a5f306095c6780c52e6bbb6109624b95c5b18e40aab1c3041da3e9e0cd3e2d", size = 4416121, upload-time = "2025-04-12T17:48:04.554Z" }, + { url = 
"https://files.pythonhosted.org/packages/62/73/972b7742e38ae0e2ac76ab137ca6005dcf877480da0d9d61d93b613065b4/pillow-11.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c7b29dbd4281923a2bfe562acb734cee96bbb129e96e6972d315ed9f232bef4", size = 4501707, upload-time = "2025-04-12T17:48:06.831Z" }, + { url = "https://files.pythonhosted.org/packages/e4/3a/427e4cb0b9e177efbc1a84798ed20498c4f233abde003c06d2650a6d60cb/pillow-11.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e645b020f3209a0181a418bffe7b4a93171eef6c4ef6cc20980b30bebf17b7d", size = 4522921, upload-time = "2025-04-12T17:48:09.229Z" }, + { url = "https://files.pythonhosted.org/packages/fe/7c/d8b1330458e4d2f3f45d9508796d7caf0c0d3764c00c823d10f6f1a3b76d/pillow-11.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b2dbea1012ccb784a65349f57bbc93730b96e85b42e9bf7b01ef40443db720b4", size = 4612523, upload-time = "2025-04-12T17:48:11.631Z" }, + { url = "https://files.pythonhosted.org/packages/b3/2f/65738384e0b1acf451de5a573d8153fe84103772d139e1e0bdf1596be2ea/pillow-11.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3104c57bbd72948d75f6a9389e6727d2ab6333c3617f0a89d72d4940aa0443", size = 4587836, upload-time = "2025-04-12T17:48:13.592Z" }, + { url = "https://files.pythonhosted.org/packages/6a/c5/e795c9f2ddf3debb2dedd0df889f2fe4b053308bb59a3cc02a0cd144d641/pillow-11.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:598174aef4589af795f66f9caab87ba4ff860ce08cd5bb447c6fc553ffee603c", size = 4669390, upload-time = "2025-04-12T17:48:15.938Z" }, + { url = "https://files.pythonhosted.org/packages/96/ae/ca0099a3995976a9fce2f423166f7bff9b12244afdc7520f6ed38911539a/pillow-11.2.1-cp312-cp312-win32.whl", hash = "sha256:1d535df14716e7f8776b9e7fee118576d65572b4aad3ed639be9e4fa88a1cad3", size = 2332309, upload-time = "2025-04-12T17:48:17.885Z" }, + { url = 
"https://files.pythonhosted.org/packages/7c/18/24bff2ad716257fc03da964c5e8f05d9790a779a8895d6566e493ccf0189/pillow-11.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:14e33b28bf17c7a38eede290f77db7c664e4eb01f7869e37fa98a5aa95978941", size = 2676768, upload-time = "2025-04-12T17:48:19.655Z" }, + { url = "https://files.pythonhosted.org/packages/da/bb/e8d656c9543276517ee40184aaa39dcb41e683bca121022f9323ae11b39d/pillow-11.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:21e1470ac9e5739ff880c211fc3af01e3ae505859392bf65458c224d0bf283eb", size = 2415087, upload-time = "2025-04-12T17:48:21.991Z" }, + { url = "https://files.pythonhosted.org/packages/36/9c/447528ee3776e7ab8897fe33697a7ff3f0475bb490c5ac1456a03dc57956/pillow-11.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fdec757fea0b793056419bca3e9932eb2b0ceec90ef4813ea4c1e072c389eb28", size = 3190098, upload-time = "2025-04-12T17:48:23.915Z" }, + { url = "https://files.pythonhosted.org/packages/b5/09/29d5cd052f7566a63e5b506fac9c60526e9ecc553825551333e1e18a4858/pillow-11.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b0e130705d568e2f43a17bcbe74d90958e8a16263868a12c3e0d9c8162690830", size = 3030166, upload-time = "2025-04-12T17:48:25.738Z" }, + { url = "https://files.pythonhosted.org/packages/71/5d/446ee132ad35e7600652133f9c2840b4799bbd8e4adba881284860da0a36/pillow-11.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bdb5e09068332578214cadd9c05e3d64d99e0e87591be22a324bdbc18925be0", size = 4408674, upload-time = "2025-04-12T17:48:27.908Z" }, + { url = "https://files.pythonhosted.org/packages/69/5f/cbe509c0ddf91cc3a03bbacf40e5c2339c4912d16458fcb797bb47bcb269/pillow-11.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d189ba1bebfbc0c0e529159631ec72bb9e9bc041f01ec6d3233d6d82eb823bc1", size = 4496005, upload-time = "2025-04-12T17:48:29.888Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/b3/dd4338d8fb8a5f312021f2977fb8198a1184893f9b00b02b75d565c33b51/pillow-11.2.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:191955c55d8a712fab8934a42bfefbf99dd0b5875078240943f913bb66d46d9f", size = 4518707, upload-time = "2025-04-12T17:48:31.874Z" }, + { url = "https://files.pythonhosted.org/packages/13/eb/2552ecebc0b887f539111c2cd241f538b8ff5891b8903dfe672e997529be/pillow-11.2.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:ad275964d52e2243430472fc5d2c2334b4fc3ff9c16cb0a19254e25efa03a155", size = 4610008, upload-time = "2025-04-12T17:48:34.422Z" }, + { url = "https://files.pythonhosted.org/packages/72/d1/924ce51bea494cb6e7959522d69d7b1c7e74f6821d84c63c3dc430cbbf3b/pillow-11.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:750f96efe0597382660d8b53e90dd1dd44568a8edb51cb7f9d5d918b80d4de14", size = 4585420, upload-time = "2025-04-12T17:48:37.641Z" }, + { url = "https://files.pythonhosted.org/packages/43/ab/8f81312d255d713b99ca37479a4cb4b0f48195e530cdc1611990eb8fd04b/pillow-11.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fe15238d3798788d00716637b3d4e7bb6bde18b26e5d08335a96e88564a36b6b", size = 4667655, upload-time = "2025-04-12T17:48:39.652Z" }, + { url = "https://files.pythonhosted.org/packages/94/86/8f2e9d2dc3d308dfd137a07fe1cc478df0a23d42a6c4093b087e738e4827/pillow-11.2.1-cp313-cp313-win32.whl", hash = "sha256:3fe735ced9a607fee4f481423a9c36701a39719252a9bb251679635f99d0f7d2", size = 2332329, upload-time = "2025-04-12T17:48:41.765Z" }, + { url = "https://files.pythonhosted.org/packages/6d/ec/1179083b8d6067a613e4d595359b5fdea65d0a3b7ad623fee906e1b3c4d2/pillow-11.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:74ee3d7ecb3f3c05459ba95eed5efa28d6092d751ce9bf20e3e253a4e497e691", size = 2676388, upload-time = "2025-04-12T17:48:43.625Z" }, + { url = 
"https://files.pythonhosted.org/packages/23/f1/2fc1e1e294de897df39fa8622d829b8828ddad938b0eaea256d65b84dd72/pillow-11.2.1-cp313-cp313-win_arm64.whl", hash = "sha256:5119225c622403afb4b44bad4c1ca6c1f98eed79db8d3bc6e4e160fc6339d66c", size = 2414950, upload-time = "2025-04-12T17:48:45.475Z" }, + { url = "https://files.pythonhosted.org/packages/c4/3e/c328c48b3f0ead7bab765a84b4977acb29f101d10e4ef57a5e3400447c03/pillow-11.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8ce2e8411c7aaef53e6bb29fe98f28cd4fbd9a1d9be2eeea434331aac0536b22", size = 3192759, upload-time = "2025-04-12T17:48:47.866Z" }, + { url = "https://files.pythonhosted.org/packages/18/0e/1c68532d833fc8b9f404d3a642991441d9058eccd5606eab31617f29b6d4/pillow-11.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9ee66787e095127116d91dea2143db65c7bb1e232f617aa5957c0d9d2a3f23a7", size = 3033284, upload-time = "2025-04-12T17:48:50.189Z" }, + { url = "https://files.pythonhosted.org/packages/b7/cb/6faf3fb1e7705fd2db74e070f3bf6f88693601b0ed8e81049a8266de4754/pillow-11.2.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9622e3b6c1d8b551b6e6f21873bdcc55762b4b2126633014cea1803368a9aa16", size = 4445826, upload-time = "2025-04-12T17:48:52.346Z" }, + { url = "https://files.pythonhosted.org/packages/07/94/8be03d50b70ca47fb434a358919d6a8d6580f282bbb7af7e4aa40103461d/pillow-11.2.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63b5dff3a68f371ea06025a1a6966c9a1e1ee452fc8020c2cd0ea41b83e9037b", size = 4527329, upload-time = "2025-04-12T17:48:54.403Z" }, + { url = "https://files.pythonhosted.org/packages/fd/a4/bfe78777076dc405e3bd2080bc32da5ab3945b5a25dc5d8acaa9de64a162/pillow-11.2.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:31df6e2d3d8fc99f993fd253e97fae451a8db2e7207acf97859732273e108406", size = 4549049, upload-time = "2025-04-12T17:48:56.383Z" }, + { url = 
"https://files.pythonhosted.org/packages/65/4d/eaf9068dc687c24979e977ce5677e253624bd8b616b286f543f0c1b91662/pillow-11.2.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:062b7a42d672c45a70fa1f8b43d1d38ff76b63421cbbe7f88146b39e8a558d91", size = 4635408, upload-time = "2025-04-12T17:48:58.782Z" }, + { url = "https://files.pythonhosted.org/packages/1d/26/0fd443365d9c63bc79feb219f97d935cd4b93af28353cba78d8e77b61719/pillow-11.2.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4eb92eca2711ef8be42fd3f67533765d9fd043b8c80db204f16c8ea62ee1a751", size = 4614863, upload-time = "2025-04-12T17:49:00.709Z" }, + { url = "https://files.pythonhosted.org/packages/49/65/dca4d2506be482c2c6641cacdba5c602bc76d8ceb618fd37de855653a419/pillow-11.2.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f91ebf30830a48c825590aede79376cb40f110b387c17ee9bd59932c961044f9", size = 4692938, upload-time = "2025-04-12T17:49:02.946Z" }, + { url = "https://files.pythonhosted.org/packages/b3/92/1ca0c3f09233bd7decf8f7105a1c4e3162fb9142128c74adad0fb361b7eb/pillow-11.2.1-cp313-cp313t-win32.whl", hash = "sha256:e0b55f27f584ed623221cfe995c912c61606be8513bfa0e07d2c674b4516d9dd", size = 2335774, upload-time = "2025-04-12T17:49:04.889Z" }, + { url = "https://files.pythonhosted.org/packages/a5/ac/77525347cb43b83ae905ffe257bbe2cc6fd23acb9796639a1f56aa59d191/pillow-11.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:36d6b82164c39ce5482f649b437382c0fb2395eabc1e2b1702a6deb8ad647d6e", size = 2681895, upload-time = "2025-04-12T17:49:06.635Z" }, + { url = "https://files.pythonhosted.org/packages/67/32/32dc030cfa91ca0fc52baebbba2e009bb001122a1daa8b6a79ad830b38d3/pillow-11.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:225c832a13326e34f212d2072982bb1adb210e0cc0b153e688743018c94a2681", size = 2417234, upload-time = "2025-04-12T17:49:08.399Z" }, +] + +[[package]] +name = "proglog" +version = "0.1.12" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "tqdm" }, +] +sdist = { 
url = "https://files.pythonhosted.org/packages/c2/af/c108866c452eda1132f3d6b3cb6be2ae8430c97e9309f38ca9dbd430af37/proglog-0.1.12.tar.gz", hash = "sha256:361ee074721c277b89b75c061336cb8c5f287c92b043efa562ccf7866cda931c", size = 8794, upload-time = "2025-05-09T14:36:18.316Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/1b/f7ea6cde25621cd9236541c66ff018f4268012a534ec31032bcb187dc5e7/proglog-0.1.12-py3-none-any.whl", hash = "sha256:ccaafce51e80a81c65dc907a460c07ccb8ec1f78dc660cfd8f9ec3a22f01b84c", size = 6337, upload-time = "2025-05-09T14:36:16.798Z" }, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.51" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bb/6e/9d084c929dfe9e3bfe0c6a47e31f78a25c54627d64a66e884a8bf5474f1c/prompt_toolkit-3.0.51.tar.gz", hash = "sha256:931a162e3b27fc90c86f1b48bb1fb2c528c2761475e57c9c06de13311c7b54ed", size = 428940, upload-time = "2025-04-15T09:18:47.731Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/4f/5249960887b1fbe561d9ff265496d170b55a735b76724f10ef19f9e40716/prompt_toolkit-3.0.51-py3-none-any.whl", hash = "sha256:52742911fde84e2d423e2f9a4cf1de7d7ac4e51958f648d9540e0fb8db077b07", size = 387810, upload-time = "2025-04-15T09:18:44.753Z" }, +] + +[[package]] +name = "propcache" +version = "0.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a6/16/43264e4a779dd8588c21a70f0709665ee8f611211bdd2c87d952cfa7c776/propcache-0.3.2.tar.gz", hash = "sha256:20d7d62e4e7ef05f221e0db2856b979540686342e7dd9973b815599c7057e168", size = 44139, upload-time = "2025-06-09T22:56:06.081Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/42/9ca01b0a6f48e81615dca4765a8f1dd2c057e0540f6116a27dc5ee01dfb6/propcache-0.3.2-cp312-cp312-macosx_10_13_universal2.whl", hash = 
"sha256:8de106b6c84506b31c27168582cd3cb3000a6412c16df14a8628e5871ff83c10", size = 73674, upload-time = "2025-06-09T22:54:30.551Z" }, + { url = "https://files.pythonhosted.org/packages/af/6e/21293133beb550f9c901bbece755d582bfaf2176bee4774000bd4dd41884/propcache-0.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:28710b0d3975117239c76600ea351934ac7b5ff56e60953474342608dbbb6154", size = 43570, upload-time = "2025-06-09T22:54:32.296Z" }, + { url = "https://files.pythonhosted.org/packages/0c/c8/0393a0a3a2b8760eb3bde3c147f62b20044f0ddac81e9d6ed7318ec0d852/propcache-0.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce26862344bdf836650ed2487c3d724b00fbfec4233a1013f597b78c1cb73615", size = 43094, upload-time = "2025-06-09T22:54:33.929Z" }, + { url = "https://files.pythonhosted.org/packages/37/2c/489afe311a690399d04a3e03b069225670c1d489eb7b044a566511c1c498/propcache-0.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bca54bd347a253af2cf4544bbec232ab982f4868de0dd684246b67a51bc6b1db", size = 226958, upload-time = "2025-06-09T22:54:35.186Z" }, + { url = "https://files.pythonhosted.org/packages/9d/ca/63b520d2f3d418c968bf596839ae26cf7f87bead026b6192d4da6a08c467/propcache-0.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55780d5e9a2ddc59711d727226bb1ba83a22dd32f64ee15594b9392b1f544eb1", size = 234894, upload-time = "2025-06-09T22:54:36.708Z" }, + { url = "https://files.pythonhosted.org/packages/11/60/1d0ed6fff455a028d678df30cc28dcee7af77fa2b0e6962ce1df95c9a2a9/propcache-0.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:035e631be25d6975ed87ab23153db6a73426a48db688070d925aa27e996fe93c", size = 233672, upload-time = "2025-06-09T22:54:38.062Z" }, + { url = "https://files.pythonhosted.org/packages/37/7c/54fd5301ef38505ab235d98827207176a5c9b2aa61939b10a460ca53e123/propcache-0.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ee6f22b6eaa39297c751d0e80c0d3a454f112f5c6481214fcf4c092074cecd67", size = 224395, upload-time = "2025-06-09T22:54:39.634Z" }, + { url = "https://files.pythonhosted.org/packages/ee/1a/89a40e0846f5de05fdc6779883bf46ba980e6df4d2ff8fb02643de126592/propcache-0.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ca3aee1aa955438c4dba34fc20a9f390e4c79967257d830f137bd5a8a32ed3b", size = 212510, upload-time = "2025-06-09T22:54:41.565Z" }, + { url = "https://files.pythonhosted.org/packages/5e/33/ca98368586c9566a6b8d5ef66e30484f8da84c0aac3f2d9aec6d31a11bd5/propcache-0.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7a4f30862869fa2b68380d677cc1c5fcf1e0f2b9ea0cf665812895c75d0ca3b8", size = 222949, upload-time = "2025-06-09T22:54:43.038Z" }, + { url = "https://files.pythonhosted.org/packages/ba/11/ace870d0aafe443b33b2f0b7efdb872b7c3abd505bfb4890716ad7865e9d/propcache-0.3.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:b77ec3c257d7816d9f3700013639db7491a434644c906a2578a11daf13176251", size = 217258, upload-time = "2025-06-09T22:54:44.376Z" }, + { url = "https://files.pythonhosted.org/packages/5b/d2/86fd6f7adffcfc74b42c10a6b7db721d1d9ca1055c45d39a1a8f2a740a21/propcache-0.3.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:cab90ac9d3f14b2d5050928483d3d3b8fb6b4018893fc75710e6aa361ecb2474", size = 213036, upload-time = "2025-06-09T22:54:46.243Z" }, + { url = "https://files.pythonhosted.org/packages/07/94/2d7d1e328f45ff34a0a284cf5a2847013701e24c2a53117e7c280a4316b3/propcache-0.3.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0b504d29f3c47cf6b9e936c1852246c83d450e8e063d50562115a6be6d3a2535", size = 227684, upload-time = "2025-06-09T22:54:47.63Z" }, + { url = "https://files.pythonhosted.org/packages/b7/05/37ae63a0087677e90b1d14710e532ff104d44bc1efa3b3970fff99b891dc/propcache-0.3.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:ce2ac2675a6aa41ddb2a0c9cbff53780a617ac3d43e620f8fd77ba1c84dcfc06", size = 234562, upload-time = "2025-06-09T22:54:48.982Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7c/3f539fcae630408d0bd8bf3208b9a647ccad10976eda62402a80adf8fc34/propcache-0.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b4239611205294cc433845b914131b2a1f03500ff3c1ed093ed216b82621e1", size = 222142, upload-time = "2025-06-09T22:54:50.424Z" }, + { url = "https://files.pythonhosted.org/packages/7c/d2/34b9eac8c35f79f8a962546b3e97e9d4b990c420ee66ac8255d5d9611648/propcache-0.3.2-cp312-cp312-win32.whl", hash = "sha256:df4a81b9b53449ebc90cc4deefb052c1dd934ba85012aa912c7ea7b7e38b60c1", size = 37711, upload-time = "2025-06-09T22:54:52.072Z" }, + { url = "https://files.pythonhosted.org/packages/19/61/d582be5d226cf79071681d1b46b848d6cb03d7b70af7063e33a2787eaa03/propcache-0.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7046e79b989d7fe457bb755844019e10f693752d169076138abf17f31380800c", size = 41479, upload-time = "2025-06-09T22:54:53.234Z" }, + { url = "https://files.pythonhosted.org/packages/dc/d1/8c747fafa558c603c4ca19d8e20b288aa0c7cda74e9402f50f31eb65267e/propcache-0.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ca592ed634a73ca002967458187109265e980422116c0a107cf93d81f95af945", size = 71286, upload-time = "2025-06-09T22:54:54.369Z" }, + { url = "https://files.pythonhosted.org/packages/61/99/d606cb7986b60d89c36de8a85d58764323b3a5ff07770a99d8e993b3fa73/propcache-0.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9ecb0aad4020e275652ba3975740f241bd12a61f1a784df044cf7477a02bc252", size = 42425, upload-time = "2025-06-09T22:54:55.642Z" }, + { url = "https://files.pythonhosted.org/packages/8c/96/ef98f91bbb42b79e9bb82bdd348b255eb9d65f14dbbe3b1594644c4073f7/propcache-0.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7f08f1cc28bd2eade7a8a3d2954ccc673bb02062e3e7da09bc75d843386b342f", size = 41846, upload-time = "2025-06-09T22:54:57.246Z" }, + { url = 
"https://files.pythonhosted.org/packages/5b/ad/3f0f9a705fb630d175146cd7b1d2bf5555c9beaed54e94132b21aac098a6/propcache-0.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1a342c834734edb4be5ecb1e9fb48cb64b1e2320fccbd8c54bf8da8f2a84c33", size = 208871, upload-time = "2025-06-09T22:54:58.975Z" }, + { url = "https://files.pythonhosted.org/packages/3a/38/2085cda93d2c8b6ec3e92af2c89489a36a5886b712a34ab25de9fbca7992/propcache-0.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a544caaae1ac73f1fecfae70ded3e93728831affebd017d53449e3ac052ac1e", size = 215720, upload-time = "2025-06-09T22:55:00.471Z" }, + { url = "https://files.pythonhosted.org/packages/61/c1/d72ea2dc83ac7f2c8e182786ab0fc2c7bd123a1ff9b7975bee671866fe5f/propcache-0.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310d11aa44635298397db47a3ebce7db99a4cc4b9bbdfcf6c98a60c8d5261cf1", size = 215203, upload-time = "2025-06-09T22:55:01.834Z" }, + { url = "https://files.pythonhosted.org/packages/af/81/b324c44ae60c56ef12007105f1460d5c304b0626ab0cc6b07c8f2a9aa0b8/propcache-0.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c1396592321ac83157ac03a2023aa6cc4a3cc3cfdecb71090054c09e5a7cce3", size = 206365, upload-time = "2025-06-09T22:55:03.199Z" }, + { url = "https://files.pythonhosted.org/packages/09/73/88549128bb89e66d2aff242488f62869014ae092db63ccea53c1cc75a81d/propcache-0.3.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cabf5b5902272565e78197edb682017d21cf3b550ba0460ee473753f28d23c1", size = 196016, upload-time = "2025-06-09T22:55:04.518Z" }, + { url = "https://files.pythonhosted.org/packages/b9/3f/3bdd14e737d145114a5eb83cb172903afba7242f67c5877f9909a20d948d/propcache-0.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0a2f2235ac46a7aa25bdeb03a9e7060f6ecbd213b1f9101c43b3090ffb971ef6", size = 205596, upload-time = 
"2025-06-09T22:55:05.942Z" }, + { url = "https://files.pythonhosted.org/packages/0f/ca/2f4aa819c357d3107c3763d7ef42c03980f9ed5c48c82e01e25945d437c1/propcache-0.3.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:92b69e12e34869a6970fd2f3da91669899994b47c98f5d430b781c26f1d9f387", size = 200977, upload-time = "2025-06-09T22:55:07.792Z" }, + { url = "https://files.pythonhosted.org/packages/cd/4a/e65276c7477533c59085251ae88505caf6831c0e85ff8b2e31ebcbb949b1/propcache-0.3.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:54e02207c79968ebbdffc169591009f4474dde3b4679e16634d34c9363ff56b4", size = 197220, upload-time = "2025-06-09T22:55:09.173Z" }, + { url = "https://files.pythonhosted.org/packages/7c/54/fc7152e517cf5578278b242396ce4d4b36795423988ef39bb8cd5bf274c8/propcache-0.3.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4adfb44cb588001f68c5466579d3f1157ca07f7504fc91ec87862e2b8e556b88", size = 210642, upload-time = "2025-06-09T22:55:10.62Z" }, + { url = "https://files.pythonhosted.org/packages/b9/80/abeb4a896d2767bf5f1ea7b92eb7be6a5330645bd7fb844049c0e4045d9d/propcache-0.3.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fd3e6019dc1261cd0291ee8919dd91fbab7b169bb76aeef6c716833a3f65d206", size = 212789, upload-time = "2025-06-09T22:55:12.029Z" }, + { url = "https://files.pythonhosted.org/packages/b3/db/ea12a49aa7b2b6d68a5da8293dcf50068d48d088100ac016ad92a6a780e6/propcache-0.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4c181cad81158d71c41a2bce88edce078458e2dd5ffee7eddd6b05da85079f43", size = 205880, upload-time = "2025-06-09T22:55:13.45Z" }, + { url = "https://files.pythonhosted.org/packages/d1/e5/9076a0bbbfb65d1198007059c65639dfd56266cf8e477a9707e4b1999ff4/propcache-0.3.2-cp313-cp313-win32.whl", hash = "sha256:8a08154613f2249519e549de2330cf8e2071c2887309a7b07fb56098f5170a02", size = 37220, upload-time = "2025-06-09T22:55:15.284Z" }, + { url = 
"https://files.pythonhosted.org/packages/d3/f5/b369e026b09a26cd77aa88d8fffd69141d2ae00a2abaaf5380d2603f4b7f/propcache-0.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e41671f1594fc4ab0a6dec1351864713cb3a279910ae8b58f884a88a0a632c05", size = 40678, upload-time = "2025-06-09T22:55:16.445Z" }, + { url = "https://files.pythonhosted.org/packages/a4/3a/6ece377b55544941a08d03581c7bc400a3c8cd3c2865900a68d5de79e21f/propcache-0.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:9a3cf035bbaf035f109987d9d55dc90e4b0e36e04bbbb95af3055ef17194057b", size = 76560, upload-time = "2025-06-09T22:55:17.598Z" }, + { url = "https://files.pythonhosted.org/packages/0c/da/64a2bb16418740fa634b0e9c3d29edff1db07f56d3546ca2d86ddf0305e1/propcache-0.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:156c03d07dc1323d8dacaa221fbe028c5c70d16709cdd63502778e6c3ccca1b0", size = 44676, upload-time = "2025-06-09T22:55:18.922Z" }, + { url = "https://files.pythonhosted.org/packages/36/7b/f025e06ea51cb72c52fb87e9b395cced02786610b60a3ed51da8af017170/propcache-0.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74413c0ba02ba86f55cf60d18daab219f7e531620c15f1e23d95563f505efe7e", size = 44701, upload-time = "2025-06-09T22:55:20.106Z" }, + { url = "https://files.pythonhosted.org/packages/a4/00/faa1b1b7c3b74fc277f8642f32a4c72ba1d7b2de36d7cdfb676db7f4303e/propcache-0.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f066b437bb3fa39c58ff97ab2ca351db465157d68ed0440abecb21715eb24b28", size = 276934, upload-time = "2025-06-09T22:55:21.5Z" }, + { url = "https://files.pythonhosted.org/packages/74/ab/935beb6f1756e0476a4d5938ff44bf0d13a055fed880caf93859b4f1baf4/propcache-0.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1304b085c83067914721e7e9d9917d41ad87696bf70f0bc7dee450e9c71ad0a", size = 278316, upload-time = "2025-06-09T22:55:22.918Z" }, + { url = 
"https://files.pythonhosted.org/packages/f8/9d/994a5c1ce4389610838d1caec74bdf0e98b306c70314d46dbe4fcf21a3e2/propcache-0.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab50cef01b372763a13333b4e54021bdcb291fc9a8e2ccb9c2df98be51bcde6c", size = 282619, upload-time = "2025-06-09T22:55:24.651Z" }, + { url = "https://files.pythonhosted.org/packages/2b/00/a10afce3d1ed0287cef2e09506d3be9822513f2c1e96457ee369adb9a6cd/propcache-0.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fad3b2a085ec259ad2c2842666b2a0a49dea8463579c606426128925af1ed725", size = 265896, upload-time = "2025-06-09T22:55:26.049Z" }, + { url = "https://files.pythonhosted.org/packages/2e/a8/2aa6716ffa566ca57c749edb909ad27884680887d68517e4be41b02299f3/propcache-0.3.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:261fa020c1c14deafd54c76b014956e2f86991af198c51139faf41c4d5e83892", size = 252111, upload-time = "2025-06-09T22:55:27.381Z" }, + { url = "https://files.pythonhosted.org/packages/36/4f/345ca9183b85ac29c8694b0941f7484bf419c7f0fea2d1e386b4f7893eed/propcache-0.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:46d7f8aa79c927e5f987ee3a80205c987717d3659f035c85cf0c3680526bdb44", size = 268334, upload-time = "2025-06-09T22:55:28.747Z" }, + { url = "https://files.pythonhosted.org/packages/3e/ca/fcd54f78b59e3f97b3b9715501e3147f5340167733d27db423aa321e7148/propcache-0.3.2-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:6d8f3f0eebf73e3c0ff0e7853f68be638b4043c65a70517bb575eff54edd8dbe", size = 255026, upload-time = "2025-06-09T22:55:30.184Z" }, + { url = "https://files.pythonhosted.org/packages/8b/95/8e6a6bbbd78ac89c30c225210a5c687790e532ba4088afb8c0445b77ef37/propcache-0.3.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:03c89c1b14a5452cf15403e291c0ccd7751d5b9736ecb2c5bab977ad6c5bcd81", size = 250724, upload-time = "2025-06-09T22:55:31.646Z" }, + { url = 
"https://files.pythonhosted.org/packages/ee/b0/0dd03616142baba28e8b2d14ce5df6631b4673850a3d4f9c0f9dd714a404/propcache-0.3.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:0cc17efde71e12bbaad086d679ce575268d70bc123a5a71ea7ad76f70ba30bba", size = 268868, upload-time = "2025-06-09T22:55:33.209Z" }, + { url = "https://files.pythonhosted.org/packages/c5/98/2c12407a7e4fbacd94ddd32f3b1e3d5231e77c30ef7162b12a60e2dd5ce3/propcache-0.3.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:acdf05d00696bc0447e278bb53cb04ca72354e562cf88ea6f9107df8e7fd9770", size = 271322, upload-time = "2025-06-09T22:55:35.065Z" }, + { url = "https://files.pythonhosted.org/packages/35/91/9cb56efbb428b006bb85db28591e40b7736847b8331d43fe335acf95f6c8/propcache-0.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4445542398bd0b5d32df908031cb1b30d43ac848e20470a878b770ec2dcc6330", size = 265778, upload-time = "2025-06-09T22:55:36.45Z" }, + { url = "https://files.pythonhosted.org/packages/9a/4c/b0fe775a2bdd01e176b14b574be679d84fc83958335790f7c9a686c1f468/propcache-0.3.2-cp313-cp313t-win32.whl", hash = "sha256:f86e5d7cd03afb3a1db8e9f9f6eff15794e79e791350ac48a8c924e6f439f394", size = 41175, upload-time = "2025-06-09T22:55:38.436Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ff/47f08595e3d9b5e149c150f88d9714574f1a7cbd89fe2817158a952674bf/propcache-0.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9704bedf6e7cbe3c65eca4379a9b53ee6a83749f047808cbb5044d40d7d72198", size = 44857, upload-time = "2025-06-09T22:55:39.687Z" }, + { url = "https://files.pythonhosted.org/packages/cc/35/cc0aaecf278bb4575b8555f2b137de5ab821595ddae9da9d3cd1da4072c7/propcache-0.3.2-py3-none-any.whl", hash = "sha256:98f1ec44fb675f5052cccc8e609c46ed23a35a1cfd18545ad4e29002d858a43f", size = 12663, upload-time = "2025-06-09T22:56:04.484Z" }, +] + +[[package]] +name = "protobuf" +version = "5.29.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/43/29/d09e70352e4e88c9c7a198d5645d7277811448d76c23b00345670f7c8a38/protobuf-5.29.5.tar.gz", hash = "sha256:bc1463bafd4b0929216c35f437a8e28731a2b7fe3d98bb77a600efced5a15c84", size = 425226, upload-time = "2025-05-28T23:51:59.82Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/11/6e40e9fc5bba02988a214c07cf324595789ca7820160bfd1f8be96e48539/protobuf-5.29.5-cp310-abi3-win32.whl", hash = "sha256:3f1c6468a2cfd102ff4703976138844f78ebd1fb45f49011afc5139e9e283079", size = 422963, upload-time = "2025-05-28T23:51:41.204Z" }, + { url = "https://files.pythonhosted.org/packages/81/7f/73cefb093e1a2a7c3ffd839e6f9fcafb7a427d300c7f8aef9c64405d8ac6/protobuf-5.29.5-cp310-abi3-win_amd64.whl", hash = "sha256:3f76e3a3675b4a4d867b52e4a5f5b78a2ef9565549d4037e06cf7b0942b1d3fc", size = 434818, upload-time = "2025-05-28T23:51:44.297Z" }, + { url = "https://files.pythonhosted.org/packages/dd/73/10e1661c21f139f2c6ad9b23040ff36fee624310dc28fba20d33fdae124c/protobuf-5.29.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e38c5add5a311f2a6eb0340716ef9b039c1dfa428b28f25a7838ac329204a671", size = 418091, upload-time = "2025-05-28T23:51:45.907Z" }, + { url = "https://files.pythonhosted.org/packages/6c/04/98f6f8cf5b07ab1294c13f34b4e69b3722bb609c5b701d6c169828f9f8aa/protobuf-5.29.5-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:fa18533a299d7ab6c55a238bf8629311439995f2e7eca5caaff08663606e9015", size = 319824, upload-time = "2025-05-28T23:51:47.545Z" }, + { url = "https://files.pythonhosted.org/packages/85/e4/07c80521879c2d15f321465ac24c70efe2381378c00bf5e56a0f4fbac8cd/protobuf-5.29.5-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:63848923da3325e1bf7e9003d680ce6e14b07e55d0473253a690c3a8b8fd6e61", size = 319942, upload-time = "2025-05-28T23:51:49.11Z" }, + { url = "https://files.pythonhosted.org/packages/7e/cc/7e77861000a0691aeea8f4566e5d3aa716f2b1dece4a24439437e41d3d25/protobuf-5.29.5-py3-none-any.whl", hash = 
"sha256:6cf42630262c59b2d8de33954443d94b746c952b01434fc58a417fdbd2e84bd5", size = 172823, upload-time = "2025-05-28T23:51:58.157Z" }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/88/2c/7bb1416c5620485aa793f2de31d3df393d3686aa8a8506d11e10e13c5baf/python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5", size = 39920, upload-time = 
"2025-03-25T10:14:56.835Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/18/98a99ad95133c6a6e2005fe89faedf294a748bd5dc803008059409ac9b1e/python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d", size = 20256, upload-time = "2025-03-25T10:14:55.034Z" }, +] + +[[package]] +name = "python-multipart" +version = "0.0.20" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158, upload-time = "2024-12-16T19:45:46.972Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" }, +] + +[[package]] +name = "python-rapidjson" +version = "1.20" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/47/2a/2510836a65a1fc40c923393611896c3c8ad1e2f583ed0c32cf0bb48cc378/python_rapidjson-1.20.tar.gz", hash = "sha256:115f08c86d2df7543c02605e77c84727cdabc4b08310d2f097e953efeaaa73eb", size = 238158, upload-time = "2024-08-05T17:00:29.91Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/d1/40616f40499f8f61e83135aa078a0ba7d392e7ea63c016c7cc544ecb7344/python_rapidjson-1.20-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6056fcc8caeb9b04775bf655568bba362c7670ab792c1b438671bb056db954cd", size = 230104, upload-time = "2024-08-05T17:55:27.252Z" }, + { url = "https://files.pythonhosted.org/packages/ea/2f/d28f4da4df83cfeb60fb7b84396a9c3678a0ac615012dc234d5b962fbaaf/python_rapidjson-1.20-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:225bd4cbabfe7910261cbcebb8b811d4ff98e90cdd17c233b916c6aa71a9553f", size = 211105, upload-time = "2024-08-05T17:55:28.869Z" }, + { url = "https://files.pythonhosted.org/packages/b3/60/ebc521afbdb626bb571a815378831f685213cb6b98ffe08176fe3191c5a3/python_rapidjson-1.20-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:026077b663acf93a3f2b1adb87282e611a30214b8ae8001b7e4863a3b978e646", size = 1650309, upload-time = "2024-08-05T17:55:30.917Z" }, + { url = "https://files.pythonhosted.org/packages/19/da/4c375b90c54091e93a600fca06a9f3b8456b0e09050e862e998fc22b6385/python_rapidjson-1.20-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:884e1dd4c0770ed424737941af4d5dc9014995f9c33595f151af13f83ce282c3", size = 1700043, upload-time = "2024-08-05T17:55:33.244Z" }, + { url = "https://files.pythonhosted.org/packages/bc/6e/2718413e7bc300523c5d4eaa25418059d8b17effa9aef2f2ae370493b861/python_rapidjson-1.20-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f55531c8197cb7a21a5ef0ffa46f2b8fc8c5fe7c6fd08bdbd2063ae65d2ff65", size = 1700523, upload-time = "2024-08-05T17:55:35.751Z" }, + { url = "https://files.pythonhosted.org/packages/32/fe/d96e996f9c5140d3ce93d440f871a1b336f1c14fae27b64d4872fc58d45d/python_rapidjson-1.20-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c60121d155562dc694c05ed7df4e39e42ee1d3adff2a060c64a004498e6451f7", size = 1598383, upload-time = "2024-08-05T17:55:37.243Z" }, + { url = "https://files.pythonhosted.org/packages/46/32/ef3a381641b803e1b67c9b9c360d161b650620605768652e704fb35ad2b9/python_rapidjson-1.20-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3a6620eed0b04196f37fab7048c1d672d03391bb29d7f09ee8fee8dea33f11f4", size = 2454134, upload-time = "2024-08-05T17:55:39.04Z" }, + { url = 
"https://files.pythonhosted.org/packages/2f/50/771826d3f217b7c597f14df0dfa943d9e6f2f14749d974de4402f56ce39a/python_rapidjson-1.20-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ddb63eff401ce7cf20cdd5e21942fc23fbe0e1dc1d96d7ae838645fb1f74fb47", size = 2585576, upload-time = "2024-08-05T17:55:40.689Z" }, + { url = "https://files.pythonhosted.org/packages/64/95/f3e7ed53c9ab27a99c876c42b7d1994312e6fd2c2d8131ce849bd4275be8/python_rapidjson-1.20-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:05e28c3dbb4a0d74ec13af9668ef2b9f302edf83cf7ce1d8316a95364720eec0", size = 2599382, upload-time = "2024-08-05T17:55:43.111Z" }, + { url = "https://files.pythonhosted.org/packages/bc/4c/34778932d0145fdc7087274cd4c0fa421a96acbc96bf9860cbdf3e389dcd/python_rapidjson-1.20-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b733978ecd84fc5df9a778ce821dc1f3113f7bfc2493cac0bb17efb4ae0bb8fa", size = 2537066, upload-time = "2024-08-05T17:55:45.738Z" }, + { url = "https://files.pythonhosted.org/packages/50/16/dfef47ec507d5a5d00281b8db8526d5c36b715afeeae0ceeef4030f1640f/python_rapidjson-1.20-cp312-cp312-win32.whl", hash = "sha256:d87041448cec00e2db5d858625a76dc1b59eef6691a039acff6d92ad8581cfc1", size = 128358, upload-time = "2024-08-05T17:55:48.108Z" }, + { url = "https://files.pythonhosted.org/packages/bc/97/42a550a79ab90ab37fcd8b519cd71bba4b96b85679218100d63b437770c0/python_rapidjson-1.20-cp312-cp312-win_amd64.whl", hash = "sha256:5d3be149ce5475f9605f01240487541057792abad94d3fd0cd56af363cf5a4dc", size = 149067, upload-time = "2024-08-05T17:55:49.834Z" }, + { url = "https://files.pythonhosted.org/packages/18/04/47d9d10c3fa6e57af9462792088187605a07d88ad6f6f2e193fb01eff0fc/python_rapidjson-1.20-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:daee815b4c20ca6e4dbc6bde373dd3f65b53813d775f1c94b765b33b402513a7", size = 229315, upload-time = "2024-08-05T17:55:51.263Z" }, + { url = 
"https://files.pythonhosted.org/packages/9a/3a/0c4e0af51d7356d9efdef1bf1785d9d9f9e0789a7d2844cc3e9b35ef383f/python_rapidjson-1.20-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:083df379c769b30f9bc40041c91fd9d8f7bb8ca2b3c7170258842aced2098e05", size = 211111, upload-time = "2024-08-05T17:55:52.707Z" }, + { url = "https://files.pythonhosted.org/packages/83/e1/e253de9a774d021f9a6947f845628fae8237f441c63198e8a72e5906d31f/python_rapidjson-1.20-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9399ad75a2e3377f9e6208caabe73eb9354cd01b732407475ccadcd42c577df", size = 1650131, upload-time = "2024-08-05T17:55:54.302Z" }, + { url = "https://files.pythonhosted.org/packages/3e/93/8f723c7f7be055086d6bec2ba9e5ef13e749c3fb3ad5a3dc1d740acee889/python_rapidjson-1.20-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:599ab208ccf6172d6cfac1abe048c837e62612f91f97d198e32773c45346a0b4", size = 1699873, upload-time = "2024-08-05T17:55:55.967Z" }, + { url = "https://files.pythonhosted.org/packages/7d/2e/eb7255601b81a5b70f2bff05caab136e191b66825c16db3e7db1bdaa8314/python_rapidjson-1.20-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf3c0e2a5b97b0d07311f15f0dce4434e43dec865c3794ad1b10d968460fd665", size = 1700484, upload-time = "2024-08-05T17:55:57.846Z" }, + { url = "https://files.pythonhosted.org/packages/90/54/23d8b595dd4fdbdaa6c5f723a4df7a7be78aa702aa0b6dac6c964e6e6d30/python_rapidjson-1.20-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8064b8edb57ddd9e3ffa539cf2ec2f03515751fb0698b40ba5cb66a2123af19", size = 1598344, upload-time = "2024-08-05T17:55:59.586Z" }, + { url = "https://files.pythonhosted.org/packages/3d/3a/3628e199a826e7bc598633ce895516981602ab1d8fce76359005f90ca488/python_rapidjson-1.20-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bc79d7f00f7538e027960ca6bcd1e03ed99fcf660d4d882d1c22f641155d0db0", size = 2454206, 
upload-time = "2024-08-05T17:56:01.556Z" }, + { url = "https://files.pythonhosted.org/packages/ed/19/eef8629f73b1af21fa778d140e68e72076fe5746357426d6716a0c411dd2/python_rapidjson-1.20-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:87aa0b01b8c20984844f1440b8ff6bdb32de911a1750fed344b9daed33b4b52b", size = 2585553, upload-time = "2024-08-05T17:56:03.637Z" }, + { url = "https://files.pythonhosted.org/packages/d8/9d/217e56c74a65cfaf4441b26b6206b924b41fb339f98776a74e60dd287b46/python_rapidjson-1.20-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4099cb9eae8a0ce19c09e02729eb6d69d5180424f13a2641a6c407d053e47a82", size = 2599513, upload-time = "2024-08-05T17:56:05.795Z" }, + { url = "https://files.pythonhosted.org/packages/54/f6/4d40189f14e4fa5526a91aad9944864c8a4eebc0257e0314a331f3c64170/python_rapidjson-1.20-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4c680cd2b4de760ff6875de71fe6a87bd610aa116593d62e4f81a563be86ae18", size = 2537192, upload-time = "2024-08-05T17:56:07.53Z" }, + { url = "https://files.pythonhosted.org/packages/ee/30/f3f40abfd8d7f0586b88ccfcd747f2e227fe589c16fbb485b1e238d8e641/python_rapidjson-1.20-cp313-cp313-win32.whl", hash = "sha256:9e431a7afc77aa874fed537c9f6bf5fcecaef124ebeae2a2379d3b9e9adce74b", size = 128362, upload-time = "2024-08-05T17:56:09.332Z" }, + { url = "https://files.pythonhosted.org/packages/94/df/7126352e55cb72a5ca99630bd44ffb11bbf61ee35f4e1f34d203a77597c5/python_rapidjson-1.20-cp313-cp313-win_amd64.whl", hash = "sha256:7444bc7e6a04c03d6ed748b5dab0798fa2b3f2b303be8c38d3af405b2cac6d63", size = 149072, upload-time = "2024-08-05T17:56:10.625Z" }, +] + +[[package]] +name = "pywin32" +version = "310" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6b/ec/4fdbe47932f671d6e348474ea35ed94227fb5df56a7c30cbbb42cd396ed0/pywin32-310-cp312-cp312-win32.whl", hash = "sha256:8a75a5cc3893e83a108c05d82198880704c44bbaee4d06e442e471d3c9ea4f3d", size = 8796239, upload-time 
= "2025-03-17T00:55:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/e3/e5/b0627f8bb84e06991bea89ad8153a9e50ace40b2e1195d68e9dff6b03d0f/pywin32-310-cp312-cp312-win_amd64.whl", hash = "sha256:bf5c397c9a9a19a6f62f3fb821fbf36cac08f03770056711f765ec1503972060", size = 9503839, upload-time = "2025-03-17T00:56:00.8Z" }, + { url = "https://files.pythonhosted.org/packages/1f/32/9ccf53748df72301a89713936645a664ec001abd35ecc8578beda593d37d/pywin32-310-cp312-cp312-win_arm64.whl", hash = "sha256:2349cc906eae872d0663d4d6290d13b90621eaf78964bb1578632ff20e152966", size = 8459470, upload-time = "2025-03-17T00:56:02.601Z" }, + { url = "https://files.pythonhosted.org/packages/1c/09/9c1b978ffc4ae53999e89c19c77ba882d9fce476729f23ef55211ea1c034/pywin32-310-cp313-cp313-win32.whl", hash = "sha256:5d241a659c496ada3253cd01cfaa779b048e90ce4b2b38cd44168ad555ce74ab", size = 8794384, upload-time = "2025-03-17T00:56:04.383Z" }, + { url = "https://files.pythonhosted.org/packages/45/3c/b4640f740ffebadd5d34df35fecba0e1cfef8fde9f3e594df91c28ad9b50/pywin32-310-cp313-cp313-win_amd64.whl", hash = "sha256:667827eb3a90208ddbdcc9e860c81bde63a135710e21e4cb3348968e4bd5249e", size = 9503039, upload-time = "2025-03-17T00:56:06.207Z" }, + { url = "https://files.pythonhosted.org/packages/b4/f4/f785020090fb050e7fb6d34b780f2231f302609dc964672f72bfaeb59a28/pywin32-310-cp313-cp313-win_arm64.whl", hash = "sha256:e308f831de771482b7cf692a1f308f8fca701b2d8f9dde6cc440c7da17e47b33", size = 8458152, upload-time = "2025-03-17T00:56:07.819Z" }, +] + +[[package]] +name = "requests" +version = "2.32.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e1/0a/929373653770d8a0d7ea76c37de6e41f11eb07559b103b1c02cafb3f7cf8/requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422", size = 
135258, upload-time = "2025-06-09T16:43:07.34Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" }, +] + +[[package]] +name = "setuptools" +version = "80.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "tqdm" +version = "4.67.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, +] + +[[package]] +name = "trinity-client-aida" +version = "0.1.0" +source = { virtual = "." } +dependencies = [ + { name = "apscheduler" }, + { name = "celery" }, + { name = "geventhttpclient" }, + { name = "google-search-results" }, + { name = "moviepy" }, + { name = "numpy" }, + { name = "pandas-stubs" }, + { name = "pika-stubs" }, + { name = "python-multipart" }, + { name = "tritonclient", extra = ["all"] }, + { name = "types-urllib3" }, +] + +[package.metadata] +requires-dist = [ + { name = "apscheduler", specifier = ">=3.11.0" }, + { name = "celery", specifier = ">=5.5.3" }, + { name = "geventhttpclient", specifier = ">=2.3.4" }, + { name = "google-search-results", specifier = ">=2.4.2" }, + { name = "moviepy", specifier = ">=2.2.1" }, + { name = "numpy", specifier = "==1.26.4" }, + { name = "pandas-stubs", specifier = "==2.2.3.250527" }, + { name = "pika-stubs", specifier = "==0.1.3" }, + { name = "python-multipart", specifier = ">=0.0.20" }, + { name = "tritonclient", extras = ["all"], specifier = ">=2.58.0" }, + { name = "types-urllib3", specifier = "==1.26.25.14" }, +] + +[[package]] +name = "tritonclient" +version = "2.58.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "python-rapidjson" }, + { name = "urllib3" }, +] +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/cf/c8/7ef73066476d54e505ebed7435293469a2358f3a173106d2ff9eee70b91f/tritonclient-2.58.0-py3-none-any.whl", hash = "sha256:f456c3d982cc4f0eaaac49e1175a0e86f20fa810d2afc44c0fae3587a97ba67c", size = 98240, upload-time = "2025-05-31T22:07:25.032Z" }, + { url = "https://files.pythonhosted.org/packages/24/d5/c4ae120fba6621c88c270878e5bffd81c88cf870dd82011e35c2f625cd35/tritonclient-2.58.0-py3-none-manylinux1_x86_64.whl", hash = "sha256:671bf5c6fe441191904978f947ae7db72e4f82b9894301d3c62219a5a2f0c4ee", size = 14450319, upload-time = "2025-05-31T22:07:42.837Z" }, + { url = "https://files.pythonhosted.org/packages/54/8a/9f10a452a0dcc1156434d93624f1d88351ac0547b990e8b5220c89839de3/tritonclient-2.58.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0e25fc64ffaca89ec9b76f8a016a8f0e7391c11b3db334daef1dcc77ce1ca493", size = 13629753, upload-time = "2025-05-31T22:08:07.676Z" }, +] + +[package.optional-dependencies] +all = [ + { name = "aiohttp" }, + { name = "cuda-python" }, + { name = "geventhttpclient" }, + { name = "grpcio" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "protobuf" }, + { name = "python-rapidjson" }, +] + +[[package]] +name = "types-pytz" +version = "2025.2.0.20250516" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bd/72/b0e711fd90409f5a76c75349055d3eb19992c110f0d2d6aabbd6cfbc14bf/types_pytz-2025.2.0.20250516.tar.gz", hash = "sha256:e1216306f8c0d5da6dafd6492e72eb080c9a166171fa80dd7a1990fd8be7a7b3", size = 10940, upload-time = "2025-05-16T03:07:01.91Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/ba/e205cd11c1c7183b23c97e4bcd1de7bc0633e2e867601c32ecfc6ad42675/types_pytz-2025.2.0.20250516-py3-none-any.whl", hash = "sha256:e0e0c8a57e2791c19f718ed99ab2ba623856b11620cb6b637e5f62ce285a7451", size = 10136, upload-time = "2025-05-16T03:07:01.075Z" }, +] + +[[package]] +name = "types-urllib3" +version = "1.26.25.14" +source = { 
registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/73/de/b9d7a68ad39092368fb21dd6194b362b98a1daeea5dcfef5e1adb5031c7e/types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f", size = 11239, upload-time = "2023-07-20T15:19:31.307Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/7b/3fc711b2efea5e85a7a0bbfe269ea944aa767bbba5ec52f9ee45d362ccf3/types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e", size = 15377, upload-time = "2023-07-20T15:19:30.379Z" }, +] + +[[package]] +name = "tzdata" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" }, +] + +[[package]] +name = "tzlocal" +version = "5.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "tzdata", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8b/2e/c14812d3d4d9cd1773c6be938f89e5735a1f11a9f184ac3639b93cef35d5/tzlocal-5.3.1.tar.gz", hash = "sha256:cceffc7edecefea1f595541dbd6e990cb1ea3d19bf01b2809f362a03dd7921fd", size = 30761, upload-time = "2025-03-05T21:17:41.549Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/14/e2a54fabd4f08cd7af1c07030603c3356b74da07f7cc056e600436edfa17/tzlocal-5.3.1-py3-none-any.whl", hash = 
"sha256:eb1a66c3ef5847adf7a834f1be0800581b683b5608e74f86ecbcef8ab91bb85d", size = 18026, upload-time = "2025-03-05T21:17:39.857Z" }, +] + +[[package]] +name = "urllib3" +version = "2.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, +] + +[[package]] +name = "vine" +version = "5.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bd/e4/d07b5f29d283596b9727dd5275ccbceb63c44a1a82aa9e4bfd20426762ac/vine-5.1.0.tar.gz", hash = "sha256:8b62e981d35c41049211cf62a0a1242d8c1ee9bd15bb196ce38aefd6799e61e0", size = 48980, upload-time = "2023-11-05T08:46:53.857Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/ff/7c0c86c43b3cbb927e0ccc0255cb4057ceba4799cd44ae95174ce8e8b5b2/vine-5.1.0-py3-none-any.whl", hash = "sha256:40fdf3c48b2cfe1c38a49e9ae2da6fda88e4794c810050a728bd7413811fb1dc", size = 9636, upload-time = "2023-11-05T08:46:51.205Z" }, +] + +[[package]] +name = "wcwidth" +version = "0.2.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301, upload-time = "2024-01-06T02:10:57.829Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166, upload-time = "2024-01-06T02:10:55.763Z" }, +] + +[[package]] +name = "yarl" +version = "1.20.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "multidict" }, + { name = "propcache" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3c/fb/efaa23fa4e45537b827620f04cf8f3cd658b76642205162e072703a5b963/yarl-1.20.1.tar.gz", hash = "sha256:d017a4997ee50c91fd5466cef416231bb82177b93b029906cefc542ce14c35ac", size = 186428, upload-time = "2025-06-10T00:46:09.923Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/9a/cb7fad7d73c69f296eda6815e4a2c7ed53fc70c2f136479a91c8e5fbdb6d/yarl-1.20.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdcc4cd244e58593a4379fe60fdee5ac0331f8eb70320a24d591a3be197b94a9", size = 133667, upload-time = "2025-06-10T00:43:44.369Z" }, + { url = "https://files.pythonhosted.org/packages/67/38/688577a1cb1e656e3971fb66a3492501c5a5df56d99722e57c98249e5b8a/yarl-1.20.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b29a2c385a5f5b9c7d9347e5812b6f7ab267193c62d282a540b4fc528c8a9d2a", size = 91025, upload-time = "2025-06-10T00:43:46.295Z" }, + { url = "https://files.pythonhosted.org/packages/50/ec/72991ae51febeb11a42813fc259f0d4c8e0507f2b74b5514618d8b640365/yarl-1.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1112ae8154186dfe2de4732197f59c05a83dc814849a5ced892b708033f40dc2", size = 89709, upload-time = "2025-06-10T00:43:48.22Z" }, + { url = "https://files.pythonhosted.org/packages/99/da/4d798025490e89426e9f976702e5f9482005c548c579bdae792a4c37769e/yarl-1.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90bbd29c4fe234233f7fa2b9b121fb63c321830e5d05b45153a2ca68f7d310ee", size = 352287, 
upload-time = "2025-06-10T00:43:49.924Z" }, + { url = "https://files.pythonhosted.org/packages/1a/26/54a15c6a567aac1c61b18aa0f4b8aa2e285a52d547d1be8bf48abe2b3991/yarl-1.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:680e19c7ce3710ac4cd964e90dad99bf9b5029372ba0c7cbfcd55e54d90ea819", size = 345429, upload-time = "2025-06-10T00:43:51.7Z" }, + { url = "https://files.pythonhosted.org/packages/d6/95/9dcf2386cb875b234353b93ec43e40219e14900e046bf6ac118f94b1e353/yarl-1.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a979218c1fdb4246a05efc2cc23859d47c89af463a90b99b7c56094daf25a16", size = 365429, upload-time = "2025-06-10T00:43:53.494Z" }, + { url = "https://files.pythonhosted.org/packages/91/b2/33a8750f6a4bc224242a635f5f2cff6d6ad5ba651f6edcccf721992c21a0/yarl-1.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255b468adf57b4a7b65d8aad5b5138dce6a0752c139965711bdcb81bc370e1b6", size = 363862, upload-time = "2025-06-10T00:43:55.766Z" }, + { url = "https://files.pythonhosted.org/packages/98/28/3ab7acc5b51f4434b181b0cee8f1f4b77a65919700a355fb3617f9488874/yarl-1.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a97d67108e79cfe22e2b430d80d7571ae57d19f17cda8bb967057ca8a7bf5bfd", size = 355616, upload-time = "2025-06-10T00:43:58.056Z" }, + { url = "https://files.pythonhosted.org/packages/36/a3/f666894aa947a371724ec7cd2e5daa78ee8a777b21509b4252dd7bd15e29/yarl-1.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8570d998db4ddbfb9a590b185a0a33dbf8aafb831d07a5257b4ec9948df9cb0a", size = 339954, upload-time = "2025-06-10T00:43:59.773Z" }, + { url = "https://files.pythonhosted.org/packages/f1/81/5f466427e09773c04219d3450d7a1256138a010b6c9f0af2d48565e9ad13/yarl-1.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:97c75596019baae7c71ccf1d8cc4738bc08134060d0adfcbe5642f778d1dca38", size = 365575, upload-time = "2025-06-10T00:44:02.051Z" }, + { url = "https://files.pythonhosted.org/packages/2e/e3/e4b0ad8403e97e6c9972dd587388940a032f030ebec196ab81a3b8e94d31/yarl-1.20.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1c48912653e63aef91ff988c5432832692ac5a1d8f0fb8a33091520b5bbe19ef", size = 365061, upload-time = "2025-06-10T00:44:04.196Z" }, + { url = "https://files.pythonhosted.org/packages/ac/99/b8a142e79eb86c926f9f06452eb13ecb1bb5713bd01dc0038faf5452e544/yarl-1.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4c3ae28f3ae1563c50f3d37f064ddb1511ecc1d5584e88c6b7c63cf7702a6d5f", size = 364142, upload-time = "2025-06-10T00:44:06.527Z" }, + { url = "https://files.pythonhosted.org/packages/34/f2/08ed34a4a506d82a1a3e5bab99ccd930a040f9b6449e9fd050320e45845c/yarl-1.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c5e9642f27036283550f5f57dc6156c51084b458570b9d0d96100c8bebb186a8", size = 381894, upload-time = "2025-06-10T00:44:08.379Z" }, + { url = "https://files.pythonhosted.org/packages/92/f8/9a3fbf0968eac704f681726eff595dce9b49c8a25cd92bf83df209668285/yarl-1.20.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2c26b0c49220d5799f7b22c6838409ee9bc58ee5c95361a4d7831f03cc225b5a", size = 383378, upload-time = "2025-06-10T00:44:10.51Z" }, + { url = "https://files.pythonhosted.org/packages/af/85/9363f77bdfa1e4d690957cd39d192c4cacd1c58965df0470a4905253b54f/yarl-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:564ab3d517e3d01c408c67f2e5247aad4019dcf1969982aba3974b4093279004", size = 374069, upload-time = "2025-06-10T00:44:12.834Z" }, + { url = "https://files.pythonhosted.org/packages/35/99/9918c8739ba271dcd935400cff8b32e3cd319eaf02fcd023d5dcd487a7c8/yarl-1.20.1-cp312-cp312-win32.whl", hash = "sha256:daea0d313868da1cf2fac6b2d3a25c6e3a9e879483244be38c8e6a41f1d876a5", size = 81249, upload-time = "2025-06-10T00:44:14.731Z" }, + { url = 
"https://files.pythonhosted.org/packages/eb/83/5d9092950565481b413b31a23e75dd3418ff0a277d6e0abf3729d4d1ce25/yarl-1.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:48ea7d7f9be0487339828a4de0360d7ce0efc06524a48e1810f945c45b813698", size = 86710, upload-time = "2025-06-10T00:44:16.716Z" }, + { url = "https://files.pythonhosted.org/packages/8a/e1/2411b6d7f769a07687acee88a062af5833cf1966b7266f3d8dfb3d3dc7d3/yarl-1.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0b5ff0fbb7c9f1b1b5ab53330acbfc5247893069e7716840c8e7d5bb7355038a", size = 131811, upload-time = "2025-06-10T00:44:18.933Z" }, + { url = "https://files.pythonhosted.org/packages/b2/27/584394e1cb76fb771371770eccad35de400e7b434ce3142c2dd27392c968/yarl-1.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:14f326acd845c2b2e2eb38fb1346c94f7f3b01a4f5c788f8144f9b630bfff9a3", size = 90078, upload-time = "2025-06-10T00:44:20.635Z" }, + { url = "https://files.pythonhosted.org/packages/bf/9a/3246ae92d4049099f52d9b0fe3486e3b500e29b7ea872d0f152966fc209d/yarl-1.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f60e4ad5db23f0b96e49c018596707c3ae89f5d0bd97f0ad3684bcbad899f1e7", size = 88748, upload-time = "2025-06-10T00:44:22.34Z" }, + { url = "https://files.pythonhosted.org/packages/a3/25/35afe384e31115a1a801fbcf84012d7a066d89035befae7c5d4284df1e03/yarl-1.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49bdd1b8e00ce57e68ba51916e4bb04461746e794e7c4d4bbc42ba2f18297691", size = 349595, upload-time = "2025-06-10T00:44:24.314Z" }, + { url = "https://files.pythonhosted.org/packages/28/2d/8aca6cb2cabc8f12efcb82749b9cefecbccfc7b0384e56cd71058ccee433/yarl-1.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:66252d780b45189975abfed839616e8fd2dbacbdc262105ad7742c6ae58f3e31", size = 342616, upload-time = "2025-06-10T00:44:26.167Z" }, + { url = 
"https://files.pythonhosted.org/packages/0b/e9/1312633d16b31acf0098d30440ca855e3492d66623dafb8e25b03d00c3da/yarl-1.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59174e7332f5d153d8f7452a102b103e2e74035ad085f404df2e40e663a22b28", size = 361324, upload-time = "2025-06-10T00:44:27.915Z" }, + { url = "https://files.pythonhosted.org/packages/bc/a0/688cc99463f12f7669eec7c8acc71ef56a1521b99eab7cd3abb75af887b0/yarl-1.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3968ec7d92a0c0f9ac34d5ecfd03869ec0cab0697c91a45db3fbbd95fe1b653", size = 359676, upload-time = "2025-06-10T00:44:30.041Z" }, + { url = "https://files.pythonhosted.org/packages/af/44/46407d7f7a56e9a85a4c207724c9f2c545c060380718eea9088f222ba697/yarl-1.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1a4fbb50e14396ba3d375f68bfe02215d8e7bc3ec49da8341fe3157f59d2ff5", size = 352614, upload-time = "2025-06-10T00:44:32.171Z" }, + { url = "https://files.pythonhosted.org/packages/b1/91/31163295e82b8d5485d31d9cf7754d973d41915cadce070491778d9c9825/yarl-1.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11a62c839c3a8eac2410e951301309426f368388ff2f33799052787035793b02", size = 336766, upload-time = "2025-06-10T00:44:34.494Z" }, + { url = "https://files.pythonhosted.org/packages/b4/8e/c41a5bc482121f51c083c4c2bcd16b9e01e1cf8729e380273a952513a21f/yarl-1.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:041eaa14f73ff5a8986b4388ac6bb43a77f2ea09bf1913df7a35d4646db69e53", size = 364615, upload-time = "2025-06-10T00:44:36.856Z" }, + { url = "https://files.pythonhosted.org/packages/e3/5b/61a3b054238d33d70ea06ebba7e58597891b71c699e247df35cc984ab393/yarl-1.20.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:377fae2fef158e8fd9d60b4c8751387b8d1fb121d3d0b8e9b0be07d1b41e83dc", size = 360982, upload-time = "2025-06-10T00:44:39.141Z" }, + { url = 
"https://files.pythonhosted.org/packages/df/a3/6a72fb83f8d478cb201d14927bc8040af901811a88e0ff2da7842dd0ed19/yarl-1.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1c92f4390e407513f619d49319023664643d3339bd5e5a56a3bebe01bc67ec04", size = 369792, upload-time = "2025-06-10T00:44:40.934Z" }, + { url = "https://files.pythonhosted.org/packages/7c/af/4cc3c36dfc7c077f8dedb561eb21f69e1e9f2456b91b593882b0b18c19dc/yarl-1.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d25ddcf954df1754ab0f86bb696af765c5bfaba39b74095f27eececa049ef9a4", size = 382049, upload-time = "2025-06-10T00:44:42.854Z" }, + { url = "https://files.pythonhosted.org/packages/19/3a/e54e2c4752160115183a66dc9ee75a153f81f3ab2ba4bf79c3c53b33de34/yarl-1.20.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:909313577e9619dcff8c31a0ea2aa0a2a828341d92673015456b3ae492e7317b", size = 384774, upload-time = "2025-06-10T00:44:45.275Z" }, + { url = "https://files.pythonhosted.org/packages/9c/20/200ae86dabfca89060ec6447649f219b4cbd94531e425e50d57e5f5ac330/yarl-1.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:793fd0580cb9664548c6b83c63b43c477212c0260891ddf86809e1c06c8b08f1", size = 374252, upload-time = "2025-06-10T00:44:47.31Z" }, + { url = "https://files.pythonhosted.org/packages/83/75/11ee332f2f516b3d094e89448da73d557687f7d137d5a0f48c40ff211487/yarl-1.20.1-cp313-cp313-win32.whl", hash = "sha256:468f6e40285de5a5b3c44981ca3a319a4b208ccc07d526b20b12aeedcfa654b7", size = 81198, upload-time = "2025-06-10T00:44:49.164Z" }, + { url = "https://files.pythonhosted.org/packages/ba/ba/39b1ecbf51620b40ab402b0fc817f0ff750f6d92712b44689c2c215be89d/yarl-1.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:495b4ef2fea40596bfc0affe3837411d6aa3371abcf31aac0ccc4bdd64d4ef5c", size = 86346, upload-time = "2025-06-10T00:44:51.182Z" }, + { url = "https://files.pythonhosted.org/packages/43/c7/669c52519dca4c95153c8ad96dd123c79f354a376346b198f438e56ffeb4/yarl-1.20.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = 
"sha256:f60233b98423aab21d249a30eb27c389c14929f47be8430efa7dbd91493a729d", size = 138826, upload-time = "2025-06-10T00:44:52.883Z" }, + { url = "https://files.pythonhosted.org/packages/6a/42/fc0053719b44f6ad04a75d7f05e0e9674d45ef62f2d9ad2c1163e5c05827/yarl-1.20.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:6f3eff4cc3f03d650d8755c6eefc844edde99d641d0dcf4da3ab27141a5f8ddf", size = 93217, upload-time = "2025-06-10T00:44:54.658Z" }, + { url = "https://files.pythonhosted.org/packages/4f/7f/fa59c4c27e2a076bba0d959386e26eba77eb52ea4a0aac48e3515c186b4c/yarl-1.20.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:69ff8439d8ba832d6bed88af2c2b3445977eba9a4588b787b32945871c2444e3", size = 92700, upload-time = "2025-06-10T00:44:56.784Z" }, + { url = "https://files.pythonhosted.org/packages/2f/d4/062b2f48e7c93481e88eff97a6312dca15ea200e959f23e96d8ab898c5b8/yarl-1.20.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cf34efa60eb81dd2645a2e13e00bb98b76c35ab5061a3989c7a70f78c85006d", size = 347644, upload-time = "2025-06-10T00:44:59.071Z" }, + { url = "https://files.pythonhosted.org/packages/89/47/78b7f40d13c8f62b499cc702fdf69e090455518ae544c00a3bf4afc9fc77/yarl-1.20.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8e0fe9364ad0fddab2688ce72cb7a8e61ea42eff3c7caeeb83874a5d479c896c", size = 323452, upload-time = "2025-06-10T00:45:01.605Z" }, + { url = "https://files.pythonhosted.org/packages/eb/2b/490d3b2dc66f52987d4ee0d3090a147ea67732ce6b4d61e362c1846d0d32/yarl-1.20.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f64fbf81878ba914562c672024089e3401974a39767747691c65080a67b18c1", size = 346378, upload-time = "2025-06-10T00:45:03.946Z" }, + { url = "https://files.pythonhosted.org/packages/66/ad/775da9c8a94ce925d1537f939a4f17d782efef1f973039d821cbe4bcc211/yarl-1.20.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:f6342d643bf9a1de97e512e45e4b9560a043347e779a173250824f8b254bd5ce", size = 353261, upload-time = "2025-06-10T00:45:05.992Z" }, + { url = "https://files.pythonhosted.org/packages/4b/23/0ed0922b47a4f5c6eb9065d5ff1e459747226ddce5c6a4c111e728c9f701/yarl-1.20.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56dac5f452ed25eef0f6e3c6a066c6ab68971d96a9fb441791cad0efba6140d3", size = 335987, upload-time = "2025-06-10T00:45:08.227Z" }, + { url = "https://files.pythonhosted.org/packages/3e/49/bc728a7fe7d0e9336e2b78f0958a2d6b288ba89f25a1762407a222bf53c3/yarl-1.20.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7d7f497126d65e2cad8dc5f97d34c27b19199b6414a40cb36b52f41b79014be", size = 329361, upload-time = "2025-06-10T00:45:10.11Z" }, + { url = "https://files.pythonhosted.org/packages/93/8f/b811b9d1f617c83c907e7082a76e2b92b655400e61730cd61a1f67178393/yarl-1.20.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:67e708dfb8e78d8a19169818eeb5c7a80717562de9051bf2413aca8e3696bf16", size = 346460, upload-time = "2025-06-10T00:45:12.055Z" }, + { url = "https://files.pythonhosted.org/packages/70/fd/af94f04f275f95da2c3b8b5e1d49e3e79f1ed8b6ceb0f1664cbd902773ff/yarl-1.20.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:595c07bc79af2494365cc96ddeb772f76272364ef7c80fb892ef9d0649586513", size = 334486, upload-time = "2025-06-10T00:45:13.995Z" }, + { url = "https://files.pythonhosted.org/packages/84/65/04c62e82704e7dd0a9b3f61dbaa8447f8507655fd16c51da0637b39b2910/yarl-1.20.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7bdd2f80f4a7df852ab9ab49484a4dee8030023aa536df41f2d922fd57bf023f", size = 342219, upload-time = "2025-06-10T00:45:16.479Z" }, + { url = "https://files.pythonhosted.org/packages/91/95/459ca62eb958381b342d94ab9a4b6aec1ddec1f7057c487e926f03c06d30/yarl-1.20.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = 
"sha256:c03bfebc4ae8d862f853a9757199677ab74ec25424d0ebd68a0027e9c639a390", size = 350693, upload-time = "2025-06-10T00:45:18.399Z" }, + { url = "https://files.pythonhosted.org/packages/a6/00/d393e82dd955ad20617abc546a8f1aee40534d599ff555ea053d0ec9bf03/yarl-1.20.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:344d1103e9c1523f32a5ed704d576172d2cabed3122ea90b1d4e11fe17c66458", size = 355803, upload-time = "2025-06-10T00:45:20.677Z" }, + { url = "https://files.pythonhosted.org/packages/9e/ed/c5fb04869b99b717985e244fd93029c7a8e8febdfcffa06093e32d7d44e7/yarl-1.20.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:88cab98aa4e13e1ade8c141daeedd300a4603b7132819c484841bb7af3edce9e", size = 341709, upload-time = "2025-06-10T00:45:23.221Z" }, + { url = "https://files.pythonhosted.org/packages/24/fd/725b8e73ac2a50e78a4534ac43c6addf5c1c2d65380dd48a9169cc6739a9/yarl-1.20.1-cp313-cp313t-win32.whl", hash = "sha256:b121ff6a7cbd4abc28985b6028235491941b9fe8fe226e6fdc539c977ea1739d", size = 86591, upload-time = "2025-06-10T00:45:25.793Z" }, + { url = "https://files.pythonhosted.org/packages/94/c3/b2e9f38bc3e11191981d57ea08cab2166e74ea770024a646617c9cddd9f6/yarl-1.20.1-cp313-cp313t-win_amd64.whl", hash = "sha256:541d050a355bbbc27e55d906bc91cb6fe42f96c01413dd0f4ed5a5240513874f", size = 93003, upload-time = "2025-06-10T00:45:27.752Z" }, + { url = "https://files.pythonhosted.org/packages/b4/2d/2345fce04cfd4bee161bf1e7d9cdc702e3e16109021035dbb24db654a622/yarl-1.20.1-py3-none-any.whl", hash = "sha256:83b8eb083fe4683c6115795d9fc1cfaf2cbbefb19b3a1cb68f6527460f483a77", size = 46542, upload-time = "2025-06-10T00:46:07.521Z" }, +] + +[[package]] +name = "zope-event" +version = "5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/c2/427f1867bb96555d1d34342f1dd97f8c420966ab564d58d18469a1db8736/zope.event-5.0.tar.gz", hash = 
"sha256:bac440d8d9891b4068e2b5a2c5e2c9765a9df762944bda6955f96bb9b91e67cd", size = 17350, upload-time = "2023-06-23T06:28:35.709Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/42/f8dbc2b9ad59e927940325a22d6d3931d630c3644dae7e2369ef5d9ba230/zope.event-5.0-py3-none-any.whl", hash = "sha256:2832e95014f4db26c47a13fdaef84cef2f4df37e66b59d8f1f4a8f319a632c26", size = 6824, upload-time = "2023-06-23T06:28:32.652Z" }, +] + +[[package]] +name = "zope-interface" +version = "7.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/30/93/9210e7606be57a2dfc6277ac97dcc864fd8d39f142ca194fdc186d596fda/zope.interface-7.2.tar.gz", hash = "sha256:8b49f1a3d1ee4cdaf5b32d2e738362c7f5e40ac8b46dd7d1a65e82a4872728fe", size = 252960, upload-time = "2024-11-28T08:45:39.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/0b/c7516bc3bad144c2496f355e35bd699443b82e9437aa02d9867653203b4a/zope.interface-7.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:086ee2f51eaef1e4a52bd7d3111a0404081dadae87f84c0ad4ce2649d4f708b7", size = 208959, upload-time = "2024-11-28T08:47:47.788Z" }, + { url = "https://files.pythonhosted.org/packages/a2/e9/1463036df1f78ff8c45a02642a7bf6931ae4a38a4acd6a8e07c128e387a7/zope.interface-7.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:21328fcc9d5b80768bf051faa35ab98fb979080c18e6f84ab3f27ce703bce465", size = 209357, upload-time = "2024-11-28T08:47:50.897Z" }, + { url = "https://files.pythonhosted.org/packages/07/a8/106ca4c2add440728e382f1b16c7d886563602487bdd90004788d45eb310/zope.interface-7.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6dd02ec01f4468da0f234da9d9c8545c5412fef80bc590cc51d8dd084138a89", size = 264235, upload-time = "2024-11-28T09:18:15.56Z" }, + { url = 
"https://files.pythonhosted.org/packages/fc/ca/57286866285f4b8a4634c12ca1957c24bdac06eae28fd4a3a578e30cf906/zope.interface-7.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e7da17f53e25d1a3bde5da4601e026adc9e8071f9f6f936d0fe3fe84ace6d54", size = 259253, upload-time = "2024-11-28T08:48:29.025Z" }, + { url = "https://files.pythonhosted.org/packages/96/08/2103587ebc989b455cf05e858e7fbdfeedfc3373358320e9c513428290b1/zope.interface-7.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cab15ff4832580aa440dc9790b8a6128abd0b88b7ee4dd56abacbc52f212209d", size = 264702, upload-time = "2024-11-28T08:48:37.363Z" }, + { url = "https://files.pythonhosted.org/packages/5f/c7/3c67562e03b3752ba4ab6b23355f15a58ac2d023a6ef763caaca430f91f2/zope.interface-7.2-cp312-cp312-win_amd64.whl", hash = "sha256:29caad142a2355ce7cfea48725aa8bcf0067e2b5cc63fcf5cd9f97ad12d6afb5", size = 212466, upload-time = "2024-11-28T08:49:14.397Z" }, + { url = "https://files.pythonhosted.org/packages/c6/3b/e309d731712c1a1866d61b5356a069dd44e5b01e394b6cb49848fa2efbff/zope.interface-7.2-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:3e0350b51e88658d5ad126c6a57502b19d5f559f6cb0a628e3dc90442b53dd98", size = 208961, upload-time = "2024-11-28T08:48:29.865Z" }, + { url = "https://files.pythonhosted.org/packages/49/65/78e7cebca6be07c8fc4032bfbb123e500d60efdf7b86727bb8a071992108/zope.interface-7.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15398c000c094b8855d7d74f4fdc9e73aa02d4d0d5c775acdef98cdb1119768d", size = 209356, upload-time = "2024-11-28T08:48:33.297Z" }, + { url = "https://files.pythonhosted.org/packages/11/b1/627384b745310d082d29e3695db5f5a9188186676912c14b61a78bbc6afe/zope.interface-7.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:802176a9f99bd8cc276dcd3b8512808716492f6f557c11196d42e26c01a69a4c", size = 264196, upload-time = 
"2024-11-28T09:18:17.584Z" }, + { url = "https://files.pythonhosted.org/packages/b8/f6/54548df6dc73e30ac6c8a7ff1da73ac9007ba38f866397091d5a82237bd3/zope.interface-7.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb23f58a446a7f09db85eda09521a498e109f137b85fb278edb2e34841055398", size = 259237, upload-time = "2024-11-28T08:48:31.71Z" }, + { url = "https://files.pythonhosted.org/packages/b6/66/ac05b741c2129fdf668b85631d2268421c5cd1a9ff99be1674371139d665/zope.interface-7.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a71a5b541078d0ebe373a81a3b7e71432c61d12e660f1d67896ca62d9628045b", size = 264696, upload-time = "2024-11-28T08:48:41.161Z" }, + { url = "https://files.pythonhosted.org/packages/0a/2f/1bccc6f4cc882662162a1158cda1a7f616add2ffe322b28c99cb031b4ffc/zope.interface-7.2-cp313-cp313-win_amd64.whl", hash = "sha256:4893395d5dd2ba655c38ceb13014fd65667740f09fa5bb01caa1e6284e48c0cd", size = 212472, upload-time = "2024-11-28T08:49:56.587Z" }, +] From e0876388287e0359217f3a5582e3e1e5acd8e99d Mon Sep 17 00:00:00 2001 From: zchengrong Date: Tue, 24 Jun 2025 17:36:31 +0800 Subject: [PATCH 089/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20relight=20=E5=9B=BE=E7=89=87=E5=B0=BA=E5=AF=B8?= =?UTF-8?q?=E8=87=AA=E9=80=82=E5=BA=94=20docs=EF=BC=88=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D?= =?UTF-8?q?=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95?= =?UTF-8?q?):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_test.py | 9 +++- .../service_generate_relight_image.py | 44 ++++++++++++++++++- 2 files changed, 50 insertions(+), 3 deletions(-) diff --git a/app/api/api_test.py b/app/api/api_test.py index 0f8b98f..a7b965c 100644 --- 
a/app/api/api_test.py +++ b/app/api/api_test.py @@ -4,7 +4,7 @@ import logging from fastapi import APIRouter from fastapi import HTTPException -from app.core.config import SR_RABBITMQ_QUEUES, GI_RABBITMQ_QUEUES, GPI_RABBITMQ_QUEUES, GRI_RABBITMQ_QUEUES, OSS, JAVA_STREAM_API_URL, GMV_RABBITMQ_QUEUES, SLOGAN_RABBITMQ_QUEUES, GEN_SINGLE_LOGO_RABBITMQ_QUEUES +from app.core.config import SR_RABBITMQ_QUEUES, GI_RABBITMQ_QUEUES, GPI_RABBITMQ_QUEUES, GRI_RABBITMQ_QUEUES, OSS, JAVA_STREAM_API_URL, GMV_RABBITMQ_QUEUES, SLOGAN_RABBITMQ_QUEUES, GEN_SINGLE_LOGO_RABBITMQ_QUEUES, PS_RABBITMQ_QUEUES, BATCH_GPI_RABBITMQ_QUEUES, BATCH_GRI_RABBITMQ_QUEUES, BATCH_PS_RABBITMQ_QUEUES from app.schemas.response_template import ResponseModel logger = logging.getLogger() @@ -16,10 +16,17 @@ def test(id: int): data = { "超分 SR_RABBITMQ_QUEUES": SR_RABBITMQ_QUEUES, "多视角 GMV_RABBITMQ_QUEUES": GMV_RABBITMQ_QUEUES, + "pose transform PS_RABBITMQ_QUEUES": PS_RABBITMQ_QUEUES, "logan SLOGAN_RABBITMQ_QUEUES": SLOGAN_RABBITMQ_QUEUES, "image and single logo GI_RABBITMQ_QUEUES": GI_RABBITMQ_QUEUES, "to product image GPI_RABBITMQ_QUEUES": GPI_RABBITMQ_QUEUES, "relight GRI_RABBITMQ_QUEUES": GRI_RABBITMQ_QUEUES, + + # batch + "batch product BATCH_GPI_RABBITMQ_QUEUES": BATCH_GPI_RABBITMQ_QUEUES, + "batch relight BATCH_GRI_RABBITMQ_QUEUES": BATCH_GRI_RABBITMQ_QUEUES, + "batch pose transform BATCH_PS_RABBITMQ_QUEUES": BATCH_PS_RABBITMQ_QUEUES, + "JAVA_STREAM_API_URL": JAVA_STREAM_API_URL, "local_oss_server": OSS } diff --git a/app/service/generate_image/service_generate_relight_image.py b/app/service/generate_image/service_generate_relight_image.py index 3723ee9..7db12c2 100644 --- a/app/service/generate_image/service_generate_relight_image.py +++ b/app/service/generate_image/service_generate_relight_image.py @@ -39,7 +39,7 @@ class GenerateRelightImage: self.negative_prompt = 'lowres, bad anatomy, bad hands, cropped, worst quality' self.direction = request_data.direction self.image_url = 
request_data.image_url - self.image = oss_get_image(bucket=self.image_url.split('/')[0], object_name=self.image_url[self.image_url.find('/') + 1:], data_type="cv2") + self.image = pre_processing_image(self.image_url) self.tasks_id = request_data.tasks_id self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:] self.gen_product_data = {'tasks_id': self.tasks_id, 'status': 'PENDING', 'message': "pending", 'image_url': ''} @@ -137,6 +137,46 @@ class GenerateRelightImage: if not DEBUG: publish_status(str_gen_product_data, GRI_RABBITMQ_QUEUES) +def pre_processing_image(image_url): + image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL") + # 目标图片的尺寸 + target_width = 512 + target_height = 768 + + # 原始图片的尺寸 + original_width, original_height = image.size + + # 计算宽度和高度的缩放比例 + width_ratio = target_width / original_width + height_ratio = target_height / original_height + + # 选择较小的缩放比例,确保图片能完整放入目标图片中 + scale_ratio = min(width_ratio, height_ratio) + + # 计算调整后的尺寸 + new_width = int(original_width * scale_ratio) + new_height = int(original_height * scale_ratio) + + # 调整图片大小 + resized_image = image.resize((new_width, new_height)) + + # 创建一个 512x768 的透明图片 + result_image = Image.new("RGBA", (target_width, target_height), (255, 255, 255, 0)) + + # 计算需要粘贴的位置,使图片居中 + x_offset = (target_width - new_width) // 2 + y_offset = (target_height - new_height) // 2 + + # 将调整大小后的图片粘贴到透明图片上 + if resized_image.mode == "RGBA": + result_image.paste(resized_image, (x_offset, y_offset), mask=resized_image.split()[3]) + else: + result_image.paste(resized_image, (x_offset, y_offset)) + + image = np.array(result_image) + + # image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA) + return image def infer_cancel(tasks_id): redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) @@ -153,7 +193,7 @@ if __name__ == '__main__': prompt="Colorful black", 
image_url='aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png', direction="Right Light", - product_type="single" + product_type="overall" ) server = GenerateRelightImage(rd) print(server.get_result()) From 8cfe67c256291fa140e9ad0cbeb8ef2da7192881 Mon Sep 17 00:00:00 2001 From: zchengrong Date: Mon, 30 Jun 2025 11:29:19 +0800 Subject: [PATCH 090/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20=20=E6=96=B0=E5=A2=9Eagent=20=E5=B7=A5?= =?UTF-8?q?=E5=85=B7,=E5=9B=BE=E7=89=87=E7=94=9F=E6=88=90=E6=8E=A5?= =?UTF-8?q?=E5=8F=A3=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug=EF=BC=89:=20docs?= =?UTF-8?q?=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refac?= =?UTF-8?q?tor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_generate_image.py | 49 +++++- app/schemas/generate_image.py | 13 ++ .../service_agent_tool_generate_image.py | 149 ++++++++++++++++++ .../generate_image/service_generate_image.py | 2 +- .../project_info_extraction/service.py | 11 +- 5 files changed, 220 insertions(+), 4 deletions(-) create mode 100644 app/service/generate_image/service_agent_tool_generate_image.py diff --git a/app/api/api_generate_image.py b/app/api/api_generate_image.py index 2706abd..5bd5404 100644 --- a/app/api/api_generate_image.py +++ b/app/api/api_generate_image.py @@ -3,10 +3,11 @@ import logging from fastapi import APIRouter, BackgroundTasks, HTTPException -from app.schemas.generate_image import GenerateImageModel, GenerateProductImageModel, GenerateSingleLogoImageModel, GenerateRelightImageModel, GenerateMultiViewModel, BatchGenerateProductImageModel, BatchGenerateRelightImageModel +from app.schemas.generate_image import GenerateImageModel, GenerateProductImageModel, GenerateSingleLogoImageModel, GenerateRelightImageModel, GenerateMultiViewModel, BatchGenerateProductImageModel, 
BatchGenerateRelightImageModel, AgentTollGenerateImageModel from app.schemas.pose_transform import BatchPoseTransformModel from app.schemas.response_template import ResponseModel from app.service.generate_batch_image.service import start_product_batch_generate, start_relight_batch_generate, start_pose_transform_batch_generate +from app.service.generate_image.service_agent_tool_generate_image import AgentToolGenerateImage from app.service.generate_image.service_generate_image import GenerateImage, infer_cancel as generate_image_infer_cancel from app.service.generate_image.service_generate_multi_view import GenerateMultiView, infer_cancel as generate_multi_view_cancel from app.service.generate_image.service_generate_product_image import GenerateProductImage, infer_cancel as generate_product_image_cancel @@ -304,3 +305,49 @@ async def batch_generate_pose_transform(request_batch_item: BatchPoseTransformMo } """ return await start_pose_transform_batch_generate(request_batch_item) + + +"""agent tool""" + + +@router.post("/agent_tool_generate_image") +def agent_tool_generate_image(request_item: AgentTollGenerateImageModel, background_tasks: BackgroundTasks): + """ + 创建一个具有以下参数的请求体: + - **prompt**: 想要生成图片的描述词 + - **category**: 生成图片的类别,sketch print 等等 + - **gender**: 生成sketch专用,服装类别 + - **version**: 使用模型版本 fast 或者 high + - **size**: 生成数量 + - **version**: 使用模型版本 fast 或者 high + + + 示例参数: + { + "prompt": "a single item of sketch of Wabi-sabi, skirt, tiered, 4k, white background", + "category": "sketch", + "gender": "male", + "size":2, + "version":"high" + } + """ + try: + logger.info(f"agent_tool_generate_image request item is : @@@@@@:{request_item.dict()}") + request_data = request_item.dict() + service = AgentToolGenerateImage(request_data['version']) + image_url_list, clothing_category_list = service.get_result( + prompt=request_data['prompt'], + size=request_data['size'], + version=request_data['version'], + category=request_data['category'], + 
gender=request_data['gender'] + ) + data = { + "image_url_list": image_url_list, + "clothing_category_list": clothing_category_list + } + logger.info(f"agent_tool_generate_image response item is : @@@@@@:{data}") + except Exception as e: + logger.warning(f"agent_tool_generate_image Run Exception @@@@@@:{e}") + raise HTTPException(status_code=404, detail=str(e)) + return ResponseModel(data=data) diff --git a/app/schemas/generate_image.py b/app/schemas/generate_image.py index 7d1d864..5062d78 100644 --- a/app/schemas/generate_image.py +++ b/app/schemas/generate_image.py @@ -75,3 +75,16 @@ class BatchGenerateRelightImageModel(BaseModel): batch_tasks_id: str user_id: str batch_data_list: List[RelightItemModel] + + +""" + agent tool generate image +""" + + +class AgentTollGenerateImageModel(BaseModel): + prompt: str + category: str + gender: str + version: str + size: int diff --git a/app/service/generate_image/service_agent_tool_generate_image.py b/app/service/generate_image/service_agent_tool_generate_image.py new file mode 100644 index 0000000..a5c295c --- /dev/null +++ b/app/service/generate_image/service_agent_tool_generate_image.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +""" +@Project :trinity_client +@File :service_att_recognition.py +@Author :周成融 +@Date :2023/7/26 12:01:05 +@detail : +""" +import logging +import time +import uuid +import cv2 +import mmcv +import numpy as np +import pandas as pd +import torch +import tritonclient.http as httpclient +import cv2 +import numpy as np +import tritonclient.grpc as grpcclient +from minio import Minio +from tritonclient.utils import np_to_triton_dtype +from app.core.config import * +from app.service.utils.new_oss_client import oss_upload_image + +logger = logging.getLogger() + +minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) + + +class AgentToolGenerateImage: + def __init__(self, version): + if version == "fast": + self.grpc_client = 
grpcclient.InferenceServerClient(url=FAST_GI_MODEL_URL) + else: + self.grpc_client = grpcclient.InferenceServerClient(url=GI_MODEL_URL) + self.image = np.random.randint(0, 256, (1024, 1024, 3), dtype=np.uint8) + self.triton_client = httpclient.InferenceServerClient(url=DESIGN_MODEL_URL) + + def get_result(self, prompt, size, version, category, gender): + + image_url_list = [] + image_result_list = [] + clothing_category_list = [] + try: + prompts = [prompt] * 1 + modes = ["txt2img"] * 1 + images = [self.image.astype(np.float16)] * 1 + + text_obj = np.array(prompts, dtype="object").reshape((-1, 1)) + mode_obj = np.array(modes, dtype="object").reshape((-1, 1)) + image_obj = np.array(images, dtype=np.float16).reshape((-1, 1024, 1024, 3)) + + input_text = grpcclient.InferInput("prompt", text_obj.shape, np_to_triton_dtype(text_obj.dtype)) + input_image = grpcclient.InferInput("input_image", image_obj.shape, np_to_triton_dtype(image_obj.dtype)) + input_mode = grpcclient.InferInput("mode", mode_obj.shape, np_to_triton_dtype(mode_obj.dtype)) + + input_text.set_data_from_numpy(text_obj) + input_image.set_data_from_numpy(image_obj) + input_mode.set_data_from_numpy(mode_obj) + + inputs = [input_text, input_image, input_mode] + for i in range(size): + if version == "fast": + response = self.grpc_client.infer(model_name=FAST_GI_MODEL_NAME, inputs=inputs, priority=0) + else: + response = self.grpc_client.infer(model_name=GI_MODEL_NAME, inputs=inputs, priority=0) + image = response.as_numpy("generated_image") + image_result = cv2.cvtColor(np.squeeze(image.astype(np.uint8)), cv2.COLOR_RGB2BGR) + _, img_byte_array = cv2.imencode('.jpg', image_result) + + req = oss_upload_image(oss_client=minio_client, bucket='test', object_name=f'{uuid.uuid1()}-{i}.jpg', image_bytes=img_byte_array) + image_url_list.append(f"{req.bucket_name}/{req.object_name}") + image_result_list.append(image_result) + + if category == "sketch": + clothing_category_list = 
self.get_clothing_category(image_result_list, gender) + + return image_url_list, clothing_category_list + except Exception as e: + logger.error(e) + return image_url_list, clothing_category_list + finally: + self.grpc_client.close() + self.triton_client.close() + + def preprocess(self, img): + img = mmcv.imread(img) + img_scale = (224, 224) + img = cv2.resize(img, img_scale) + img = mmcv.imnormalize( + img, + mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), + to_rgb=True) + preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0) + return preprocessed_img + + def get_category(self, image): + inputs = [httpclient.InferInput("input__0", image.shape, datatype="FP32")] + inputs[0].set_data_from_numpy(image, binary_data=True) + results = self.triton_client.infer(model_name="attr_retrieve_category", inputs=inputs) + inference_output = torch.from_numpy(results.as_numpy(f'output__0')) + scores = inference_output.detach().numpy() + colattr = list(attr_type['labelName']) + maxsc = np.max(scores[0][:5]) + indexs = np.argwhere(scores == maxsc)[:, 1] + return colattr[indexs[0]] + + def get_clothing_category(self, images, gender): + category_list = [] + for image in images: + sketch = self.preprocess(image) + if gender.lower() == "female": + category_list.append(self.get_category(sketch)) + elif gender.lower() == "male": + category = self.get_category(sketch) + if category == 'Trousers' or category == 'Skirt': + category_list.append('Bottoms') + elif category == 'Blouse' or category == 'Dress': + category_list.append('Tops') + else: + category_list.append('Outwear') + else: + category_list.append(self.get_category(sketch)) + return category_list + + +attr_type = pd.read_csv(CATEGORY_PATH) + +if __name__ == '__main__': + request_data = { + "prompt": "a single item of sketch of Wabi-sabi, skirt, tiered, 4k, white background", + "category": "sketch", + "version": "high", + "size": 2, + "gender": "Female", + } + server = 
AgentToolGenerateImage(request_data['version']) + image_url_list, clothing_category_list = server.get_result( + prompt=request_data['prompt'], + size=request_data['size'], + version=request_data['version'], + category=request_data['category'], + gender=request_data['gender'] + ) + + print(image_url_list) + print(clothing_category_list) diff --git a/app/service/generate_image/service_generate_image.py b/app/service/generate_image/service_generate_image.py index c3ae2d7..7d00b87 100644 --- a/app/service/generate_image/service_generate_image.py +++ b/app/service/generate_image/service_generate_image.py @@ -186,7 +186,7 @@ def infer_cancel(tasks_id): if __name__ == '__main__': rd = GenerateImageModel( tasks_id="123-89", - prompt='a single item of sketch of Wabi-sabi, skirt, tiered, 4k, white background', + prompt="Women's clothing ,dress,technical drawing style, clean line art, no shading, no texture, flat sketch, no human body, no face, centered composition, pure white background, single garmentsingle garment only, front flat view", image_url="aida-collection-element/87/Printboard/842c09cf-7297-42d9-9e6e-9c17d4a13cb5.jpg", mode='txt2img', category="test", diff --git a/app/service/project_info_extraction/service.py b/app/service/project_info_extraction/service.py index 40d59ba..cf9df6c 100644 --- a/app/service/project_info_extraction/service.py +++ b/app/service/project_info_extraction/service.py @@ -15,7 +15,10 @@ process = ['SERIES_DESIGN', 'SINGLE_DESIGN'] class ProjectInfoExtraction: def __init__(self, request_data): # llm generate brand info init - self.model = ChatTongyi(model="qwen2.5-14b-instruct", api_key="sk-7658298c6b99443c98184a5e634fe6ab") + if len(request_data.image_list) or len(request_data.file_list): + self.model = ChatTongyi(model="qwen-vl-plus", api_key="sk-7658298c6b99443c98184a5e634fe6ab") + else: + self.model = ChatTongyi(model="qwen2.5-14b-instruct", api_key="sk-7658298c6b99443c98184a5e634fe6ab") self.response_schemas = [ 
ResponseSchema(name="project_name", description="项目的名称."), @@ -55,7 +58,11 @@ class ProjectInfoExtraction: if __name__ == '__main__': request_data = ProjectInfoExtractionModel( - prompt="海边派对主题的衬衫设计" + prompt="性别为儿童", + image_list=[ + 'https://www.minio-api.aida.com.hk/test/019aaeed-3227-11f0-a194-0826ae3ad6b3.jpg?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=vXKFLSJkYeEq2DrSZvkB%2F20250613%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250613T020236Z&X-Amz-Expires=604800&X-Amz-SignedHeaders=host&X-Amz-Signature=a513b706c24134071a489c34f0fa2c0f510e871b8589dc0c08a0f26ea28ee2ff' + ], + file_list=[] ) service = ProjectInfoExtraction(request_data) print(service.get_result()) From 9268a611dda85a22936cf2017de69c76117fe278 Mon Sep 17 00:00:00 2001 From: zchen Date: Tue, 1 Jul 2025 22:42:59 +0800 Subject: [PATCH 091/101] =?UTF-8?q?qwen=20api=20key=20=E6=9B=B4=E6=8D=A2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/core/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/core/config.py b/app/core/config.py index b4e26de..6ec49ed 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -207,7 +207,7 @@ PRIORITY_DICT = { 'earring_back': -99, } -QWEN_API_KEY = "sk-a6bdf594e1f54a4aa3e9d4d48f8c661f" +QWEN_API_KEY = "sk-f31c29e61ac2498ba5e307aaa6dc10e0" DB_CONFIG = { "host": "18.167.251.121", From 40fb9c981d07be20fefbc221067db5327f1a7dbe Mon Sep 17 00:00:00 2001 From: zchen Date: Wed, 2 Jul 2025 18:22:22 +0800 Subject: [PATCH 092/101] =?UTF-8?q?batch=20design=20=20image=5Fcategory=20?= =?UTF-8?q?=E5=8F=82=E6=95=B0=E5=AF=B9=E9=BD=90?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/design_batch/design_batch_celery.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/service/design_batch/design_batch_celery.py b/app/service/design_batch/design_batch_celery.py index f5cdc58..3f5eed7 100644 --- 
a/app/service/design_batch/design_batch_celery.py +++ b/app/service/design_batch/design_batch_celery.py @@ -85,7 +85,7 @@ def batch_design(objects_data, tasks_id, json_name): for lay in layers: items_response['layers'].append({ - 'image_category': lay['name'], + 'image_category': "body" if lay['name'] == 'mannequin' else lay['name'], 'position': lay['position'], 'priority': lay.get("priority", None), 'resize_scale': lay['resize_scale'] if "resize_scale" in lay.keys() else None, From ca7e53a7fd43ee4dd21714eea43512f616cd85e5 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Thu, 3 Jul 2025 19:21:19 +0800 Subject: [PATCH 093/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20batch=20relight=20=E5=9B=BE=E7=89=87size=20?= =?UTF-8?q?=E5=A1=AB=E5=85=85=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98?= =?UTF-8?q?=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84?= =?UTF-8?q?=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../service_batch_generate_relight_image.py | 48 +++++++++++++++++-- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/app/service/generate_batch_image/service_batch_generate_relight_image.py b/app/service/generate_batch_image/service_batch_generate_relight_image.py index e75c0cc..0a039d5 100644 --- a/app/service/generate_batch_image/service_batch_generate_relight_image.py +++ b/app/service/generate_batch_image/service_batch_generate_relight_image.py @@ -48,7 +48,7 @@ def batch_generate_relight(batch_request_data): prompt = data['prompt'] product_type = data['product_type'] image_url = data['image_url'] - image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url.split('/', 1)[1], data_type="cv2") + image = pre_processing_image(image_url) tasks_id = data['tasks_id'] prompts = [prompt] * 
1 @@ -149,6 +149,48 @@ def batch_generate_relight(batch_request_data): logger.info(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | batch_tasks_id:{batch_tasks_id} | progress:OK | result_data_list:{result_data_list}") +def pre_processing_image(image_url): + image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL") + # 目标图片的尺寸 + target_width = 512 + target_height = 768 + + # 原始图片的尺寸 + original_width, original_height = image.size + + # 计算宽度和高度的缩放比例 + width_ratio = target_width / original_width + height_ratio = target_height / original_height + + # 选择较小的缩放比例,确保图片能完整放入目标图片中 + scale_ratio = min(width_ratio, height_ratio) + + # 计算调整后的尺寸 + new_width = int(original_width * scale_ratio) + new_height = int(original_height * scale_ratio) + + # 调整图片大小 + resized_image = image.resize((new_width, new_height)) + + # 创建一个 512x768 的透明图片 + result_image = Image.new("RGBA", (target_width, target_height), (255, 255, 255, 0)) + + # 计算需要粘贴的位置,使图片居中 + x_offset = (target_width - new_width) // 2 + y_offset = (target_height - new_height) // 2 + + # 将调整大小后的图片粘贴到透明图片上 + if resized_image.mode == "RGBA": + result_image.paste(resized_image, (x_offset, y_offset), mask=resized_image.split()[3]) + else: + result_image.paste(resized_image, (x_offset, y_offset)) + + image = np.array(result_image) + + # image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA) + return image + + def publish_status(task_id, progress, result): connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) channel = connection.channel() @@ -171,14 +213,14 @@ if __name__ == '__main__': RelightItemModel( tasks_id="123-5464", product_type="overall", - image_url="aida-users/89/product_image/02894523-19b5-46eb-a9c6-2f512f5fec84-0-89.png", + image_url="test/703190759.png", prompt="Colorful black", direction="Right Light", ), RelightItemModel( tasks_id="123-5464123", product_type="overall", - 
image_url="aida-users/89/product_image/02894523-19b5-46eb-a9c6-2f512f5fec84-0-89.png", + image_url="test/703190759.png", direction="Right Light", prompt="Colorful black", ) From b0f9edf2b921edaae644cebec2ead5af356bfb8a Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Fri, 29 Aug 2025 16:13:05 +0800 Subject: [PATCH 094/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20=E4=BF=AE=E5=A4=8Dprint=5Fpainting.py=20?= =?UTF-8?q?=E5=8D=B0=E8=8A=B1=E9=80=9A=E9=81=93=E4=B8=BARGB=E6=97=B6=20?= =?UTF-8?q?=E5=87=BA=E7=8E=B0=E7=9A=84=E5=81=8F=E7=A7=BB=E8=B6=8A=E7=95=8C?= =?UTF-8?q?=E9=97=AE=E9=A2=98=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98?= =?UTF-8?q?=E6=9B=B4=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84?= =?UTF-8?q?=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../design_fast/pipeline/print_painting.py | 163 +++++++++--------- 1 file changed, 81 insertions(+), 82 deletions(-) diff --git a/app/service/design_fast/pipeline/print_painting.py b/app/service/design_fast/pipeline/print_painting.py index 1534f9c..bfdc386 100644 --- a/app/service/design_fast/pipeline/print_painting.py +++ b/app/service/design_fast/pipeline/print_painting.py @@ -55,90 +55,89 @@ class PrintPainting: mask_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8) for i in range(len(single_print['print_path_list'])): image, image_mode = self.read_image(single_print['print_path_list'][i]) - if image_mode == "RGBA": - new_size = (int(result['pattern_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['pattern_image'].shape[0] * single_print['print_scale_list'][i][1])) - mask = image.split()[3] - resized_source = image.resize(new_size) - resized_source_mask = mask.resize(new_size) + if 
image_mode == "RGB": + image_rgba = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA) + image = Image.fromarray(image_rgba) - rotated_resized_source = resized_source.rotate(-single_print['print_angle_list'][i]) - rotated_resized_source_mask = resized_source_mask.rotate(-single_print['print_angle_list'][i]) - - source_image_pil = Image.fromarray(cv2.cvtColor(print_background, cv2.COLOR_BGR2RGB)) - source_image_pil_mask = Image.fromarray(cv2.cvtColor(mask_background, cv2.COLOR_BGR2RGB)) - - source_image_pil.paste(rotated_resized_source, (int(single_print['location'][i][0]), int(single_print['location'][i][1])), rotated_resized_source) - source_image_pil_mask.paste(rotated_resized_source_mask, (int(single_print['location'][i][0]), int(single_print['location'][i][1])), rotated_resized_source_mask) - - print_background = cv2.cvtColor(np.array(source_image_pil), cv2.COLOR_RGBA2BGR) - mask_background = cv2.cvtColor(np.array(source_image_pil_mask), cv2.COLOR_RGBA2BGR) - ret, mask_background = cv2.threshold(mask_background, 124, 255, cv2.THRESH_BINARY) - else: - mask = self.get_mask_inv(image) - mask = np.expand_dims(mask, axis=2) - mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) - mask = cv2.bitwise_not(mask) - - mask = cv2.resize(mask, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) - image = cv2.resize(image, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) - # 旋转后的坐标需要重新算 - rotate_mask, _ = self.img_rotate(mask, single_print['print_angle_list'][i]) - rotate_image, rotated_new_size = self.img_rotate(image, single_print['print_angle_list'][i]) - # x, y = int(result['print']['location'][i][0] - rotated_new_size[0] - (rotate_mask.shape[0] - image.shape[0]) / 2), int(result['print']['location'][i][1] - rotated_new_size[1] - (rotate_mask.shape[1] - image.shape[1]) / 2) - 
x, y = int(single_print['location'][i][0] - rotated_new_size[0]), int(single_print['location'][i][1] - rotated_new_size[1]) - - image_x = print_background.shape[1] - image_y = print_background.shape[0] - print_x = rotate_image.shape[1] - print_y = rotate_image.shape[0] - - # 有bug - # if x + print_x > image_x: - # rotate_image = rotate_image[:, :x + print_x - image_x] - # rotate_mask = rotate_mask[:, :x + print_x - image_x] - # # - # if y + print_y > image_y: - # rotate_image = rotate_image[:y + print_y - image_y] - # rotate_mask = rotate_mask[:y + print_y - image_y] - - # 不能是并行 - # 当前第一轮的if (108以及115)是判断有没有过下界和右界。第二轮的是判断左上有没有超出。 如果这个样子的话,先裁了右边,再左移,region就会有问题 - # 先挪 再判断 最后裁剪 - - # 如果print旋转了 或者 print贴边了 则需要判断 判断左界和上界是否小于0 - if x <= 0: - rotate_image = rotate_image[:, -x:] - rotate_mask = rotate_mask[:, -x:] - start_x = x = 0 - else: - start_x = x - - if y <= 0: - rotate_image = rotate_image[-y:, :] - rotate_mask = rotate_mask[-y:, :] - start_y = y = 0 - else: - start_y = y - - # ------------------ - # 如果print-size大于image-size 则需要裁剪print - - if x + print_x > image_x: - rotate_image = rotate_image[:, :image_x - x] - rotate_mask = rotate_mask[:, :image_x - x] - - if y + print_y > image_y: - rotate_image = rotate_image[:image_y - y, :] - rotate_mask = rotate_mask[:image_y - y, :] - - # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = cv2.bitwise_xor(mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]], rotate_mask) - # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = cv2.add(print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]], rotate_image) - - # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = rotate_mask - # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = rotate_image - mask_background = self.stack_prin(mask_background, result['pattern_image'], 
rotate_mask, start_y, y, start_x, x) - print_background = self.stack_prin(print_background, result['pattern_image'], rotate_image, start_y, y, start_x, x) + new_size = (int(result['pattern_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['pattern_image'].shape[0] * single_print['print_scale_list'][i][1])) + mask = image.split()[3] + resized_source = image.resize(new_size) + resized_source_mask = mask.resize(new_size) + rotated_resized_source = resized_source.rotate(-single_print['print_angle_list'][i]) + rotated_resized_source_mask = resized_source_mask.rotate(-single_print['print_angle_list'][i]) + source_image_pil = Image.fromarray(cv2.cvtColor(print_background, cv2.COLOR_BGR2RGB)) + source_image_pil_mask = Image.fromarray(cv2.cvtColor(mask_background, cv2.COLOR_BGR2RGB)) + source_image_pil.paste(rotated_resized_source, (int(single_print['location'][i][0]), int(single_print['location'][i][1])), rotated_resized_source) + source_image_pil_mask.paste(rotated_resized_source_mask, (int(single_print['location'][i][0]), int(single_print['location'][i][1])), rotated_resized_source_mask) + print_background = cv2.cvtColor(np.array(source_image_pil), cv2.COLOR_RGBA2BGR) + mask_background = cv2.cvtColor(np.array(source_image_pil_mask), cv2.COLOR_RGBA2BGR) + ret, mask_background = cv2.threshold(mask_background, 124, 255, cv2.THRESH_BINARY) + # else: + # mask = self.get_mask_inv(image) + # mask = np.expand_dims(mask, axis=2) + # mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) + # mask = cv2.bitwise_not(mask) + # + # mask = cv2.resize(mask, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) + # image = cv2.resize(image, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) + # # 旋转后的坐标需要重新算 + # rotate_mask, _ = self.img_rotate(mask, 
single_print['print_angle_list'][i]) + # rotate_image, rotated_new_size = self.img_rotate(image, single_print['print_angle_list'][i]) + # # x, y = int(result['print']['location'][i][0] - rotated_new_size[0] - (rotate_mask.shape[0] - image.shape[0]) / 2), int(result['print']['location'][i][1] - rotated_new_size[1] - (rotate_mask.shape[1] - image.shape[1]) / 2) + # x, y = int(single_print['location'][i][0] - rotated_new_size[0]), int(single_print['location'][i][1] - rotated_new_size[1]) + # + # image_x = print_background.shape[1] # 底图宽 + # image_y = print_background.shape[0] # 底图高 + # print_x = rotate_image.shape[1] #印花宽 + # print_y = rotate_image.shape[0] #印花高 + # + # # 有bug + # # if x + print_x > image_x: + # # rotate_image = rotate_image[:, :x + print_x - image_x] + # # rotate_mask = rotate_mask[:, :x + print_x - image_x] + # # # + # # if y + print_y > image_y: + # # rotate_image = rotate_image[:y + print_y - image_y] + # # rotate_mask = rotate_mask[:y + print_y - image_y] + # + # # 不能是并行 + # # 当前第一轮的if (108以及115)是判断有没有过下界和右界。第二轮的是判断左上有没有超出。 如果这个样子的话,先裁了右边,再左移,region就会有问题 + # # 先挪 再判断 最后裁剪 + # + # # 如果print旋转了 或者 print贴边了 则需要判断 判断左界和上界是否小于0 + # if x <= 0: # 如果X轴偏移量小于0,说明印花需要被裁剪至合适大小 或当X轴偏移量大于印花宽度时,裁剪后的印花宽度为0 + # rotate_image = rotate_image[:, -x:] + # rotate_mask = rotate_mask[:, -x:] + # start_x = x = 0 + # else: + # start_x = x + # + # if y <= 0: + # rotate_image = rotate_image[-y:, :] + # rotate_mask = rotate_mask[-y:, :] + # start_y = y = 0 + # else: + # start_y = y + # + # # ------------------ + # # 如果print-size大于image-size 则需要裁剪print + # + # if x + print_x > image_x: + # rotate_image = rotate_image[:, :image_x - x] + # rotate_mask = rotate_mask[:, :image_x - x] + # + # if y + print_y > image_y: + # rotate_image = rotate_image[:image_y - y, :] + # rotate_mask = rotate_mask[:image_y - y, :] + # + # # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = cv2.bitwise_xor(mask_background[start_y:y + rotate_mask.shape[0], start_x:x 
+ rotate_mask.shape[1]], rotate_mask) + # # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = cv2.add(print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]], rotate_image) + # + # # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = rotate_mask + # # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = rotate_image + # mask_background = self.stack_prin(mask_background, result['pattern_image'], rotate_mask, start_y, y, start_x, x) + # print_background = self.stack_prin(print_background, result['pattern_image'], rotate_image, start_y, y, start_x, x) # gray_image = cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY) # print_background = cv2.bitwise_and(print_background, print_background, mask=gray_image) From d102b974a5cfb2452e3f63342c7a7c81fad8ed91 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Fri, 29 Aug 2025 17:44:14 +0800 Subject: [PATCH 095/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):=20java=20=E5=9B=9E?= =?UTF-8?q?=E8=B0=83=20api=E5=9C=B0=E5=9D=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/design_fast/design_generate.py | 1 + 1 file changed, 1 insertion(+) diff --git a/app/service/design_fast/design_generate.py b/app/service/design_fast/design_generate.py index 2f7fa93..4751ed2 100644 --- a/app/service/design_fast/design_generate.py +++ b/app/service/design_fast/design_generate.py @@ -200,6 +200,7 @@ def design_generate_v2(request_data): items_response['synthesis_url'] = synthesis_single(item_result['front_image'], item_result['back_image']) 
# 发送结果给java端 url = JAVA_STREAM_API_URL + logger.info(f"java 回调 -> {url}") headers = { 'Accept': "*/*", 'Accept-Encoding': "gzip, deflate, br", From c47dd977f376ed11d71e3b41a1c49ff2ff1c7732 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Fri, 29 Aug 2025 17:51:54 +0800 Subject: [PATCH 096/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):=20java=20=E5=9B=9E?= =?UTF-8?q?=E8=B0=83=20api=E5=9C=B0=E5=9D=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/design_fast/design_generate.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/app/service/design_fast/design_generate.py b/app/service/design_fast/design_generate.py index 4751ed2..6c698b2 100644 --- a/app/service/design_fast/design_generate.py +++ b/app/service/design_fast/design_generate.py @@ -200,7 +200,11 @@ def design_generate_v2(request_data): items_response['synthesis_url'] = synthesis_single(item_result['front_image'], item_result['back_image']) # 发送结果给java端 url = JAVA_STREAM_API_URL + xu_pei_test_url = "https://cd21b9110505.ngrok-free.app/api/third/party/receiveDesignResults" + logger.info(f"java 回调 -> {url}") + logger.info(f"xupei java 回调 -> {xu_pei_test_url}") + headers = { 'Accept': "*/*", 'Accept-Encoding': "gzip, deflate, br", @@ -214,6 +218,11 @@ def design_generate_v2(request_data): # 打印结果 logger.info(response.text) + response = post_request(xu_pei_test_url, json_data=items_response, headers=headers) + if response: + # 打印结果 + logger.info(f"xupei test response : {response.text}") + for step, object in enumerate(objects_data): t = threading.Thread(target=process_object, args=(step, object)) threads.append(t) From 
c6acf703740acb2e59a60dd3a7dc32b74e7f89f4 Mon Sep 17 00:00:00 2001 From: zchengrong <124802516+zchengrong@users.noreply.github.com> Date: Sat, 30 Aug 2025 09:16:57 +0800 Subject: [PATCH 097/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20=20sketch=E5=9B=BE=E5=B1=82=E6=8B=89=E4=BC=B8?= =?UTF-8?q?=E5=90=8E=20print=20=E6=AF=94=E4=BE=8B=E4=BD=8D=E7=BD=AE?= =?UTF-8?q?=E4=B8=8D=E6=AD=A3=E5=B8=B8=E9=97=AE=E9=A2=98=20docs=EF=BC=88?= =?UTF-8?q?=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4=EF=BC=89:=20refactor?= =?UTF-8?q?=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20test(=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95):?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../design_fast/pipeline/print_painting.py | 163 +++++++++--------- 1 file changed, 81 insertions(+), 82 deletions(-) diff --git a/app/service/design_fast/pipeline/print_painting.py b/app/service/design_fast/pipeline/print_painting.py index bfdc386..aa3f7b8 100644 --- a/app/service/design_fast/pipeline/print_painting.py +++ b/app/service/design_fast/pipeline/print_painting.py @@ -33,7 +33,6 @@ class PrintPainting: result['mask'] = cv2.resize(result['mask'], (new_width, new_height)) result['gray'] = cv2.resize(result['gray'], (new_width, new_height)) - print(1) if overall_print['print_path_list']: painting_dict = {'dim_image_h': result['pattern_image'].shape[0], 'dim_image_w': result['pattern_image'].shape[1]} result['print_image'] = result['pattern_image'] @@ -56,88 +55,88 @@ class PrintPainting: for i in range(len(single_print['print_path_list'])): image, image_mode = self.read_image(single_print['print_path_list'][i]) - if image_mode == "RGB": - image_rgba = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA) - image = Image.fromarray(image_rgba) + if image_mode == "RGBA": + # image_rgba = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA) + # image = Image.fromarray(image_rgba) - new_size = 
(int(result['pattern_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['pattern_image'].shape[0] * single_print['print_scale_list'][i][1])) - mask = image.split()[3] - resized_source = image.resize(new_size) - resized_source_mask = mask.resize(new_size) - rotated_resized_source = resized_source.rotate(-single_print['print_angle_list'][i]) - rotated_resized_source_mask = resized_source_mask.rotate(-single_print['print_angle_list'][i]) - source_image_pil = Image.fromarray(cv2.cvtColor(print_background, cv2.COLOR_BGR2RGB)) - source_image_pil_mask = Image.fromarray(cv2.cvtColor(mask_background, cv2.COLOR_BGR2RGB)) - source_image_pil.paste(rotated_resized_source, (int(single_print['location'][i][0]), int(single_print['location'][i][1])), rotated_resized_source) - source_image_pil_mask.paste(rotated_resized_source_mask, (int(single_print['location'][i][0]), int(single_print['location'][i][1])), rotated_resized_source_mask) - print_background = cv2.cvtColor(np.array(source_image_pil), cv2.COLOR_RGBA2BGR) - mask_background = cv2.cvtColor(np.array(source_image_pil_mask), cv2.COLOR_RGBA2BGR) - ret, mask_background = cv2.threshold(mask_background, 124, 255, cv2.THRESH_BINARY) - # else: - # mask = self.get_mask_inv(image) - # mask = np.expand_dims(mask, axis=2) - # mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) - # mask = cv2.bitwise_not(mask) - # - # mask = cv2.resize(mask, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) - # image = cv2.resize(image, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) - # # 旋转后的坐标需要重新算 - # rotate_mask, _ = self.img_rotate(mask, single_print['print_angle_list'][i]) - # rotate_image, rotated_new_size = self.img_rotate(image, single_print['print_angle_list'][i]) - # # x, y = int(result['print']['location'][i][0] - 
rotated_new_size[0] - (rotate_mask.shape[0] - image.shape[0]) / 2), int(result['print']['location'][i][1] - rotated_new_size[1] - (rotate_mask.shape[1] - image.shape[1]) / 2) - # x, y = int(single_print['location'][i][0] - rotated_new_size[0]), int(single_print['location'][i][1] - rotated_new_size[1]) - # - # image_x = print_background.shape[1] # 底图宽 - # image_y = print_background.shape[0] # 底图高 - # print_x = rotate_image.shape[1] #印花宽 - # print_y = rotate_image.shape[0] #印花高 - # - # # 有bug - # # if x + print_x > image_x: - # # rotate_image = rotate_image[:, :x + print_x - image_x] - # # rotate_mask = rotate_mask[:, :x + print_x - image_x] - # # # - # # if y + print_y > image_y: - # # rotate_image = rotate_image[:y + print_y - image_y] - # # rotate_mask = rotate_mask[:y + print_y - image_y] - # - # # 不能是并行 - # # 当前第一轮的if (108以及115)是判断有没有过下界和右界。第二轮的是判断左上有没有超出。 如果这个样子的话,先裁了右边,再左移,region就会有问题 - # # 先挪 再判断 最后裁剪 - # - # # 如果print旋转了 或者 print贴边了 则需要判断 判断左界和上界是否小于0 - # if x <= 0: # 如果X轴偏移量小于0,说明印花需要被裁剪至合适大小 或当X轴偏移量大于印花宽度时,裁剪后的印花宽度为0 - # rotate_image = rotate_image[:, -x:] - # rotate_mask = rotate_mask[:, -x:] - # start_x = x = 0 - # else: - # start_x = x - # - # if y <= 0: - # rotate_image = rotate_image[-y:, :] - # rotate_mask = rotate_mask[-y:, :] - # start_y = y = 0 - # else: - # start_y = y - # - # # ------------------ - # # 如果print-size大于image-size 则需要裁剪print - # - # if x + print_x > image_x: - # rotate_image = rotate_image[:, :image_x - x] - # rotate_mask = rotate_mask[:, :image_x - x] - # - # if y + print_y > image_y: - # rotate_image = rotate_image[:image_y - y, :] - # rotate_mask = rotate_mask[:image_y - y, :] - # - # # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = cv2.bitwise_xor(mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]], rotate_mask) - # # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = cv2.add(print_background[start_y:y + 
rotate_image.shape[0], start_x:x + rotate_image.shape[1]], rotate_image) - # - # # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = rotate_mask - # # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = rotate_image - # mask_background = self.stack_prin(mask_background, result['pattern_image'], rotate_mask, start_y, y, start_x, x) - # print_background = self.stack_prin(print_background, result['pattern_image'], rotate_image, start_y, y, start_x, x) + new_size = (int(result['pattern_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['pattern_image'].shape[0] * single_print['print_scale_list'][i][1])) + mask = image.split()[3] + resized_source = image.resize(new_size) + resized_source_mask = mask.resize(new_size) + rotated_resized_source = resized_source.rotate(-single_print['print_angle_list'][i]) + rotated_resized_source_mask = resized_source_mask.rotate(-single_print['print_angle_list'][i]) + source_image_pil = Image.fromarray(cv2.cvtColor(print_background, cv2.COLOR_BGR2RGB)) + source_image_pil_mask = Image.fromarray(cv2.cvtColor(mask_background, cv2.COLOR_BGR2RGB)) + source_image_pil.paste(rotated_resized_source, (int(single_print['location'][i][0]), int(single_print['location'][i][1])), rotated_resized_source) + source_image_pil_mask.paste(rotated_resized_source_mask, (int(single_print['location'][i][0]), int(single_print['location'][i][1])), rotated_resized_source_mask) + print_background = cv2.cvtColor(np.array(source_image_pil), cv2.COLOR_RGBA2BGR) + mask_background = cv2.cvtColor(np.array(source_image_pil_mask), cv2.COLOR_RGBA2BGR) + ret, mask_background = cv2.threshold(mask_background, 124, 255, cv2.THRESH_BINARY) + else: + mask = self.get_mask_inv(image) + mask = np.expand_dims(mask, axis=2) + mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) + mask = cv2.bitwise_not(mask) + + mask = cv2.resize(mask, (int(result['final_image'].shape[1] * 
single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) + image = cv2.resize(image, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) + # 旋转后的坐标需要重新算 + rotate_mask, _ = self.img_rotate(mask, single_print['print_angle_list'][i]) + rotate_image, rotated_new_size = self.img_rotate(image, single_print['print_angle_list'][i]) + # x, y = int(result['print']['location'][i][0] - rotated_new_size[0] - (rotate_mask.shape[0] - image.shape[0]) / 2), int(result['print']['location'][i][1] - rotated_new_size[1] - (rotate_mask.shape[1] - image.shape[1]) / 2) + x, y = int(single_print['location'][i][0] - rotated_new_size[0]), int(single_print['location'][i][1] - rotated_new_size[1]) + + image_x = print_background.shape[1] # 底图宽 + image_y = print_background.shape[0] # 底图高 + print_x = rotate_image.shape[1] #印花宽 + print_y = rotate_image.shape[0] #印花高 + + # 有bug + # if x + print_x > image_x: + # rotate_image = rotate_image[:, :x + print_x - image_x] + # rotate_mask = rotate_mask[:, :x + print_x - image_x] + # # + # if y + print_y > image_y: + # rotate_image = rotate_image[:y + print_y - image_y] + # rotate_mask = rotate_mask[:y + print_y - image_y] + + # 不能是并行 + # 当前第一轮的if (108以及115)是判断有没有过下界和右界。第二轮的是判断左上有没有超出。 如果这个样子的话,先裁了右边,再左移,region就会有问题 + # 先挪 再判断 最后裁剪 + + # 如果print旋转了 或者 print贴边了 则需要判断 判断左界和上界是否小于0 + if x <= 0: # 如果X轴偏移量小于0,说明印花需要被裁剪至合适大小 或当X轴偏移量大于印花宽度时,裁剪后的印花宽度为0 + rotate_image = rotate_image[:, abs(x):] + rotate_mask = rotate_mask[:, abs(x):] + start_x = x = 0 + else: + start_x = x + + if y >= 0: # 如果X轴偏移量大于0,说明印花需要被裁剪至合适大小 或当Y轴偏移量大于印花宽度时,裁剪后的印花宽度为0 + rotate_image = rotate_image[y:, :] + rotate_mask = rotate_mask[y:, :] + start_y = y = 0 + else: + start_y = y + + # ------------------ + # 如果print-size大于image-size 则需要裁剪print + + if x + print_x > image_x: + rotate_image = rotate_image[:, :image_x - x] + 
rotate_mask = rotate_mask[:, :image_x - x] + + if y + print_y > image_y: + rotate_image = rotate_image[:image_y - y, :] + rotate_mask = rotate_mask[:image_y - y, :] + + # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = cv2.bitwise_xor(mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]], rotate_mask) + # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = cv2.add(print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]], rotate_image) + + # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = rotate_mask + # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = rotate_image + mask_background = self.stack_prin(mask_background, result['pattern_image'], rotate_mask, start_y, y, start_x, x) + print_background = self.stack_prin(print_background, result['pattern_image'], rotate_image, start_y, y, start_x, x) # gray_image = cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY) # print_background = cv2.bitwise_and(print_background, print_background, mask=gray_image) From d55f8a1c0aebfd3f92e87d9d9bf23accce98e878 Mon Sep 17 00:00:00 2001 From: zchen Date: Sat, 30 Aug 2025 10:46:23 +0800 Subject: [PATCH 098/101] 1 --- app/service/design_fast/pipeline/print_painting.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/app/service/design_fast/pipeline/print_painting.py b/app/service/design_fast/pipeline/print_painting.py index aa3f7b8..e4a9dc9 100644 --- a/app/service/design_fast/pipeline/print_painting.py +++ b/app/service/design_fast/pipeline/print_painting.py @@ -112,9 +112,9 @@ class PrintPainting: else: start_x = x - if y >= 0: # 如果X轴偏移量大于0,说明印花需要被裁剪至合适大小 或当Y轴偏移量大于印花宽度时,裁剪后的印花宽度为0 - rotate_image = rotate_image[y:, :] - rotate_mask = rotate_mask[y:, :] + if y <= 0: # 如果X轴偏移量大于0,说明印花需要被裁剪至合适大小 或当Y轴偏移量大于印花宽度时,裁剪后的印花宽度为0 + rotate_image = rotate_image[abs(y):, 
:] + rotate_mask = rotate_mask[abs(y):, :] start_y = y = 0 else: start_y = y From 8bc3639ae3ad4357370ffc6bbf3fb97db4cef31e Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Mon, 1 Sep 2025 12:42:04 +0800 Subject: [PATCH 099/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):=20=E5=8F=96=E6=B6=88d?= =?UTF-8?q?esign=20v2=20=E6=9C=AC=E5=9C=B0=E6=B5=8B=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/service/design_fast/design_generate.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/app/service/design_fast/design_generate.py b/app/service/design_fast/design_generate.py index 6c698b2..7413997 100644 --- a/app/service/design_fast/design_generate.py +++ b/app/service/design_fast/design_generate.py @@ -200,10 +200,10 @@ def design_generate_v2(request_data): items_response['synthesis_url'] = synthesis_single(item_result['front_image'], item_result['back_image']) # 发送结果给java端 url = JAVA_STREAM_API_URL - xu_pei_test_url = "https://cd21b9110505.ngrok-free.app/api/third/party/receiveDesignResults" + # xu_pei_test_url = "https://cd21b9110505.ngrok-free.app/api/third/party/receiveDesignResults" logger.info(f"java 回调 -> {url}") - logger.info(f"xupei java 回调 -> {xu_pei_test_url}") + # logger.info(f"xupei java 回调 -> {xu_pei_test_url}") headers = { 'Accept': "*/*", @@ -218,10 +218,10 @@ def design_generate_v2(request_data): # 打印结果 logger.info(response.text) - response = post_request(xu_pei_test_url, json_data=items_response, headers=headers) - if response: + # response = post_request(xu_pei_test_url, json_data=items_response, headers=headers) + # if response: # 打印结果 - logger.info(f"xupei test response : {response.text}") + # 
logger.info(f"xupei test response : {response.text}") for step, object in enumerate(objects_data): t = threading.Thread(target=process_object, args=(step, object)) From 4a8a8d8d07d55ef86cba2da1e86163e5de66d0fe Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Tue, 2 Sep 2025 17:37:46 +0800 Subject: [PATCH 100/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):=20print=5Fpainting.py?= =?UTF-8?q?=20cv=E8=B4=B4=E5=9B=BE=E8=BD=ACpil=E8=B4=B4=E5=9B=BE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_route.py | 2 +- app/core/config.py | 2 +- .../design_fast/pipeline/print_painting.py | 162 +++++++++--------- 3 files changed, 83 insertions(+), 83 deletions(-) diff --git a/app/api/api_route.py b/app/api/api_route.py index eedb6fb..23885f9 100644 --- a/app/api/api_route.py +++ b/app/api/api_route.py @@ -31,7 +31,7 @@ router.include_router(api_image2sketch.router, tags=['api_image2sketch'], prefix router.include_router(api_brighten.router, tags=['api_brighten'], prefix="/api") router.include_router(api_query_image.router, tags=['api_query_image'], prefix="/api") router.include_router(api_brand_dna.router, tags=['api_brand_dna'], prefix="/api") -router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") +# router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") router.include_router(api_mannequins_edit.router, tags=['api_mannequins_edit'], prefix="/api") router.include_router(api_pose_transform.router, tags=['api_pose_transform'], prefix="/api") router.include_router(api_clothing_seg.router, tags=['api_clothing_seg'], prefix="/api") diff --git a/app/core/config.py b/app/core/config.py 
index 6ec49ed..aed5463 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -20,7 +20,7 @@ class Settings(BaseSettings): OSS = "minio" -DEBUG = False +DEBUG = True if DEBUG: LOGS_PATH = "logs/" CATEGORY_PATH = "service/attribute/config/descriptor/category/category_dis.csv" diff --git a/app/service/design_fast/pipeline/print_painting.py b/app/service/design_fast/pipeline/print_painting.py index e4a9dc9..2d054a8 100644 --- a/app/service/design_fast/pipeline/print_painting.py +++ b/app/service/design_fast/pipeline/print_painting.py @@ -55,88 +55,88 @@ class PrintPainting: for i in range(len(single_print['print_path_list'])): image, image_mode = self.read_image(single_print['print_path_list'][i]) - if image_mode == "RGBA": - # image_rgba = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA) - # image = Image.fromarray(image_rgba) + if image_mode == "RGB": + image_rgba = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA) + image = Image.fromarray(image_rgba) - new_size = (int(result['pattern_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['pattern_image'].shape[0] * single_print['print_scale_list'][i][1])) - mask = image.split()[3] - resized_source = image.resize(new_size) - resized_source_mask = mask.resize(new_size) - rotated_resized_source = resized_source.rotate(-single_print['print_angle_list'][i]) - rotated_resized_source_mask = resized_source_mask.rotate(-single_print['print_angle_list'][i]) - source_image_pil = Image.fromarray(cv2.cvtColor(print_background, cv2.COLOR_BGR2RGB)) - source_image_pil_mask = Image.fromarray(cv2.cvtColor(mask_background, cv2.COLOR_BGR2RGB)) - source_image_pil.paste(rotated_resized_source, (int(single_print['location'][i][0]), int(single_print['location'][i][1])), rotated_resized_source) - source_image_pil_mask.paste(rotated_resized_source_mask, (int(single_print['location'][i][0]), int(single_print['location'][i][1])), rotated_resized_source_mask) - print_background = cv2.cvtColor(np.array(source_image_pil), cv2.COLOR_RGBA2BGR) 
- mask_background = cv2.cvtColor(np.array(source_image_pil_mask), cv2.COLOR_RGBA2BGR) - ret, mask_background = cv2.threshold(mask_background, 124, 255, cv2.THRESH_BINARY) - else: - mask = self.get_mask_inv(image) - mask = np.expand_dims(mask, axis=2) - mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) - mask = cv2.bitwise_not(mask) - - mask = cv2.resize(mask, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) - image = cv2.resize(image, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) - # 旋转后的坐标需要重新算 - rotate_mask, _ = self.img_rotate(mask, single_print['print_angle_list'][i]) - rotate_image, rotated_new_size = self.img_rotate(image, single_print['print_angle_list'][i]) - # x, y = int(result['print']['location'][i][0] - rotated_new_size[0] - (rotate_mask.shape[0] - image.shape[0]) / 2), int(result['print']['location'][i][1] - rotated_new_size[1] - (rotate_mask.shape[1] - image.shape[1]) / 2) - x, y = int(single_print['location'][i][0] - rotated_new_size[0]), int(single_print['location'][i][1] - rotated_new_size[1]) - - image_x = print_background.shape[1] # 底图宽 - image_y = print_background.shape[0] # 底图高 - print_x = rotate_image.shape[1] #印花宽 - print_y = rotate_image.shape[0] #印花高 - - # 有bug - # if x + print_x > image_x: - # rotate_image = rotate_image[:, :x + print_x - image_x] - # rotate_mask = rotate_mask[:, :x + print_x - image_x] - # # - # if y + print_y > image_y: - # rotate_image = rotate_image[:y + print_y - image_y] - # rotate_mask = rotate_mask[:y + print_y - image_y] - - # 不能是并行 - # 当前第一轮的if (108以及115)是判断有没有过下界和右界。第二轮的是判断左上有没有超出。 如果这个样子的话,先裁了右边,再左移,region就会有问题 - # 先挪 再判断 最后裁剪 - - # 如果print旋转了 或者 print贴边了 则需要判断 判断左界和上界是否小于0 - if x <= 0: # 如果X轴偏移量小于0,说明印花需要被裁剪至合适大小 或当X轴偏移量大于印花宽度时,裁剪后的印花宽度为0 - rotate_image = rotate_image[:, abs(x):] - rotate_mask = 
rotate_mask[:, abs(x):] - start_x = x = 0 - else: - start_x = x - - if y <= 0: # 如果X轴偏移量大于0,说明印花需要被裁剪至合适大小 或当Y轴偏移量大于印花宽度时,裁剪后的印花宽度为0 - rotate_image = rotate_image[abs(y):, :] - rotate_mask = rotate_mask[abs(y):, :] - start_y = y = 0 - else: - start_y = y - - # ------------------ - # 如果print-size大于image-size 则需要裁剪print - - if x + print_x > image_x: - rotate_image = rotate_image[:, :image_x - x] - rotate_mask = rotate_mask[:, :image_x - x] - - if y + print_y > image_y: - rotate_image = rotate_image[:image_y - y, :] - rotate_mask = rotate_mask[:image_y - y, :] - - # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = cv2.bitwise_xor(mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]], rotate_mask) - # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = cv2.add(print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]], rotate_image) - - # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = rotate_mask - # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = rotate_image - mask_background = self.stack_prin(mask_background, result['pattern_image'], rotate_mask, start_y, y, start_x, x) - print_background = self.stack_prin(print_background, result['pattern_image'], rotate_image, start_y, y, start_x, x) + new_size = (int(result['pattern_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['pattern_image'].shape[0] * single_print['print_scale_list'][i][1])) + mask = image.split()[3] + resized_source = image.resize(new_size) + resized_source_mask = mask.resize(new_size) + rotated_resized_source = resized_source.rotate(-single_print['print_angle_list'][i]) + rotated_resized_source_mask = resized_source_mask.rotate(-single_print['print_angle_list'][i]) + source_image_pil = Image.fromarray(cv2.cvtColor(print_background, cv2.COLOR_BGR2RGB)) + source_image_pil_mask 
= Image.fromarray(cv2.cvtColor(mask_background, cv2.COLOR_BGR2RGB)) + source_image_pil.paste(rotated_resized_source, (int(single_print['location'][i][0]), int(single_print['location'][i][1])), rotated_resized_source) + source_image_pil_mask.paste(rotated_resized_source_mask, (int(single_print['location'][i][0]), int(single_print['location'][i][1])), rotated_resized_source_mask) + print_background = cv2.cvtColor(np.array(source_image_pil), cv2.COLOR_RGBA2BGR) + mask_background = cv2.cvtColor(np.array(source_image_pil_mask), cv2.COLOR_RGBA2BGR) + ret, mask_background = cv2.threshold(mask_background, 124, 255, cv2.THRESH_BINARY) + # else: + # mask = self.get_mask_inv(image) + # mask = np.expand_dims(mask, axis=2) + # mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) + # mask = cv2.bitwise_not(mask) + # + # mask = cv2.resize(mask, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) + # image = cv2.resize(image, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) + # # 旋转后的坐标需要重新算 + # rotate_mask, _ = self.img_rotate(mask, single_print['print_angle_list'][i]) + # rotate_image, rotated_new_size = self.img_rotate(image, single_print['print_angle_list'][i]) + # # x, y = int(result['print']['location'][i][0] - rotated_new_size[0] - (rotate_mask.shape[0] - image.shape[0]) / 2), int(result['print']['location'][i][1] - rotated_new_size[1] - (rotate_mask.shape[1] - image.shape[1]) / 2) + # x, y = int(single_print['location'][i][0] - rotated_new_size[0]), int(single_print['location'][i][1] - rotated_new_size[1]) + # + # image_x = print_background.shape[1] # 底图宽 + # image_y = print_background.shape[0] # 底图高 + # print_x = rotate_image.shape[1] #印花宽 + # print_y = rotate_image.shape[0] #印花高 + # + # # 有bug + # # if x + print_x > image_x: + # # rotate_image = rotate_image[:, :x + 
print_x - image_x] + # # rotate_mask = rotate_mask[:, :x + print_x - image_x] + # # # + # # if y + print_y > image_y: + # # rotate_image = rotate_image[:y + print_y - image_y] + # # rotate_mask = rotate_mask[:y + print_y - image_y] + # + # # 不能是并行 + # # 当前第一轮的if (108以及115)是判断有没有过下界和右界。第二轮的是判断左上有没有超出。 如果这个样子的话,先裁了右边,再左移,region就会有问题 + # # 先挪 再判断 最后裁剪 + # + # # 如果print旋转了 或者 print贴边了 则需要判断 判断左界和上界是否小于0 + # if x <= 0: # 如果X轴偏移量小于0,说明印花需要被裁剪至合适大小 或当X轴偏移量大于印花宽度时,裁剪后的印花宽度为0 + # rotate_image = rotate_image[:, abs(x):] + # rotate_mask = rotate_mask[:, abs(x):] + # start_x = x = 0 + # else: + # start_x = x + # + # if y <= 0: # 如果X轴偏移量大于0,说明印花需要被裁剪至合适大小 或当Y轴偏移量大于印花宽度时,裁剪后的印花宽度为0 + # rotate_image = rotate_image[abs(y):, :] + # rotate_mask = rotate_mask[abs(y):, :] + # start_y = y = 0 + # else: + # start_y = y + # + # # ------------------ + # # 如果print-size大于image-size 则需要裁剪print + # + # if x + print_x > image_x: + # rotate_image = rotate_image[:, :image_x - x] + # rotate_mask = rotate_mask[:, :image_x - x] + # + # if y + print_y > image_y: + # rotate_image = rotate_image[:image_y - y, :] + # rotate_mask = rotate_mask[:image_y - y, :] + # + # # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = cv2.bitwise_xor(mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]], rotate_mask) + # # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = cv2.add(print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]], rotate_image) + # + # # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = rotate_mask + # # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = rotate_image + # mask_background = self.stack_prin(mask_background, result['pattern_image'], rotate_mask, start_y, y, start_x, x) + # print_background = self.stack_prin(print_background, result['pattern_image'], rotate_image, start_y, y, 
start_x, x) # gray_image = cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY) # print_background = cv2.bitwise_and(print_background, print_background, mask=gray_image) From 34cc4b436979d4566817418d6bdbdb145fbe697c Mon Sep 17 00:00:00 2001 From: zhouchengrong Date: Tue, 2 Sep 2025 17:43:53 +0800 Subject: [PATCH 101/101] =?UTF-8?q?feat=EF=BC=88=E6=96=B0=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=89:=20fix=EF=BC=88=E4=BF=AE=E5=A4=8Dbug?= =?UTF-8?q?=EF=BC=89:=20docs=EF=BC=88=E6=96=87=E6=A1=A3=E5=8F=98=E6=9B=B4?= =?UTF-8?q?=EF=BC=89:=20refactor=EF=BC=88=E9=87=8D=E6=9E=84=EF=BC=89:=20te?= =?UTF-8?q?st(=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95):=20print=5Fpainting.py?= =?UTF-8?q?=20cv=E8=B4=B4=E5=9B=BE=E8=BD=ACpil=E8=B4=B4=E5=9B=BE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/api_route.py | 2 +- app/core/config.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/app/api/api_route.py b/app/api/api_route.py index 23885f9..eedb6fb 100644 --- a/app/api/api_route.py +++ b/app/api/api_route.py @@ -31,7 +31,7 @@ router.include_router(api_image2sketch.router, tags=['api_image2sketch'], prefix router.include_router(api_brighten.router, tags=['api_brighten'], prefix="/api") router.include_router(api_query_image.router, tags=['api_query_image'], prefix="/api") router.include_router(api_brand_dna.router, tags=['api_brand_dna'], prefix="/api") -# router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") +router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") router.include_router(api_mannequins_edit.router, tags=['api_mannequins_edit'], prefix="/api") router.include_router(api_pose_transform.router, tags=['api_pose_transform'], prefix="/api") router.include_router(api_clothing_seg.router, tags=['api_clothing_seg'], prefix="/api") diff --git a/app/core/config.py b/app/core/config.py index aed5463..6ec49ed 100644 --- a/app/core/config.py +++ 
b/app/core/config.py @@ -20,7 +20,7 @@ class Settings(BaseSettings): OSS = "minio" -DEBUG = True +DEBUG = False if DEBUG: LOGS_PATH = "logs/" CATEGORY_PATH = "service/attribute/config/descriptor/category/category_dis.csv"