diff --git a/.gitignore b/.gitignore index b2eb511..800fc84 100644 --- a/.gitignore +++ b/.gitignore @@ -148,4 +148,6 @@ app/logs/* *.pickle *.csv *.avi -*.json \ No newline at end of file +*.json +*.env* +config.backup.py \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..d134880 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,22 @@ +FROM python:3.12-slim + +# Install uv. +COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/ + +# Copy the application into the container. +COPY . /app + +# Install the application dependencies. +WORKDIR /app +RUN mkdir /seg_cache +# 更新索引并安装替代包 +RUN apt-get update && apt-get install -y \ + vim \ + libgl1 \ + libglib2.0-0 \ + && rm -rf /var/lib/apt/lists/* + +RUN uv sync --frozen --no-cache + +# Run the application. +CMD ["/app/.venv/bin/fastapi", "run", "app/main.py", "--port", "80", "--host", "0.0.0.0"] \ No newline at end of file diff --git a/README.md b/README.md index 023ed87..6085b85 100644 --- a/README.md +++ b/README.md @@ -23,11 +23,11 @@ $ pip install mmcv==1.4.2 -f https://download.openmmlab.com/mmcv/dist/cu117/torch1.13/index.html -2. 启动服务器 +1. 启动服务器 $ uvicorn app.main:app --host 0.0.0.0 --port 8000 -3. 打开 http://127.0.0.1:8000/docs +2. 
打开 http://127.0.0.1:8000/docs Docker 部署 --------------- diff --git a/app/api/api_attribute_retrieve.py b/app/api/api_attribute_retrieve.py index 2a5ad4d..aee5731 100644 --- a/app/api/api_attribute_retrieve.py +++ b/app/api/api_attribute_retrieve.py @@ -2,8 +2,7 @@ import json import logging from fastapi import APIRouter, HTTPException - -from app.core.config import DEBUG +from app.core.config import settings from app.schemas.attribute_retrieve import * from app.schemas.response_template import ResponseModel from app.service.attribute.config import const, local_debug_const @@ -35,13 +34,13 @@ def attribute_recognition(request_item: list[AttributeRecognitionModel]): """ try: for item in request_item: - logger.debug(f"attribute_recognition request item is : @@@@@@:{json.dumps(item.dict())}") - if DEBUG: + logger.info(f"attribute_recognition request item is : @@@@@@:{json.dumps(item.dict(), indent=4)}") + if settings.DEBUG: service = AttributeRecognition(const=local_debug_const, request_data=request_item) else: service = AttributeRecognition(const=const, request_data=request_item) data = service.get_result() - logger.debug(f"attribute_recognition response @@@@@@:{json.dumps(data)}") + logger.info(f"attribute_recognition response @@@@@@:{json.dumps(data, indent=4)}") except Exception as e: logger.warning(f"attribute_recognition Run Exception @@@@@@:{e}") raise HTTPException(status_code=404, detail=str(e)) @@ -67,10 +66,10 @@ def category_recognition(request_item: list[CategoryRecognitionModel]): """ try: for item in request_item: - logger.info(f"category_recognition request item is : @@@@@@:{json.dumps(item.dict())}") + logger.info(f"category_recognition request item is : @@@@@@:{json.dumps(item.dict(), indent=4)}") service = CategoryRecognition(request_data=request_item) data = service.get_result() - logger.info(f"category_recognition response @@@@@@:{json.dumps(data)}") + logger.info(f"category_recognition response @@@@@@:{json.dumps(data, indent=4)}") except 
Exception as e: logger.warning(f"category_recognition Run Exception @@@@@@:{e}") raise HTTPException(status_code=404, detail=str(e)) diff --git a/app/api/api_brand_dna.py b/app/api/api_brand_dna.py index 2133ee9..c1dd432 100644 --- a/app/api/api_brand_dna.py +++ b/app/api/api_brand_dna.py @@ -26,7 +26,7 @@ def seg_product(request_item: BrandDnaModel): } """ try: - logger.info(f"brand dna request item is : @@@@@@:{json.dumps(request_item.dict())}") + logger.info(f"brand dna request item is : @@@@@@:{json.dumps(request_item.dict(),indent=4)}") service = BrandDna(request_item) result_url = service.get_result() except Exception as e: @@ -36,7 +36,7 @@ def seg_product(request_item: BrandDnaModel): @router.post("/GenerateBrand") -def GenerateBrand(request_data: GenerateBrandModel): +def generate_brand(request_data: GenerateBrandModel): """ 通过prompt 生成 brand name ,brand slogan , brand logo。 创建一个具有以下参数的请求体: diff --git a/app/api/api_brand_dna_initialize.py b/app/api/api_brand_dna_initialize.py index 72c0a25..9388bdd 100644 --- a/app/api/api_brand_dna_initialize.py +++ b/app/api/api_brand_dna_initialize.py @@ -9,7 +9,6 @@ from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.triggers.cron import CronTrigger from fastapi import HTTPException, APIRouter -from app.service.recommend.service import load_resources, matrix_data import pymysql from app.core.config import DB_CONFIG, TABLE_CATEGORIES, RECOMMEND_PATH_PREFIX from minio import Minio diff --git a/app/api/api_brighten.py b/app/api/api_brighten.py index cc5a03f..1cfeac2 100644 --- a/app/api/api_brighten.py +++ b/app/api/api_brighten.py @@ -5,10 +5,11 @@ import time from PIL import ImageEnhance from fastapi import APIRouter, HTTPException - +from minio import Minio +from app.core.config import settings from app.schemas.brighten import BrightenModel from app.schemas.response_template import ResponseModel -from app.service.utils.oss_client import oss_get_image, oss_upload_image +from 
app.service.utils.new_oss_client import oss_get_image, oss_upload_image router = APIRouter() logger = logging.getLogger() @@ -20,6 +21,9 @@ def increase_brightness(img, factor): return bright_img +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) + + @router.post("/brighten") async def brighten(request_item: BrightenModel): """ @@ -35,14 +39,14 @@ async def brighten(request_item: BrightenModel): """ try: start_time = time.time() - logger.info(f"brighten request item is : @@@@@@:{json.dumps(request_item.dict())}") - image = oss_get_image(bucket=request_item.image_url.split('/')[0], object_name=request_item.image_url[request_item.image_url.find('/') + 1:], data_type="PIL") + logger.info(f"brighten request item is : @@@@@@:{json.dumps(request_item.dict(),indent=4)}") + image = oss_get_image(oss_client=minio_client, bucket=request_item.image_url.split('/')[0], object_name=request_item.image_url[request_item.image_url.find('/') + 1:], data_type="PIL") new_image = increase_brightness(image, request_item.brighten_value) image_data = io.BytesIO() new_image.save(image_data, format='PNG') image_data.seek(0) image_bytes = image_data.read() - req = oss_upload_image(bucket=request_item.image_url.split('/')[0], object_name=request_item.image_url[request_item.image_url.find('/') + 1:], image_bytes=image_bytes) + req = oss_upload_image(oss_client=minio_client, bucket=request_item.image_url.split('/')[0], object_name=request_item.image_url[request_item.image_url.find('/') + 1:], image_bytes=image_bytes) brighten_url = f"{req.bucket_name}/{req.object_name}" logger.info(f"run time is : {time.time() - start_time}") except Exception as e: diff --git a/app/api/api_chat_robot.py b/app/api/api_chat_robot.py index c8bcf32..a05a1c2 100644 --- a/app/api/api_chat_robot.py +++ b/app/api/api_chat_robot.py @@ -30,9 +30,9 @@ def chat_robot(request_data: ChatRobotModel): } """ try: - logger.info(f"chat_robot 
request item is : @@@@@@:{json.dumps(request_data.dict())}") + logger.info(f"chat_robot request item is : @@@@@@:{json.dumps(request_data.dict(),indent=4)}") data = chat(post_data=request_data) - logger.info(f"chat_robot response @@@@@@:{json.dumps(data)}") + logger.info(f"chat_robot response @@@@@@:{json.dumps(data, indent=4)}") except Exception as e: logger.warning(f"chat_robot Run Exception @@@@@@:{e}") raise HTTPException(status_code=404, detail=str(e)) diff --git a/app/api/api_clothing_seg.py b/app/api/api_clothing_seg.py index e09b882..67fc782 100644 --- a/app/api/api_clothing_seg.py +++ b/app/api/api_clothing_seg.py @@ -42,7 +42,7 @@ def clothing_seg(request_item: ClothingSegModel): } """ try: - logger.info(f"clothing_seg request item is : @@@@@@:{json.dumps(request_item.dict())}") + logger.info(f"clothing_seg request item is : @@@@@@:{json.dumps(request_item.dict(),indent=4)}") server = ClothingSeg(request_item) result_url = server.get_result() except Exception as e: diff --git a/app/api/api_design.py b/app/api/api_design.py index 03e0b25..555819f 100644 --- a/app/api/api_design.py +++ b/app/api/api_design.py @@ -1,64 +1,77 @@ import json import logging -import os -from fastapi import APIRouter, HTTPException, UploadFile, File, Form, BackgroundTasks +import requests +from fastapi import APIRouter, HTTPException, BackgroundTasks -from app.schemas.design import DesignModel, DesignProgressModel, ModelProgressModel, DBGConfigModel, DesignStreamModel +from app.core.config import settings +from app.schemas.design import DesignModel, ModelProgressModel, DesignStreamModel, SAMRequestModel from app.schemas.response_template import ResponseModel -from app.service.design.model_process_service import model_transpose -from app.service.design_batch.service import start_design_batch_generate from app.service.design_fast.design_generate import design_generate, design_generate_v2 -from app.service.design_fast.utils.redis_utils import Redis +from 
app.service.design_fast.model_process_service import model_transpose router = APIRouter() logger = logging.getLogger() @router.post("/design") -def design(request_data: DesignModel, background_tasks: BackgroundTasks): +def design(request_data: DesignModel): """ - objects.items.transparent: - "transparent":{ - "mask_url":"test/transparent_test/transparent_mask.png", - "scale":0.1 - }, - mask_url 为空"" -> 单件衣服透明 - mask_url 非空"mask_url" -> 区域透明 + - **objects.items.transparent**: + ```json + "transparent":{ + "mask_url":"test/transparent_test/transparent_mask.png", + "scale":0.1 + }, + ``` + - **mask_url** 为空"" -> 单件衣服透明 + - **mask_url** 非空"mask_url" -> 区域透明 + - **transpose** 镜像模式 ,:"top_bottom"或"left_right" + - **rotate** 45, - 创建一个具有以下参数的请求体: - 示例参数: + - ** design 参数变更: + design detail 请求参数中 basic -> preview_submit 替换为design_type 可选参数 default ,merge (移除preview和submit) + design_type 参数说明: + defuault模式下 请求参数不变 + merge模式下 items -> 每个item需要新增 merge_image_path , merge_image_path为前端处理 print color等操作后的单件结果图 + + ** + + - 创建一个具有以下参数的请求体: + 示例参数: + ```json { "objects": [ { "basic": { "body_point_test": { "waistband_right": [ - 200, - 241 + 203, + 249 ], "hand_point_right": [ - 223, - 297 + 229, + 343 ], "waistband_left": [ - 112, - 241 + 119, + 248 ], "hand_point_left": [ - 92, - 305 + 97, + 343 ], "shoulder_left": [ - 99, - 116 + 108, + 107 ], "shoulder_right": [ - 215, - 116 + 212, + 107 ] }, "layer_order": true, + "design_type": "preview", "scale_bag": 0.7, "scale_earrings": 0.16, "self_template": true, @@ -67,14 +80,19 @@ def design(request_data: DesignModel, background_tasks: BackgroundTasks): }, "items": [ { - "businessId": 270372, - "color": "30 28 28", - "image_id": 69780, + "businessId": 2115382, + "color": "", + "image_id": 61686, "offset": [ 0, 0 ], - "path": "aida-sys-image/images/female/trousers/0825000630.jpg", + "path": "aida-sys-image/images/female/dress/0628000564.jpg", + "transpose": [ + 1, + 1 + ], + "rotate": 45, "print": { "element": { "element_angle_list": 
[], @@ -83,10 +101,30 @@ def design(request_data: DesignModel, background_tasks: BackgroundTasks): "location": [] }, "overall": { - "location": [], - "print_angle_list": [], - "print_path_list": [], - "print_scale_list": [] + "location": [ + [ + 53.0, + 118.5 + ] + ], + "print_angle_list": [ + 0.0 + ], + "print_path_list": [ + "aida-users/89/print/02d57aa8-f342-4e1d-b02c-b278f94dcfe6-3-89.png" + ], + "print_scale_list": [ + [ + 0.5, + 0.5 + ] + ], + "gap": [ + [ + 10, + 10 + ] + ] }, "single": { "location": [], @@ -100,104 +138,30 @@ def design(request_data: DesignModel, background_tasks: BackgroundTasks): 1.0, 1.0 ], - "type": "Trousers" + "seg_mask_url": "aida-clothing/mask/mask_9698b428-eb93-11f0-9327-0242c0a80003.png", + "type": "Dress" }, { - "businessId": 270373, - "color": "30 28 28", - "image_id": 98243, - "offset": [ - 0, - 0 - ], - "path": "aida-sys-image/images/female/blouse/0902003811.jpg", - "print": { - "element": { - "element_angle_list": [], - "element_path_list": [], - "element_scale_list": [], - "location": [] - }, - "overall": { - "location": [], - "print_angle_list": [], - "print_path_list": [], - "print_scale_list": [] - }, - "single": { - "location": [], - "print_angle_list": [], - "print_path_list": [], - "print_scale_list": [] - } - }, - "priority": 11, - "resize_scale": [ - 1.0, - 1.0 - ], - "type": "Blouse" - }, - { - "businessId": 270374, - "color": "172 68 68", - "image_id": 98244, - "offset": [ - 0, - 0 - ], - "path": "aida-sys-image/images/female/outwear/0825000410.jpg", - "print": { - "element": { - "element_angle_list": [], - "element_path_list": [], - "element_scale_list": [], - "location": [] - }, - "overall": { - "location": [], - "print_angle_list": [], - "print_path_list": [], - "print_scale_list": [] - }, - "single": { - "location": [], - "print_angle_list": [], - "print_path_list": [], - "print_scale_list": [] - } - }, - "priority": 12, - "resize_scale": [ - 1.0, - 1.0 - ], - "transparent":{ - 
"mask_url":"test/transparent_test/transparent_mask.png", - "scale":0.1 - }, - "type": "Outwear" - }, - { - "body_path": "aida-sys-image/models/female/5bdfe7ca-64eb-44e4-b03d-8e517520c795.png", - "image_id": 96090, + "body_path": "aida-sys-image/models/female/2e4815b9-1191-419d-94ed-5771239ca4a5.png", + "image_id": 67277, "type": "Body" } ] } ], - "process_id": "83" + "process_id": "89" } - """ - # logger.info(f"design request item is : @@@@@@:{json.dumps(request_data.dict())}") + ``` + """ + # logger.info(f"design request item is : @@@@@@:{json.dumps(request_data.dict(),indent=4)}") # data = generate(request_data=request_data) - # logger.info(f"design response @@@@@@:{json.dumps(data)}") + # logger.info(f"design response @@@@@@:{json.dumps(data, indent=4)}") # try: - logger.info(f"design request item is : @@@@@@:{json.dumps(request_data.dict())}") + logger.info(f"design request item is : @@@@@@:{json.dumps(request_data.dict(), indent=4)}") data = design_generate(request_data=request_data) - logger.info(f"design response @@@@@@:{json.dumps(data)}") + logger.info(f"design response @@@@@@:{json.dumps(data, indent=4)}") except Exception as e: logger.warning(f"design Run Exception @@@@@@:{e}") raise HTTPException(status_code=404, detail=str(e)) @@ -215,47 +179,48 @@ async def design_v2(request_data: DesignStreamModel, background_tasks: Backgroun "basic": { "body_point_test": { "waistband_right": [ - 200, - 241 + 203, + 249 ], "hand_point_right": [ - 223, - 297 + 229, + 343 ], "waistband_left": [ - 112, - 241 + 119, + 248 ], "hand_point_left": [ - 92, - 305 + 97, + 343 ], "shoulder_left": [ - 99, - 116 + 108, + 107 ], + "relation_type": "System", "shoulder_right": [ - 215, - 116 - ] + 212, + 107 + ], + "relation_id": 1020356 }, - "layer_order": true, + "layer_order": false, "scale_bag": 0.7, "scale_earrings": 0.16, - "self_template": true, + "self_template": false, "single_overall": "overall", "switch_category": "" }, "items": [ { - "businessId": 270372, - "color": "30 
28 28", - "image_id": 69780, + "color": "209 196 171", + "image_id": 84093, "offset": [ - 0, - 0 + 1, + 1 ], - "path": "aida-sys-image/images/female/trousers/0825000630.jpg", + "path": "aida-users/89/sketchboard/female/Outwear/0943d209-7ce0-408c-bc61-83f15da94138.png", "print": { "element": { "element_angle_list": [], @@ -264,10 +229,23 @@ async def design_v2(request_data: DesignStreamModel, background_tasks: Backgroun "location": [] }, "overall": { - "location": [], - "print_angle_list": [], + "location": [ + [ + 0.0, + 0.0 + ] + ], + "print_angle_list": [ + 0.0, + 0.0 + ], "print_path_list": [], - "print_scale_list": [] + "print_scale_list": [ + [ + 0.0, + 0.0 + ] + ] }, "single": { "location": [], @@ -276,22 +254,20 @@ async def design_v2(request_data: DesignStreamModel, background_tasks: Backgroun "print_scale_list": [] } }, - "priority": 10, "resize_scale": [ 1.0, 1.0 ], - "type": "Trousers" + "type": "Outwear" }, { - "businessId": 270373, - "color": "30 28 28", - "image_id": 98243, + "color": "63 71 73", + "image_id": 100496, "offset": [ - 0, - 0 + 1, + 1 ], - "path": "aida-sys-image/images/female/blouse/0902003811.jpg", + "path": "aida-sys-image/images/female/blouse/0628001684.jpg", "print": { "element": { "element_angle_list": [], @@ -300,10 +276,23 @@ async def design_v2(request_data: DesignStreamModel, background_tasks: Backgroun "location": [] }, "overall": { - "location": [], - "print_angle_list": [], + "location": [ + [ + 0.0, + 0.0 + ] + ], + "print_angle_list": [ + 0.0, + 0.0 + ], "print_path_list": [], - "print_scale_list": [] + "print_scale_list": [ + [ + 0.0, + 0.0 + ] + ] }, "single": { "location": [], @@ -312,7 +301,6 @@ async def design_v2(request_data: DesignStreamModel, background_tasks: Backgroun "print_scale_list": [] } }, - "priority": 11, "resize_scale": [ 1.0, 1.0 @@ -320,14 +308,14 @@ async def design_v2(request_data: DesignStreamModel, background_tasks: Backgroun "type": "Blouse" }, { - "businessId": 270374, - "color": "172 68 68", - 
"image_id": 98244, + "color": "111 78 63", + "gradient": "aida-gradient/f69b98e8-4248-4f7a-98a2-21bac41bf3e0.png", + "image_id": 92193, "offset": [ - 0, - 0 + 1, + 1 ], - "path": "aida-sys-image/images/female/outwear/0825000410.jpg", + "path": "aida-sys-image/images/female/trousers/0825001160.jpg", "print": { "element": { "element_angle_list": [], @@ -336,10 +324,23 @@ async def design_v2(request_data: DesignStreamModel, background_tasks: Backgroun "location": [] }, "overall": { - "location": [], - "print_angle_list": [], + "location": [ + [ + 0.0, + 0.0 + ] + ], + "print_angle_list": [ + 0.0, + 0.0 + ], "print_path_list": [], - "print_scale_list": [] + "print_scale_list": [ + [ + 0.0, + 0.0 + ] + ] }, "single": { "location": [], @@ -348,31 +349,37 @@ async def design_v2(request_data: DesignStreamModel, background_tasks: Backgroun "print_scale_list": [] } }, - "priority": 12, "resize_scale": [ 1.0, 1.0 ], - "transparent":{ - "mask_url":"test/transparent_test/transparent_mask.png", - "scale":0.1 - }, - "type": "Outwear" + "type": "Trousers" }, { - "body_path": "aida-sys-image/models/female/5bdfe7ca-64eb-44e4-b03d-8e517520c795.png", - "image_id": 96090, + "body_path": "aida-sys-image/models/female/2e4815b9-1191-419d-94ed-5771239ca4a5.png", + "image_id": 67277, + "offset": [ + 1, + 1 + ], + "resize_scale": [ + 1.0, + 1.0 + ], "type": "Body" } - ] + ], + "objectSign": "65830966" } ], - "process_id": "83" + "process_id": "4802946666428422", + "requestId": "1d1e7641-0d62-4da2-adc0-b4404910723c", + "callback_url": "https://api.aida.com.hk/api/third/party/receiveDesignResults" } """ try: # 异步 - logger.info(f"generate_image request item is : @@@@@@:{json.dumps(request_data.dict())}") + logger.info(f"generate_image request item is : @@@@@@:{json.dumps(request_data.dict(), indent=4)}") background_tasks.add_task(design_generate_v2, request_data) except Exception as e: logger.warning(f"design Run Exception @@@@@@:{e}") @@ -380,30 +387,76 @@ async def design_v2(request_data: 
DesignStreamModel, background_tasks: Backgroun return ResponseModel() -@router.post('/get_progress') -def get_progress(request_data: DesignProgressModel): +@router.post("/seg_anything") +async def seg_anything(request_data: SAMRequestModel): """ - 获取design 进度 - 创建一个具有以下参数的请求体: - - **process_id**: 进度id + **Segment Anything 交互式分割接口** - 示例参数: - { - "process_id": "6878547032381675" - } + 通过传入图片路径和点击的点坐标,返回分割后的掩码数据。 + + ### 参数说明: + - **user_id**:用户id 用于存储分割图 + - **image_path**: 图片在服务器或云端的相对路径。 + - **type**: 推理类型 + - **box**: 框选矩形点位信息 + - **points**: 交互点的坐标列表。每个点为 [x, y] 像素格式。 + - **labels**: 坐标点的属性标签,必须与 points 长度一致: + - 1: **前景点** (代表想要分割出的区域) + - 0: **背景点** (代表想要排除的区域) + + ### 请求体示例: + ```json + point + { + "user_id": 1, + "image_path": "aida-users/89/sketch/4e8fe37d-7068-400a-ac94-c01647fa5f6f.png", + "type":"point", + "points": [[310, 403], [493, 375], [261, 266], [404, 484]], + "labels": [1, 1, 0, 1] + } + + box + { + "user_id": 1, + "image_path": "aida-users/89/sketch/4e8fe37d-7068-400a-ac94-c01647fa5f6f.png", + "type":"box", + "box": [350, 286, 544, 520] + } + ``` """ try: - logger.info(f"get_progress request item is : @@@@@@:{json.dumps(request_data.dict())}") - process_id = request_data.process_id - r = Redis() - data = r.read(key=process_id) - if data is None: - raise ValueError(f"No progress ID: {process_id}") - logging.info(f"get_progress process_id @@@@@@ : {process_id} , progress : {json.dumps(data)}") + logger.info(f"seg_anything request item is : @@@@@@:{json.dumps(request_data.dict(), indent=4)}") + data = requests.post(f"http://{settings.A6000_SERVICE_HOST}:10075/predict", json=request_data.dict()) + logger.info(f"seg_anything response @@@@@@:{json.dumps(json.loads(data.content), indent=4)}") + return ResponseModel(data=json.loads(data.content)) except Exception as e: - logger.warning(f"get_progress Run Exception @@@@@@:{e}") - raise HTTPException(status_code=404, detail=str(e)) - return ResponseModel(data=data) + logger.warning(f"seg_anything Run 
Exception @@@@@@:{e}") + + +# @router.post('/get_progress') +# def get_progress(request_data: DesignProgressModel): +# """ +# 获取design 进度 +# 创建一个具有以下参数的请求体: +# - **process_id**: 进度id +# +# 示例参数: +# { +# "process_id": "6878547032381675" +# } +# """ +# try: +# logger.info(f"get_progress request item is : @@@@@@:{json.dumps(request_data.dict(), indent=4)}") +# process_id = request_data.process_id +# r = Redis() +# data = r.read(key=process_id) +# if data is None: +# raise ValueError(f"No progress ID: {process_id}") +# logging.info(f"get_progress process_id @@@@@@ : {process_id} , progress : {json.dumps(data, indent=4)}") +# except Exception as e: +# logger.warning(f"get_progress Run Exception @@@@@@:{e}") +# raise HTTPException(status_code=404, detail=str(e)) +# return ResponseModel(data=data) @router.post('/model_process') @@ -419,44 +472,42 @@ def model_process(request_data: ModelProgressModel): } """ try: - logger.info(f"model_process request item is : @@@@@@:{json.dumps(request_data.dict())}") + logger.info(f"model_process request item is : @@@@@@:{json.dumps(request_data.dict(), indent=4)}") data = model_transpose(image_path=request_data.model_path) - logger.info(f"model_process response @@@@@@:{json.dumps(data)}") + logger.info(f"model_process response @@@@@@:{json.dumps(data, indent=4)}") except Exception as e: logger.warning(f"model_process Run Exception @@@@@@:{e}") raise HTTPException(status_code=404, detail=str(e)) return ResponseModel(data=data) -# ############################################################## - - -@router.post("/design_batch_generate") -async def design_batch(file: UploadFile = File(...), - tasks_id: str = Form(...), - user_id: str = Form(...), - file_name: str = Form(...), - total: int = Form(...) 
- ): - dbg_config = DBGConfigModel( - tasks_id=tasks_id, - user_id=user_id, - file_name=file_name, - total=total - ) - contents = await file.read() - file_name = file.filename - await save_request_file(contents, file_name) - return await start_design_batch_generate(dbg_config, contents) - - -async def save_request_file(contents, file_name): - # 创建保存文件的目录(如果不存在) - save_dir = os.path.join(os.getcwd(), "service/design_batch", "request_data") - if not os.path.exists(save_dir): - os.makedirs(save_dir) - # 处理文件 - file_path = os.path.join(save_dir, file_name) - with open(file_path, "wb") as f: - f.write(contents) +"""design 批量处理 停用""" +# @router.post("/design_batch_generate") +# async def design_batch(file: UploadFile = File(...), +# tasks_id: str = Form(...), +# user_id: str = Form(...), +# file_name: str = Form(...), +# total: int = Form(...) +# ): +# dbg_config = DBGConfigModel( +# tasks_id=tasks_id, +# user_id=user_id, +# file_name=file_name, +# total=total +# ) +# contents = await file.read() +# file_name = file.filename +# await save_request_file(contents, file_name) +# return await start_design_batch_generate(dbg_config, contents) +# +# +# async def save_request_file(contents, file_name): +# # 创建保存文件的目录(如果不存在) +# save_dir = os.path.join(os.getcwd(), "service/design_batch", "request_data") +# if not os.path.exists(save_dir): +# os.makedirs(save_dir) +# # 处理文件 +# file_path = os.path.join(save_dir, file_name) +# with open(file_path, "wb") as f: +# f.write(contents) diff --git a/app/api/api_design_pre_processing.py b/app/api/api_design_pre_processing.py index f260e22..46f9e5e 100644 --- a/app/api/api_design_pre_processing.py +++ b/app/api/api_design_pre_processing.py @@ -30,10 +30,10 @@ def design_pre_processing(request_data: DesignPreProcessingModel): } """ try: - logger.info(f"design_pre_processing request item is : @@@@@@:{json.dumps(request_data.dict())}") + logger.info(f"design_pre_processing request item is : @@@@@@:{json.dumps(request_data.dict(), indent=4)}") 
server = DesignPreprocessing() data = server.pipeline(image_list=request_data.sketches) - logger.info(f"design response @@@@@@:{json.dumps(data)}") + logger.info(f"design response @@@@@@:{json.dumps(data, indent=4)}") except Exception as e: logger.warning(f"design Run Exception @@@@@@:{e}") raise HTTPException(status_code=404, detail=str(e)) diff --git a/app/api/api_generate_image.py b/app/api/api_generate_image.py index 5bd5404..14271c6 100644 --- a/app/api/api_generate_image.py +++ b/app/api/api_generate_image.py @@ -33,18 +33,30 @@ def generate_image(request_item: GenerateImageModel, background_tasks: Backgroun - **version**: 使用模型版本 fast 或者 high 示例参数: + 1. txt 2 img { - "tasks_id": "123-89", - "prompt": "skeleton sitting by the side of a river looking soulful, concert poster, 4k, artistic", - "image_url": "aida-collection-element/87/Printboard/842c09cf-7297-42d9-9e6e-9c17d4a13cb5.jpg", - "mode": "img2img", - "category": "sketch", - "gender": "male", - "version": "fast" + "tasks_id": "bd2cf809-24bc-49a6-91c9-193c6272a52e-2-89", + "prompt": "a single item of sketch of dress, 4k, white background", + "image_url": "", + "mode": "txt2img", + "category": "sketch", + "gender": "Female", + "version": "fast" } + 2. 
img 2 img + { + "tasks_id": "b861d4fa-5ae3-4a30-9c7a-7ba6bb9aa37b-1-89", + "prompt": "a single item of sketch of dress, 4k, white background", + "image_url": "aida-collection-element/89/Sketchboard/548da3a2-834f-49a7-b52c-e729c5ab5062.png", + "mode": "img2img", + "category": "sketch", + "gender": "Female", + "version": "fast" + } + """ try: - logger.info(f"generate_image request item is : @@@@@@:{json.dumps(request_item.dict())}") + logger.info(f"generate_image request item is : @@@@@@:{json.dumps(request_item.dict(), indent=4)}") service = GenerateImage(request_item) background_tasks.add_task(service.get_result) except Exception as e: @@ -65,42 +77,41 @@ def generate_image(tasks_id: str): return ResponseModel(data=data['data']) -'''multi view''' +'''multi view 停用''' + +# @router.post("/generate_multi_view") +# def generate_multi_view(request_item: GenerateMultiViewModel, background_tasks: BackgroundTasks): +# """ +# 创建一个具有以下参数的请求体: +# - **tasks_id**: 任务id 用于取消生成任务和获取生成结果 +# - **image_url**: 前视角图的输入,minio或S3 url 地址 +# +# 示例参数: +# { +# "tasks_id": "123-89", +# "image_url": "aida-collection-element/87/Printboard/842c09cf-7297-42d9-9e6e-9c17d4a13cb5.jpg" +# } +# """ +# try: +# logger.info(f"generate_multi_view request item is : @@@@@@:{json.dumps(request_item.dict(),indent=4)}") +# service = GenerateMultiView(request_item) +# background_tasks.add_task(service.get_result) +# except Exception as e: +# logger.warning(f"generate_multi_view Run Exception @@@@@@:{e}") +# raise HTTPException(status_code=404, detail=str(e)) +# return ResponseModel() -@router.post("/generate_multi_view") -def generate_multi_view(request_item: GenerateMultiViewModel, background_tasks: BackgroundTasks): - """ - 创建一个具有以下参数的请求体: - - **tasks_id**: 任务id 用于取消生成任务和获取生成结果 - - **image_url**: 前视角图的输入,minio或S3 url 地址 - - 示例参数: - { - "tasks_id": "123-89", - "image_url": "aida-collection-element/87/Printboard/842c09cf-7297-42d9-9e6e-9c17d4a13cb5.jpg" - } - """ - try: - logger.info(f"generate_multi_view 
request item is : @@@@@@:{json.dumps(request_item.dict())}") - service = GenerateMultiView(request_item) - background_tasks.add_task(service.get_result) - except Exception as e: - logger.warning(f"generate_multi_view Run Exception @@@@@@:{e}") - raise HTTPException(status_code=404, detail=str(e)) - return ResponseModel() - - -@router.get("/generate_multi_view_cancel/{tasks_id}") -def generate_multi_view(tasks_id: str): - try: - logger.info(f"generate_cancel request item is : @@@@@@:{tasks_id}") - data = generate_multi_view_cancel(tasks_id) - logger.info(f"generate_cancel response @@@@@@:{data}") - except Exception as e: - logger.warning(f"generate_cancel Run Exception @@@@@@:{e}") - raise HTTPException(status_code=404, detail=str(e)) - return ResponseModel(data=data['data']) +# @router.get("/generate_multi_view_cancel/{tasks_id}") +# def generate_multi_view(tasks_id: str): +# try: +# logger.info(f"generate_cancel request item is : @@@@@@:{tasks_id}") +# data = generate_multi_view_cancel(tasks_id) +# logger.info(f"generate_cancel response @@@@@@:{data}") +# except Exception as e: +# logger.warning(f"generate_cancel Run Exception @@@@@@:{e}") +# raise HTTPException(status_code=404, detail=str(e)) +# return ResponseModel(data=data['data']) '''single logo''' @@ -122,7 +133,7 @@ def generate_single_logo(request_item: GenerateSingleLogoImageModel, background_ } """ try: - logger.info(f"generate_single_logo request item is : @@@@@@:{json.dumps(request_item.dict())}") + logger.info(f"generate_single_logo request item is : @@@@@@:{json.dumps(request_item.dict(), indent=4)}") service = GenerateSingleLogoImage(request_item) background_tasks.add_task(service.get_result) except Exception as e: @@ -167,7 +178,7 @@ def generate_product_image(request_item: GenerateProductImageModel, background_t } """ try: - logger.info(f"generate_product_image request item is : @@@@@@:{json.dumps(request_item.dict())}") + logger.info(f"generate_product_image request item is : 
@@@@@@:{json.dumps(request_item.dict(),indent=4)}") service = GenerateProductImage(request_item) background_tasks.add_task(service.get_result) except Exception as e: @@ -188,166 +199,164 @@ def generate_product_image(tasks_id: str): return ResponseModel(data=data['data']) -'''relight image''' +'''relight image 停用''' + +# @router.post("/generate_relight_image") +# def generate_relight_image(request_item: GenerateRelightImageModel, background_tasks: BackgroundTasks): +# """ +# 创建一个具有以下参数的请求体: +# - **tasks_id**: 任务id 用于取消生成任务和获取生成结果 +# - **prompt**: 想要生成图片的描述词 +# - **image_url**: 被生成图片的S3或minio url地址 +# - **direction**: 光源方向 Right Light Left Light Top Light Bottom Light +# - **product_type**: 输入single item 还是 overall item +# +# +# 示例参数: +# { +# "tasks_id": "123-89", +# "prompt": "beautiful woman, detailed face, sunshine, outdoor, warm atmosphere", +# "image_url": "aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png", +# "direction": "Right Light", +# "product_type": "overall" +# } +# """ +# try: +# logger.info(f"generate_relight_image request item is : @@@@@@:{json.dumps(request_item.dict(),indent=4)}") +# service = GenerateRelightImage(request_item) +# background_tasks.add_task(service.get_result) +# except Exception as e: +# logger.warning(f"generate_relight_image Run Exception @@@@@@:{e}") +# raise HTTPException(status_code=404, detail=str(e)) +# return ResponseModel() +# +# +# @router.get("/generate_relight_image_cancel_cancel/{tasks_id}") +# def generate_relight_image(tasks_id: str): +# try: +# logger.info(f"generate_relight_image_cancel_cancel request item is : @@@@@@:{tasks_id}") +# data = generate_relight_image_cancel(tasks_id) +# logger.info(f"generate_relight_image_cancel_cancel response @@@@@@:{data}") +# except Exception as e: +# logger.warning(f"generate_relight_image_cancel_cancel Run Exception @@@@@@:{e}") +# raise HTTPException(status_code=404, detail=str(e)) +# return ResponseModel(data=data['data']) -@router.post("/generate_relight_image") 
-def generate_relight_image(request_item: GenerateRelightImageModel, background_tasks: BackgroundTasks): - """ - 创建一个具有以下参数的请求体: - - **tasks_id**: 任务id 用于取消生成任务和获取生成结果 - - **prompt**: 想要生成图片的描述词 - - **image_url**: 被生成图片的S3或minio url地址 - - **direction**: 光源方向 Right Light Left Light Top Light Bottom Light - - **product_type**: 输入single item 还是 overall item +"""batch generate img 停用""" + +# @router.post("/batch_generate_product_image") +# async def batch_generate_product(request_batch_item: BatchGenerateProductImageModel): +# """ +# 创建一个具有以下参数的请求体: +# - **tasks_id**: 任务id 用于获取生成结果 +# - **prompt**: 想要生成图片的描述词 +# - **image_url**: 被生成图片的S3或minio url地址 +# - **image_strength**: 生成强度,越低越接近原图 +# - **product_type**: 输入single item 还是 overall item +# - **batch_size**: 批生成数量 +# +# +# 示例参数: +# { +# "tasks_id": "123-89", +# "prompt": "the best quality, masterpiece. detailed, high-res, simple background, studio photography, extremely detailed, updo, detailed face, face, close-up, HDR, UHD, 8K realistic, Highly detailed, simple background, Studio lighting", +# "image_url": "aida-results/result_00097282-ebb2-11ee-a822-b48351119060.png", +# "image_strength": 0.8, +# "product_type": "overall", +# "batch_size": 1 +# } +# """ +# return await start_product_batch_generate(request_batch_item) +# +# +# @router.post("/batch_generate_relight_image") +# async def batch_generate_relight(request_batch_item: BatchGenerateRelightImageModel): +# """ +# 创建一个具有以下参数的请求体: +# - **tasks_id**: 任务id 用于获取生成结果 +# - **prompt**: 想要生成图片的描述词 +# - **image_url**: 被生成图片的S3或minio url地址 +# - **direction**: 光源方向 Right Light Left Light Top Light Bottom Light +# - **product_type**: 输入single item 还是 overall item +# - **batch_size**: 批生成数量 +# +# +# 示例参数: +# { +# "tasks_id": "123-89", +# "prompt": "beautiful woman, detailed face, sunshine, outdoor, warm atmosphere", +# "image_url": "aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png", +# "direction": "Right Light", +# "product_type": "overall", +# "batch_size": 1 
+# } +# """ +# return await start_relight_batch_generate(request_batch_item) - 示例参数: - { - "tasks_id": "123-89", - "prompt": "beautiful woman, detailed face, sunshine, outdoor, warm atmosphere", - "image_url": "aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png", - "direction": "Right Light", - "product_type": "overall" - } - """ - try: - logger.info(f"generate_relight_image request item is : @@@@@@:{json.dumps(request_item.dict())}") - service = GenerateRelightImage(request_item) - background_tasks.add_task(service.get_result) - except Exception as e: - logger.warning(f"generate_relight_image Run Exception @@@@@@:{e}") - raise HTTPException(status_code=404, detail=str(e)) - return ResponseModel() - - -@router.get("/generate_relight_image_cancel_cancel/{tasks_id}") -def generate_relight_image(tasks_id: str): - try: - logger.info(f"generate_relight_image_cancel_cancel request item is : @@@@@@:{tasks_id}") - data = generate_relight_image_cancel(tasks_id) - logger.info(f"generate_relight_image_cancel_cancel response @@@@@@:{data}") - except Exception as e: - logger.warning(f"generate_relight_image_cancel_cancel Run Exception @@@@@@:{e}") - raise HTTPException(status_code=404, detail=str(e)) - return ResponseModel(data=data['data']) - - -"""batch generate img""" - - -@router.post("/batch_generate_product_image") -async def batch_generate_product(request_batch_item: BatchGenerateProductImageModel): - """ - 创建一个具有以下参数的请求体: - - **tasks_id**: 任务id 用于获取生成结果 - - **prompt**: 想要生成图片的描述词 - - **image_url**: 被生成图片的S3或minio url地址 - - **image_strength**: 生成强度,越低越接近原图 - - **product_type**: 输入single item 还是 overall item - - **batch_size**: 批生成数量 - - - 示例参数: - { - "tasks_id": "123-89", - "prompt": "the best quality, masterpiece. 
detailed, high-res, simple background, studio photography, extremely detailed, updo, detailed face, face, close-up, HDR, UHD, 8K realistic, Highly detailed, simple background, Studio lighting", - "image_url": "aida-results/result_00097282-ebb2-11ee-a822-b48351119060.png", - "image_strength": 0.8, - "product_type": "overall", - "batch_size": 1 - } - """ - return await start_product_batch_generate(request_batch_item) - - -@router.post("/batch_generate_relight_image") -async def batch_generate_relight(request_batch_item: BatchGenerateRelightImageModel): - """ - 创建一个具有以下参数的请求体: - - **tasks_id**: 任务id 用于获取生成结果 - - **prompt**: 想要生成图片的描述词 - - **image_url**: 被生成图片的S3或minio url地址 - - **direction**: 光源方向 Right Light Left Light Top Light Bottom Light - - **product_type**: 输入single item 还是 overall item - - **batch_size**: 批生成数量 - - - 示例参数: - { - "tasks_id": "123-89", - "prompt": "beautiful woman, detailed face, sunshine, outdoor, warm atmosphere", - "image_url": "aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png", - "direction": "Right Light", - "product_type": "overall", - "batch_size": 1 - } - """ - return await start_relight_batch_generate(request_batch_item) - - -@router.post("/batch_generate_pose_transform_image") -async def batch_generate_pose_transform(request_batch_item: BatchPoseTransformModel): - """ - 创建一个具有以下参数的请求体: - - **tasks_id**: 任务id 用于取消生成任务和获取生成结果 - - **image_url**: 被生成图片的S3或minio url地址 - - **pose_id**: 1 - - **batch_size**: 批生成数量 - - - 示例参数: - { - "tasks_id": "123-89", - "image_url": "aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png", - "pose_id": "1", - "batch_size": 1 - } - """ - return await start_pose_transform_batch_generate(request_batch_item) - - -"""agent tool""" - - -@router.post("/agent_tool_generate_image") -def agent_tool_generate_image(request_item: AgentTollGenerateImageModel, background_tasks: BackgroundTasks): - """ - 创建一个具有以下参数的请求体: - - **prompt**: 想要生成图片的描述词 - - **category**: 生成图片的类别,sketch print 等等 - - **gender**: 
生成sketch专用,服装类别 - - **version**: 使用模型版本 fast 或者 high - - **size**: 生成数量 - - **version**: 使用模型版本 fast 或者 high - - - 示例参数: - { - "prompt": "a single item of sketch of Wabi-sabi, skirt, tiered, 4k, white background", - "category": "sketch", - "gender": "male", - "size":2, - "version":"high" - } - """ - try: - logger.info(f"agent_tool_generate_image request item is : @@@@@@:{request_item.dict()}") - request_data = request_item.dict() - service = AgentToolGenerateImage(request_data['version']) - image_url_list, clothing_category_list = service.get_result( - prompt=request_data['prompt'], - size=request_data['size'], - version=request_data['version'], - category=request_data['category'], - gender=request_data['gender'] - ) - data = { - "image_url_list": image_url_list, - "clothing_category_list": clothing_category_list - } - logger.info(f"agent_tool_generate_image response item is : @@@@@@:{data}") - except Exception as e: - logger.warning(f"agent_tool_generate_image Run Exception @@@@@@:{e}") - raise HTTPException(status_code=404, detail=str(e)) - return ResponseModel(data=data) +# @router.post("/batch_generate_pose_transform_image") +# async def batch_generate_pose_transform(request_batch_item: BatchPoseTransformModel): +# """ +# 创建一个具有以下参数的请求体: +# - **tasks_id**: 任务id 用于取消生成任务和获取生成结果 +# - **image_url**: 被生成图片的S3或minio url地址 +# - **pose_id**: 1 +# - **batch_size**: 批生成数量 +# +# +# 示例参数: +# { +# "tasks_id": "123-89", +# "image_url": "aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png", +# "pose_id": "1", +# "batch_size": 1 +# } +# """ +# return await start_pose_transform_batch_generate(request_batch_item) +# +# +# """agent tool""" +# +# +# @router.post("/agent_tool_generate_image") +# def agent_tool_generate_image(request_item: AgentTollGenerateImageModel): +# """ +# 创建一个具有以下参数的请求体: +# - **prompt**: 想要生成图片的描述词 +# - **category**: 生成图片的类别,sketch print 等等 +# - **gender**: 生成sketch专用,服装类别 +# - **version**: 使用模型版本 fast 或者 high +# - **size**: 生成数量 +# - **version**: 
使用模型版本 fast 或者 high +# +# +# 示例参数: +# { +# "prompt": "a single item of sketch of Wabi-sabi, skirt, tiered, 4k, white background", +# "category": "sketch", +# "gender": "male", +# "size":2, +# "version":"high" +# } +# """ +# try: +# logger.info(f"agent_tool_generate_image request item is : @@@@@@:{request_item.dict()}") +# request_data = request_item.dict() +# service = AgentToolGenerateImage(request_data['version']) +# image_url_list, clothing_category_list = service.get_result( +# prompt=request_data['prompt'], +# size=request_data['size'], +# version=request_data['version'], +# category=request_data['category'], +# gender=request_data['gender'] +# ) +# data = { +# "image_url_list": image_url_list, +# "clothing_category_list": clothing_category_list +# } +# logger.info(f"agent_tool_generate_image response item is : @@@@@@:{data}") +# except Exception as e: +# logger.warning(f"agent_tool_generate_image Run Exception @@@@@@:{e}") +# raise HTTPException(status_code=404, detail=str(e)) +# return ResponseModel(data=data) diff --git a/app/api/api_image2sketch.py b/app/api/api_image2sketch.py index cac7652..085fc72 100644 --- a/app/api/api_image2sketch.py +++ b/app/api/api_image2sketch.py @@ -14,22 +14,22 @@ logger = logging.getLogger() @router.post("/image2sketch") def image2sketch(request_item: Image2SketchModel): """ - 创建一个具有以下参数的请求体: - - **image_url**: 提取图片url - - **default_style**: 原始、 1、2、3、4、5 - - **sketch_bucket**: sketch保存的bucket - - **sketch_name**: sketch保存的object name + 创建一个具有以下参数的请求体: + - **image_url**: 提取图片url + - **default_style**: 原始、 1、2、3、4、5 + - **sketch_bucket**: sketch保存的bucket + - **sketch_name**: sketch保存的object name - 示例参数: - { - "image_url": "test/image2sketch/real_Dress_3200fecdc83d0c556c2bd96aedbd7fbf.jpg_Img.jpg", - "default_style": 0, - "sketch_bucket": "test", - "sketch_name": "image2sketch/area_fill_img.png" - } - """ + 示例参数: + { + "image_url": "test/image2sketch/real_Dress_3200fecdc83d0c556c2bd96aedbd7fbf.jpg_Img.jpg", + "default_style": 
0, + "sketch_bucket": "test", + "sketch_name": "image2sketch/area_fill_img.png" + } + """ try: - logger.info(f"image2sketch request item is : @@@@@@:{json.dumps(request_item.dict())}") + logger.info(f"image2sketch request item is : @@@@@@:{json.dumps(request_item.dict(),indent=4)}") service = LineArtService(request_item) result_url = service.get_result() except Exception as e: diff --git a/app/api/api_import_sys_sketch.py b/app/api/api_import_sys_sketch.py new file mode 100644 index 0000000..3654124 --- /dev/null +++ b/app/api/api_import_sys_sketch.py @@ -0,0 +1,116 @@ +import logging +import sys +from typing import Optional +from fastapi import APIRouter, HTTPException, Query +from concurrent.futures import ThreadPoolExecutor +import threading + +from app.schemas.response_template import ResponseModel +from app.service.recommendation_system.import_sys_sketch_to_milvus import main as import_main + +logger = logging.getLogger() +router = APIRouter() + +# 使用线程池执行器来运行长时间任务 +executor = ThreadPoolExecutor(max_workers=1) +# 用于跟踪任务状态 +task_status = {"running": False} + + +def run_import_task(batch_size: int, retry_times: int, limit: Optional[int], offset: int, skip_create_collection: bool): + """在后台线程中运行导入任务""" + original_argv = None + try: + task_status["running"] = True + # 保存原始 sys.argv + original_argv = sys.argv.copy() + + # 模拟命令行参数 + sys.argv = [ + "import_sys_sketch_to_milvus.py", + "--batch-size", str(batch_size), + "--retry-times", str(retry_times), + ] + if limit is not None: + sys.argv.extend(["--limit", str(limit)]) + if offset > 0: + sys.argv.extend(["--offset", str(offset)]) + if skip_create_collection: + sys.argv.append("--skip-create-collection") + + import_main() + task_status["running"] = False + logger.info("导入任务完成") + except Exception as e: + task_status["running"] = False + logger.error(f"导入任务失败: {e}", exc_info=True) + raise + finally: + # 恢复原始 sys.argv + if original_argv is not None: + sys.argv = original_argv + + +@router.post("/import-sys-sketch", 
response_model=ResponseModel) +async def import_sys_sketch( + batch_size: int = Query(1000, description="批量处理大小(默认:1000)"), + retry_times: int = Query(3, description="失败重试次数(默认:3)"), + limit: Optional[int] = Query(None, description="限制处理数量(用于测试,默认:不限制)"), + offset: int = Query(0, description="起始偏移量(默认:0)"), + skip_create_collection: bool = Query(False, description="跳过创建集合(如果集合已存在)"), +): + """ + 从 t_sys_file 导入系统图向量到 Milvus + + 该接口会异步执行导入任务,任务在后台运行。 + """ + try: + # 检查是否有任务正在运行 + if task_status["running"]: + raise HTTPException( + status_code=409, + detail="已有导入任务正在运行,请等待完成后再试" + ) + + # 在后台线程中执行任务 + executor.submit( + run_import_task, + batch_size, + retry_times, + limit, + offset, + skip_create_collection + ) + + return ResponseModel( + code=200, + msg="导入任务已启动,正在后台执行", + data={ + "status": "started", + "batch_size": batch_size, + "retry_times": retry_times, + "limit": limit, + "offset": offset, + "skip_create_collection": skip_create_collection + } + ) + except HTTPException: + raise + except Exception as e: + logger.error(f"启动导入任务失败: {e}", exc_info=True) + raise HTTPException(status_code=500, detail=f"启动导入任务失败: {str(e)}") + + +@router.get("/import-sys-sketch/status", response_model=ResponseModel) +async def get_import_status(): + """ + 获取导入任务状态 + """ + return ResponseModel( + code=200, + msg="OK", + data={ + "running": task_status["running"] + } + ) + diff --git a/app/api/api_mannequins_edit.py b/app/api/api_mannequins_edit.py index 6ff34d4..7a5a56f 100644 --- a/app/api/api_mannequins_edit.py +++ b/app/api/api_mannequins_edit.py @@ -35,10 +35,10 @@ def mannequins_edit(request_data: MannequinModel): }** """ try: - logger.info(f"mannequins_edit request item is : @@@@@@:{json.dumps(request_data.dict())}") + logger.info(f"mannequins_edit request item is : @@@@@@:{json.dumps(request_data.dict(),indent=4)}") service = MannequinEditService(request_data) data = service() - logger.info(f"mannequins_edit response @@@@@@:{json.dumps(data)}") + 
logger.info(f"mannequins_edit response @@@@@@:{json.dumps(data, indent=4)}") except Exception as e: logger.warning(f"mannequins_edit Run Exception @@@@@@:{e}") raise HTTPException(status_code=404, detail=str(e)) diff --git a/app/api/api_pose_transform.py b/app/api/api_pose_transform.py index 534e296..b915ad7 100644 --- a/app/api/api_pose_transform.py +++ b/app/api/api_pose_transform.py @@ -4,55 +4,55 @@ import logging import requests from fastapi import APIRouter, BackgroundTasks, HTTPException -from app.core.config import COMFYUI_SERVER_ADDRESS +from app.core.config import settings from app.schemas.comfyui_i2v import ComfyuiI2VModel, ComfyuiFLF2VModel from app.schemas.pose_transform import PoseTransformModel from app.schemas.response_template import ResponseModel from app.service.comfyui_I2V.flf2v_server import ComfyUIServerFLF2V from app.service.comfyui_I2V.i2v_server import ComfyUIServerI2V from app.service.comfyui_I2V.pose2v_server import ComfyUIServerPose2V -from app.service.generate_image.service_pose_transform import PoseTransformService, infer_cancel as pose_transform_infer_cancel router = APIRouter() logger = logging.getLogger() +"""停用""" -@router.post("/pose_transform") -def pose_transform(request_item: PoseTransformModel, background_tasks: BackgroundTasks): - """ - 创建一个具有以下参数的请求体: - - **tasks_id**: 任务id 用于取消生成任务和获取生成结果 - - **image_url**: 被生成图片的S3或minio url地址 - - **pose_id**: 1 +# @router.post("/pose_transform") +# def pose_transform(request_item: PoseTransformModel, background_tasks: BackgroundTasks): +# """ +# 创建一个具有以下参数的请求体: +# - **tasks_id**: 任务id 用于取消生成任务和获取生成结果 +# - **image_url**: 被生成图片的S3或minio url地址 +# - **pose_id**: 1 +# +# +# 示例参数: +# { +# "tasks_id": "123-89", +# "image_url": "aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png", +# "pose_id": "1" +# } +# """ +# try: +# logger.info(f"pose_transform request item is : @@@@@@:{json.dumps(request_item.dict(),indent=4)}") +# service = PoseTransformService(request_item) +# 
background_tasks.add_task(service.get_result) +# except Exception as e: +# logger.warning(f"pose_transform Run Exception @@@@@@:{e}") +# raise HTTPException(status_code=404, detail=str(e)) +# return ResponseModel() - 示例参数: - { - "tasks_id": "123-89", - "image_url": "aida-results/result_0000b606-1902-11ef-9424-0242ac180002.png", - "pose_id": "1" - } - """ - try: - logger.info(f"pose_transform request item is : @@@@@@:{json.dumps(request_item.dict())}") - service = PoseTransformService(request_item) - background_tasks.add_task(service.get_result) - except Exception as e: - logger.warning(f"pose_transform Run Exception @@@@@@:{e}") - raise HTTPException(status_code=404, detail=str(e)) - return ResponseModel() - - -@router.get("/pose_transform_cancel/{tasks_id}") -def pose_transform_cancel(tasks_id: str): - try: - logger.info(f"pose_transform_cancel request item is : @@@@@@:{tasks_id}") - data = pose_transform_infer_cancel(tasks_id) - logger.info(f"pose_transform_cancel response @@@@@@:{data}") - except Exception as e: - logger.warning(f"pose_transform_cancel Run Exception @@@@@@:{e}") - raise HTTPException(status_code=404, detail=str(e)) - return ResponseModel(data=data['data']) +# @router.get("/pose_transform_cancel/{tasks_id}") +# def pose_transform_cancel(tasks_id: str): +# try: +# logger.info(f"pose_transform_cancel request item is : @@@@@@:{tasks_id}") +# data = pose_transform_infer_cancel(tasks_id) +# logger.info(f"pose_transform_cancel response @@@@@@:{data}") +# except Exception as e: +# logger.warning(f"pose_transform_cancel Run Exception @@@@@@:{e}") +# raise HTTPException(status_code=404, detail=str(e)) +# return ResponseModel(data=data['data']) """ @@ -77,7 +77,7 @@ def comfyui_image_pose_2_video(request_item: PoseTransformModel, background_task } """ try: - logger.info(f"image_pose_2_video request item is : @@@@@@:{json.dumps(request_item.dict())}") + logger.info(f"image_pose_2_video request item is : @@@@@@:{json.dumps(request_item.dict(),indent=4)}") 
service = ComfyUIServerPose2V(request_item) background_tasks.add_task(service.get_result) except Exception as e: @@ -107,7 +107,7 @@ def comfyui_image_2_video(request_item: ComfyuiI2VModel, background_tasks: Backg } """ try: - logger.info(f"image_2_video request item is : @@@@@@:{json.dumps(request_item.dict())}") + logger.info(f"image_2_video request item is : @@@@@@:{json.dumps(request_item.dict(),indent=4)}") service = ComfyUIServerI2V(request_item) background_tasks.add_task(service.get_result) except Exception as e: @@ -139,7 +139,7 @@ def comfyui_flf_2_video(request_item: ComfyuiFLF2VModel, background_tasks: Backg } """ try: - logger.info(f"flf_2_video request item is : @@@@@@:{json.dumps(request_item.dict())}") + logger.info(f"flf_2_video request item is : @@@@@@:{json.dumps(request_item.dict(),indent=4)}") service = ComfyUIServerFLF2V(request_item) background_tasks.add_task(service.get_result) except Exception as e: @@ -153,7 +153,7 @@ def comfyui_i_2_video_cancel(tasks_id: str): try: logger.info(f"comfyui_i_2_video_cancel request item is : @@@@@@:{tasks_id}") response = requests.post( - f"http://{COMFYUI_SERVER_ADDRESS}/interrupt", + f"http://{settings.COMFYUI_SERVER_ADDRESS}/interrupt", json={"prompt_id": tasks_id} ) data = {} diff --git a/app/api/api_precompute.py b/app/api/api_precompute.py new file mode 100644 index 0000000..afebac7 --- /dev/null +++ b/app/api/api_precompute.py @@ -0,0 +1,85 @@ +import logging +from fastapi import APIRouter, HTTPException +from concurrent.futures import ThreadPoolExecutor + +from app.schemas.response_template import ResponseModel +from app.service.recommendation_system.precompute import run_precompute + +logger = logging.getLogger() +router = APIRouter() + +# 使用线程池执行器来运行长时间任务 +executor = ThreadPoolExecutor(max_workers=1) +# 用于跟踪任务状态 +task_status = {"running": False} + + +def run_precompute_task(): + """在后台线程中运行预计算任务""" + try: + task_status["running"] = True + logger.info("开始执行预计算任务...") + run_precompute() + 
task_status["running"] = False + logger.info("预计算任务完成") + except Exception as e: + task_status["running"] = False + logger.error(f"预计算任务失败: {e}", exc_info=True) + raise + + +@router.post("/precompute", response_model=ResponseModel) +async def precompute(): + """ + 运行预计算任务 + + 该接口会异步执行预计算任务,包括: + 1. 优化数据库表结构 + 2. 历史数据迁移 + 3. 初始用户偏好向量生成 + + 任务在后台运行。 + """ + try: + # 检查是否有任务正在运行 + if task_status["running"]: + raise HTTPException( + status_code=409, + detail="已有预计算任务正在运行,请等待完成后再试" + ) + + # 在后台线程中执行任务 + executor.submit(run_precompute_task) + + return ResponseModel( + code=200, + msg="预计算任务已启动,正在后台执行", + data={ + "status": "started", + "tasks": [ + "优化数据库表结构", + "历史数据迁移", + "初始用户偏好向量生成" + ] + } + ) + except HTTPException: + raise + except Exception as e: + logger.error(f"启动预计算任务失败: {e}", exc_info=True) + raise HTTPException(status_code=500, detail=f"启动预计算任务失败: {str(e)}") + + +@router.get("/precompute/status", response_model=ResponseModel) +async def get_precompute_status(): + """ + 获取预计算任务状态 + """ + return ResponseModel( + code=200, + msg="OK", + data={ + "running": task_status["running"] + } + ) + diff --git a/app/api/api_prompt_generation.py b/app/api/api_prompt_generation.py index 4df957f..b4a42b8 100644 --- a/app/api/api_prompt_generation.py +++ b/app/api/api_prompt_generation.py @@ -1,13 +1,10 @@ -import json import logging -import time from fastapi import APIRouter, HTTPException from app.schemas.prompt_generation import PromptGenerationImageModel, ImageRequest from app.schemas.response_template import ResponseModel -from app.service.prompt_generation.chatgpt_for_translation import get_translation_from_llama3, \ - get_prompt_from_image +from app.service.prompt_generation.chatgpt_for_translation import get_translation_from_llama3, get_prompt_from_image router = APIRouter() logger = logging.getLogger() @@ -34,19 +31,19 @@ def prompt_generation(request_data: PromptGenerationImageModel): raise HTTPException(status_code=404, detail=str(e)) return 
ResponseModel(data=data) - -@router.post("/img2prompt") -def get_prompt_from_img(img: ImageRequest): - """ - 自动识别图片并输出为prompt - - :param img: 图片的minio地址 - :return: 图片的文字描述 - """ - text = ("Please describe the clothing in the image and provide a line art description of the outfit. " - "The description should allow for the reconstruction of the corresponding line art based on the details " - "given.") - logger.info(f"get_prompt_from_img request item is : @@@@@@:{img}") - description = get_prompt_from_image(img, text) - logger.info(f"生成的图片描述 response @@@@@@:{description}") - return description +# 停用 +# @router.post("/img2prompt") +# def get_prompt_from_img(img: ImageRequest): +# """ +# 自动识别图片并输出为prompt +# +# :param img: 图片的minio地址 +# :return: 图片的文字描述 +# """ +# text = ("Please describe the clothing in the image and provide a line art description of the outfit. " +# "The description should allow for the reconstruction of the corresponding line art based on the details " +# "given.") +# logger.info(f"get_prompt_from_img request item is : @@@@@@:{img}") +# description = get_prompt_from_image(img, text) +# logger.info(f"生成的图片描述 response @@@@@@:{description}") +# return description diff --git a/app/api/api_query_image.py b/app/api/api_query_image.py index ca0dbe6..1a32c30 100644 --- a/app/api/api_query_image.py +++ b/app/api/api_query_image.py @@ -26,9 +26,9 @@ def query_image(request_data: QueryImageModel): } """ try: - logger.info(f"query_image request item is : @@@@@@:{json.dumps(request_data.dict())}") + logger.info(f"query_image request item is : @@@@@@:{json.dumps(request_data.dict(), indent=4)}") data = query(request_data.gender, request_data.content) - logger.info(f"query_image response @@@@@@:{json.dumps(data)}") + logger.info(f"query_image response @@@@@@:{json.dumps(data, indent=4)}") except Exception as e: logger.warning(f"query_image Run Exception @@@@@@:{e}") raise HTTPException(status_code=404, detail=str(e)) diff --git a/app/api/api_recommendation.py 
b/app/api/api_recommendation.py index 477ab08..ac1e9ec 100644 --- a/app/api/api_recommendation.py +++ b/app/api/api_recommendation.py @@ -1,207 +1,206 @@ import io import logging import sys -import time -from typing import List -import os -import json -import math -import random -import numpy as np +from typing import List, Optional +from fastapi import HTTPException, APIRouter, Query from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger -from fastapi import HTTPException, APIRouter -from app.service.recommend.service import load_resources, matrix_data +from app.service.recommendation_system.recommendation_api import get_recommendations as get_new_recommendations +from app.service.recommendation_system.incremental_listener import start_background_listener +from app.service.recommendation_system.milvus_client import create_collection sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8') logger = logging.getLogger() router = APIRouter() +# ========== 旧版推荐接口(基于 npy 矩阵,已废弃)========== +# @router.get("/recommend/{user_id}/{category}/{num_recommendations}/{brand_id}/{brand_scale}", response_model=List[str]) +# async def get_recommendations(user_id: int, category: str, brand_id: int, brand_scale: float, num_recommendations: int = 10): +# """ +# :param user_id: 4 +# :param category: female_skirt +# :param num_recommendations: 1 +# :return: +# [ +# "aida-sys-image/images/female/skirt/903000017.jpg" +# ] +# """ +# try: +# start_time = time.time() +# cache_key = (user_id, category) +# # === 新增:用户存在性检查 === +# user_exists_inter = user_id in matrix_data["user_index_interaction"] +# user_exists_feat = user_id in matrix_data["user_index_feature"] +# +# # 任一矩阵不存在用户则返回随机推荐 +# if not (user_exists_inter and user_exists_feat): +# logger.info(f"用户 {user_id} 数据不完整,触发随机推荐") +# return get_random_recommendations(category, num_recommendations) +# +# # 检查缓存 +# if cache_key in matrix_data["cached_scores"]: +# 
processed_inter, processed_feat = matrix_data["cached_scores"][cache_key] +# valid_sketch_idxs_inter = matrix_data["cached_valid_idxs"][cache_key] +# else: +# # 实时计算逻辑(同原代码) +# user_idx_inter = matrix_data["user_index_interaction"].get(user_id) +# user_idx_feature = matrix_data["user_index_feature"].get(user_id) +# +# category_iids = matrix_data["category_to_iids"].get(category, []) +# valid_sketch_idxs_inter = [ +# idx for iid, idx in matrix_data["sketch_index_interaction"].items() +# if iid in category_iids +# ] +# +# # 处理交互分数 +# raw_inter_scores = [] +# if user_idx_inter is not None and valid_sketch_idxs_inter: +# raw_inter_scores = matrix_data["interaction_matrix"][user_idx_inter, valid_sketch_idxs_inter] +# processed_inter = raw_inter_scores * 0.7 +# +# # 处理特征分数 +# valid_sketch_idxs_feature = [ +# idx for iid, idx in matrix_data["sketch_index_feature"].items() +# if iid in category_iids +# ] +# raw_feat_scores = [] +# if user_idx_feature is not None and valid_sketch_idxs_feature: +# raw_feat_scores = matrix_data["feature_matrix"][user_idx_feature, valid_sketch_idxs_feature] +# raw_feat_scores = (raw_feat_scores - np.min(raw_feat_scores)) / ( +# np.max(raw_feat_scores) - np.min(raw_feat_scores) + 1e-8) +# processed_feat = raw_feat_scores +# else: +# processed_feat = np.array([]) +# +# # 更新缓存 +# matrix_data["cached_scores"][cache_key] = (processed_inter, processed_feat) +# matrix_data["cached_valid_idxs"][cache_key] = valid_sketch_idxs_inter +# +# # 合并分数 +# if brand_id is not None: +# brand_idx_feature = matrix_data["brand_index_map"].get(brand_id) +# +# brand_feat_valid = ( +# matrix_data["brand_feature_matrix"].size > 0 and # 矩阵非空 +# brand_idx_feature is not None and +# valid_sketch_idxs_feature # 有可用索引 +# ) +# +# if brand_feat_valid: +# raw_brand_feat_scores = matrix_data["brand_feature_matrix"][ +# brand_idx_feature, valid_sketch_idxs_feature +# ] +# raw_brand_feat_scores = (raw_brand_feat_scores - np.min(raw_brand_feat_scores)) / ( +# 
np.max(raw_brand_feat_scores) - np.min(raw_brand_feat_scores) + 1e-8 +# ) +# processed_brand_feat = raw_brand_feat_scores +# +# # 如果 processed_feat 是空的,替换为全 0,避免 shape 不一致 +# if processed_feat.size == 0: +# processed_feat = np.zeros_like(processed_brand_feat) +# +# final_scores = processed_inter + 0.3 * ( +# (1 - brand_scale) * processed_feat + brand_scale * processed_brand_feat +# ) +# else: +# # brand 信息不可用 +# final_scores = processed_inter + 0.3 * processed_feat if processed_feat.size > 0 else processed_inter +# else: +# final_scores = processed_inter + 0.3 * processed_feat if processed_feat.size > 0 else processed_inter +# +# valid_sketch_idxs = matrix_data["cached_valid_idxs"][cache_key] +# +# # 概率采样 +# scores = np.array(final_scores) +# +# # 调整后的概率转换(带温度控制的softmax) +# def calibrated_softmax(scores, temperature=1.0): +# scores = scores / temperature +# scale = scores - max(scores) +# exps = np.exp(scale) +# return exps / np.sum(exps) +# +# probs = calibrated_softmax(scores, 0.09) +# +# chosen_indices = np.random.choice( +# len(valid_sketch_idxs), +# size=min(num_recommendations, len(valid_sketch_idxs)), +# p=probs, +# replace=False +# ) +# recommendations = [matrix_data["iid_to_sketch"][valid_sketch_idxs[idx]] for idx in chosen_indices] +# +# logger.info(f"推荐生成完成,耗时: {time.time() - start_time:.2f}秒") +# return recommendations +# except Exception as e: +# logger.error(f"推荐失败: {str(e)}", exc_info=True) +# raise HTTPException(status_code=500, detail=str(e)) + @router.on_event("startup") async def startup_event(): - # 初始加载 - load_resources() + """启动时初始化增量监听任务""" + try: + # 屏蔽 apscheduler 的 INFO 日志 + logging.getLogger("apscheduler").setLevel(logging.WARNING) - # 配置定时任务 - scheduler = BackgroundScheduler() - scheduler.add_job( - load_resources, - trigger=CronTrigger(hour=0, minute=30), - name="每日资源刷新" - ) - scheduler.start() - logger.info("定时任务已启动") + # 确保 Milvus 集合已创建(若已存在则直接返回) + try: + create_collection() + except Exception as exc: + logger.error("Milvus 
集合创建/检查失败,不影响服务继续启动: %s", exc, exc_info=True) + + # 配置定时任务 + scheduler = BackgroundScheduler() + start_background_listener(scheduler) + scheduler.start() + logger.info("增量监听定时任务已启动") + except Exception as e: + logger.error(f"启动增量监听任务失败: {e}", exc_info=True) -def softmax(scores): - max_score = max(scores) - exp_scores = [math.exp(s - max_score) for s in scores] - sum_exp = sum(exp_scores) - return [s / sum_exp for s in exp_scores] +@router.get("/recommend/{user_id}/{category}", response_model=List[str]) +async def recommend( + user_id: int, + category: str, + style: Optional[str] = Query( + None, + description="风格样式(可选):若传入,则在利用分支对同 style 的候选进行加分", + ), +): + """新版推荐接口(Milvus + Redis 偏好向量)。""" + try: + results = get_new_recommendations(user_id, category, style) + path = results[0] if results else "" + return [path] + except Exception as e: + logger.error("新版推荐接口失败 [user=%s, category=%s]: %s", user_id, category, e, exc_info=True) + raise HTTPException(status_code=500, detail=str(e)) -# def get_random_recommendations(category: str, num: int) -> List[str]: -# """根据预加载热度向量推荐(冷启动)""" -# try: -# heat_data = matrix_data.get("heat_data", {}) -# -# if category not in heat_data: -# raise ValueError(f"热度数据缺少类别 {category},使用随机推荐") -# -# heat_dict = heat_data[category] # {url: score} -# urls = list(heat_dict.keys()) -# scores = list(heat_dict.values()) -# -# if not urls: -# raise ValueError("该类别下无热度记录,使用随机推荐") -# -# probs = softmax(scores) -# sample_size = min(num, len(urls)) -# sampled_urls = random.choices(urls, weights=probs, k=sample_size) -# -# return sampled_urls -# -# except Exception as e: -# # 回退:完全随机推荐 -# all_iids = list(matrix_data["iid_to_sketch"].keys()) -# category_iids = matrix_data["category_to_iids"].get(category, all_iids) -# sample_size = min(num, len(category_iids)) -# sampled = np.random.choice(category_iids, size=sample_size, replace=False) -# return [matrix_data["iid_to_sketch"][iid] for iid in sampled] - -def get_random_recommendations(category: str, num: 
int) -> List[str]: - """全品类随机推荐""" - all_iids = list(matrix_data["iid_to_sketch"].keys()) - # 优先从当前品类选择 - category_iids = matrix_data["category_to_iids"].get(category, all_iids) - # 确保不超出实际数量 - sample_size = min(num, len(category_iids)) - sampled = np.random.choice(category_iids, size=sample_size, replace=False) - return [matrix_data["iid_to_sketch"][iid] for iid in sampled] - - -@router.get("/recommend/{user_id}/{category}/{num_recommendations}/{brand_id}/{brand_scale}", response_model=List[str]) -async def get_recommendations(user_id: int, category: str, brand_id: int, brand_scale: float, num_recommendations: int = 10): +@router.get("/redis/user_pref") +async def get_all_user_preferences(): """ - :param user_id: 4 - :param category: female_skirt - :param num_recommendations: 1 - :return: - [ - "aida-sys-image/images/female/skirt/903000017.jpg" - ] + 获取所有以 user_pref 为前缀的 Redis key 信息 """ try: - logger.info(f"user_id:{user_id}-----category:{category}-----brand_id:{brand_id}-----brand_scale:{brand_scale}-----num_recommendations:{num_recommendations}") - start_time = time.time() - cache_key = (user_id, category) - # === 新增:用户存在性检查 === - user_exists_inter = user_id in matrix_data["user_index_interaction"] - user_exists_feat = user_id in matrix_data["user_index_feature"] - - # 任一矩阵不存在用户则返回随机推荐 - if not (user_exists_inter and user_exists_feat): - logger.info(f"用户 {user_id} 数据不完整,触发随机推荐") - return get_random_recommendations(category, num_recommendations) - - # 检查缓存 - if cache_key in matrix_data["cached_scores"]: - processed_inter, processed_feat = matrix_data["cached_scores"][cache_key] - valid_sketch_idxs_inter = matrix_data["cached_valid_idxs"][cache_key] - else: - # 实时计算逻辑(同原代码) - user_idx_inter = matrix_data["user_index_interaction"].get(user_id) - user_idx_feature = matrix_data["user_index_feature"].get(user_id) - - category_iids = matrix_data["category_to_iids"].get(category, []) - valid_sketch_idxs_inter = [ - idx for iid, idx in 
matrix_data["sketch_index_interaction"].items() - if iid in category_iids - ] - - # 处理交互分数 - raw_inter_scores = [] - if user_idx_inter is not None and valid_sketch_idxs_inter: - raw_inter_scores = matrix_data["interaction_matrix"][user_idx_inter, valid_sketch_idxs_inter] - processed_inter = raw_inter_scores * 0.7 - - # 处理特征分数 - valid_sketch_idxs_feature = [ - idx for iid, idx in matrix_data["sketch_index_feature"].items() - if iid in category_iids - ] - raw_feat_scores = [] - if user_idx_feature is not None and valid_sketch_idxs_feature: - raw_feat_scores = matrix_data["feature_matrix"][user_idx_feature, valid_sketch_idxs_feature] - raw_feat_scores = (raw_feat_scores - np.min(raw_feat_scores)) / ( - np.max(raw_feat_scores) - np.min(raw_feat_scores) + 1e-8) - processed_feat = raw_feat_scores - else: - processed_feat = np.array([]) - - # 更新缓存 - matrix_data["cached_scores"][cache_key] = (processed_inter, processed_feat) - matrix_data["cached_valid_idxs"][cache_key] = valid_sketch_idxs_inter - - # 合并分数 - if brand_id is not None: - brand_idx_feature = matrix_data["brand_index_map"].get(brand_id) - - brand_feat_valid = ( - matrix_data["brand_feature_matrix"].size > 0 and # 矩阵非空 - brand_idx_feature is not None and - valid_sketch_idxs_feature # 有可用索引 - ) - - if brand_feat_valid: - raw_brand_feat_scores = matrix_data["brand_feature_matrix"][ - brand_idx_feature, valid_sketch_idxs_feature - ] - raw_brand_feat_scores = (raw_brand_feat_scores - np.min(raw_brand_feat_scores)) / ( - np.max(raw_brand_feat_scores) - np.min(raw_brand_feat_scores) + 1e-8 - ) - processed_brand_feat = raw_brand_feat_scores - - # 如果 processed_feat 是空的,替换为全 0,避免 shape 不一致 - if processed_feat.size == 0: - processed_feat = np.zeros_like(processed_brand_feat) - - final_scores = processed_inter + 0.3 * ( - (1 - brand_scale) * processed_feat + brand_scale * processed_brand_feat - ) - else: - # brand 信息不可用 - final_scores = processed_inter + 0.3 * processed_feat if processed_feat.size > 0 else processed_inter 
- else: - final_scores = processed_inter + 0.3 * processed_feat if processed_feat.size > 0 else processed_inter - - valid_sketch_idxs = matrix_data["cached_valid_idxs"][cache_key] - - # 概率采样 - scores = np.array(final_scores) - - # 调整后的概率转换(带温度控制的softmax) - def calibrated_softmax(scores, temperature=1.0): - scores = scores / temperature - scale = scores - max(scores) - exps = np.exp(scale) - return exps / np.sum(exps) - - probs = calibrated_softmax(scores, 0.09) - - chosen_indices = np.random.choice( - len(valid_sketch_idxs), - size=min(num_recommendations, len(valid_sketch_idxs)), - p=probs, - replace=False - ) - recommendations = [matrix_data["iid_to_sketch"][valid_sketch_idxs[idx]] for idx in chosen_indices] - - logger.info(f"推荐生成完成,耗时: {time.time() - start_time:.2f}秒") - return recommendations - + from app.service.utils.redis_utils import Redis + from app.service.recommendation_system.config import REDIS_KEY_USER_PREF_PREFIX + + # 扫描所有匹配 user_pref:* 的 key + pattern = f"{REDIS_KEY_USER_PREF_PREFIX}:*" + keys = Redis.scan_keys(pattern) + + # 直接返回所有 key 和原始 value + result = {} + for key in keys: + # 读取对应的值 + value = Redis.read(key) + if value: + result[key] = value + + return result + except Exception as e: - logger.error(f"推荐失败: {str(e)}", exc_info=True) - raise HTTPException(status_code=500, detail=str(e)) + logger.error("获取用户偏好数据失败: %s", e, exc_info=True) + raise HTTPException(status_code=500, detail=str(e)) \ No newline at end of file diff --git a/app/api/api_route.py b/app/api/api_route.py index eedb6fb..1af7b9f 100644 --- a/app/api/api_route.py +++ b/app/api/api_route.py @@ -1,38 +1,42 @@ from fastapi import APIRouter - -from app.api import api_attribute_retrieve, api_query_image +from app.api import api_attribute_retrieve from app.api import api_brand_dna -from app.api import api_brighten -from app.api import api_chat_robot from app.api import api_clothing_seg from app.api import api_design from app.api import api_design_pre_processing -from app.api import 
api_extraction_project_info from app.api import api_generate_image -from app.api import api_image2sketch from app.api import api_mannequins_edit from app.api import api_pose_transform +from app.api import api_precompute from app.api import api_prompt_generation from app.api import api_recommendation -from app.api import api_super_resolution from app.api import api_test router = APIRouter() router.include_router(api_test.router, tags=["test"], prefix="/test") -router.include_router(api_super_resolution.router, tags=["super_resolution"], prefix="/api") router.include_router(api_generate_image.router, tags=["generate_image"], prefix="/api") router.include_router(api_attribute_retrieve.router, tags=["attribute_retrieve"], prefix="/api") router.include_router(api_design.router, tags=['design'], prefix="/api") -router.include_router(api_chat_robot.router, tags=['chat_robot'], prefix="/api") router.include_router(api_prompt_generation.router, tags=['prompt_generation'], prefix="/api") router.include_router(api_design_pre_processing.router, tags=['design_pre_processing'], prefix="/api") -router.include_router(api_image2sketch.router, tags=['api_image2sketch'], prefix="/api") -router.include_router(api_brighten.router, tags=['api_brighten'], prefix="/api") -router.include_router(api_query_image.router, tags=['api_query_image'], prefix="/api") router.include_router(api_brand_dna.router, tags=['api_brand_dna'], prefix="/api") router.include_router(api_recommendation.router, tags=['api_recommendation'], prefix="/api") +router.include_router(api_precompute.router, tags=['api_precompute'], prefix="/api") router.include_router(api_mannequins_edit.router, tags=['api_mannequins_edit'], prefix="/api") router.include_router(api_pose_transform.router, tags=['api_pose_transform'], prefix="/api") router.include_router(api_clothing_seg.router, tags=['api_clothing_seg'], prefix="/api") -router.include_router(api_extraction_project_info.router, tags=['api_extraction_project_info'], 
prefix="/api") + +"""停用""" +# from app.api import api_chat_robot +# from app.api import api_query_image +# from app.api import api_brighten +# from app.api import api_extraction_project_info +# from app.api import api_image2sketch +# from app.api import api_super_resolution +# router.include_router(api_image2sketch.router, tags=['api_image2sketch'], prefix="/api") +# router.include_router(api_query_image.router, tags=['api_query_image'], prefix="/api") +# router.include_router(api_chat_robot.router, tags=['chat_robot'], prefix="/api") +# router.include_router(api_super_resolution.router, tags=["super_resolution"], prefix="/api") +# router.include_router(api_brighten.router, tags=['api_brighten'], prefix="/api") +# router.include_router(api_extraction_project_info.router, tags=['api_extraction_project_info'], prefix="/api") diff --git a/app/api/api_super_resolution.py b/app/api/api_super_resolution.py index ce853fd..1cbb938 100644 --- a/app/api/api_super_resolution.py +++ b/app/api/api_super_resolution.py @@ -27,7 +27,7 @@ def super_resolution(request_item: SuperResolutionModel, background_tasks: Backg } """ try: - logger.info(f"super_resolution request item is : @@@@@@:{json.dumps(request_item.dict())}") + logger.info(f"super_resolution request item is : @@@@@@:{json.dumps(request_item.dict(),indent=4)}") service = SuperResolution(request_item) background_tasks.add_task(service.sr_result) except Exception as e: diff --git a/app/api/api_test.py b/app/api/api_test.py index c7bdb62..7e34afa 100644 --- a/app/api/api_test.py +++ b/app/api/api_test.py @@ -4,8 +4,7 @@ import logging from fastapi import APIRouter from fastapi import HTTPException -from app.core.config import SR_RABBITMQ_QUEUES, GI_RABBITMQ_QUEUES, GPI_RABBITMQ_QUEUES, GRI_RABBITMQ_QUEUES, OSS, JAVA_STREAM_API_URL, GMV_RABBITMQ_QUEUES, SLOGAN_RABBITMQ_QUEUES, GEN_SINGLE_LOGO_RABBITMQ_QUEUES, PS_RABBITMQ_QUEUES, BATCH_GPI_RABBITMQ_QUEUES, BATCH_GRI_RABBITMQ_QUEUES, \ - BATCH_PS_RABBITMQ_QUEUES, RABBITMQ_ENV 
+from app.core.config import settings, SR_RABBITMQ_QUEUES, GMV_RABBITMQ_QUEUES, PS_RABBITMQ_QUEUES, SLOGAN_RABBITMQ_QUEUES, GI_RABBITMQ_QUEUES, GPI_RABBITMQ_QUEUES, GRI_RABBITMQ_QUEUES, BATCH_GPI_RABBITMQ_QUEUES, BATCH_GRI_RABBITMQ_QUEUES, BATCH_PS_RABBITMQ_QUEUES from app.schemas.response_template import ResponseModel logger = logging.getLogger() @@ -15,9 +14,9 @@ router = APIRouter() @router.get("{id}") def test(id: int): data = { - "RABBITMQ_ENV":RABBITMQ_ENV, - "超分 SR_RABBITMQ_QUEUES": SR_RABBITMQ_QUEUES, - "多视角 GMV_RABBITMQ_QUEUES": GMV_RABBITMQ_QUEUES, + "RABBITMQ_ENV": settings.SERVE_ENV, + # "超分 SR_RABBITMQ_QUEUES": SR_RABBITMQ_QUEUES, + # "多视角 GMV_RABBITMQ_QUEUES": GMV_RABBITMQ_QUEUES, "pose transform PS_RABBITMQ_QUEUES": PS_RABBITMQ_QUEUES, "logan SLOGAN_RABBITMQ_QUEUES": SLOGAN_RABBITMQ_QUEUES, "image and single logo GI_RABBITMQ_QUEUES": GI_RABBITMQ_QUEUES, @@ -29,10 +28,9 @@ def test(id: int): "batch relight BATCH_GRI_RABBITMQ_QUEUES": BATCH_GRI_RABBITMQ_QUEUES, "batch pose transform BATCH_PS_RABBITMQ_QUEUES": BATCH_PS_RABBITMQ_QUEUES, - "JAVA_STREAM_API_URL": JAVA_STREAM_API_URL, - "local_oss_server": OSS + "JAVA_STREAM_API_URL": settings.JAVA_STREAM_API_URL, } - logger.info(json.dumps(data)) + logger.info(json.dumps(data, ensure_ascii=False, indent=4)) if id == 1: raise HTTPException(status_code=404, detail="Item not found") diff --git a/app/core/config.backup.py b/app/core/config.backup.py new file mode 100644 index 0000000..e6c7283 --- /dev/null +++ b/app/core/config.backup.py @@ -0,0 +1,235 @@ +import os + +import pika +from dotenv import load_dotenv +from pydantic import BaseSettings + +BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')) +load_dotenv(os.path.join(BASE_DIR, '.env')) + + +class Settings(BaseSettings): + PROJECT_NAME: str = 'FASTAPI BASE' + SECRET_KEY: str = '' + API_PREFIX: str = '' + BACKEND_CORS_ORIGINS: list[str] = ['*'] + DATABASE_URL: str = '' + ACCESS_TOKEN_EXPIRE_SECONDS: int = 60 * 60 * 24 * 7 # 
Token expired after 7 days + SECURITY_ALGORITHM: str = 'HS256' + LOGGING_CONFIG_FILE: str = os.path.join(BASE_DIR, 'logging_env.py') + + +OSS = "minio" +DEBUG = False +if DEBUG: + LOGS_PATH = "logs/" + CATEGORY_PATH = "service/attribute/config/descriptor/category/category_dis.csv" + SEG_CACHE_PATH = "../seg_cache/" + POSE_TRANSFORM_VIDEO_PATH = "../pose_transform_video/" + RECOMMEND_PATH_PREFIX = "service/recommend/" + CHROMADB_PATH = "./chromadb/" +else: + LOGS_PATH = "app/logs/" + CATEGORY_PATH = "app/service/attribute/config/descriptor/category/category_dis.csv" + SEG_CACHE_PATH = "/seg_cache/" + POSE_TRANSFORM_VIDEO_PATH = "/pose_transform_video/" + RECOMMEND_PATH_PREFIX = "app/service/recommend/" + CHROMADB_PATH = "/chromadb/" + +# RABBITMQ_ENV = "" # 生产环境 +RABBITMQ_ENV = os.getenv("RABBITMQ_ENV", "-dev") +# RABBITMQ_ENV = "-local" # 本地测试环境 + +if RABBITMQ_ENV == "-dev": + JAVA_STREAM_API_URL = f"https://develop.api.aida.com.hk/api/third/party/receiveDesignResults" +elif RABBITMQ_ENV == "-prod": + JAVA_STREAM_API_URL = f"https://api.aida.com.hk/api/third/party/receiveDesignResults" + +settings = Settings() + +# minio 配置 +MINIO_URL = "www.minio-api.aida.com.hk" +MINIO_ACCESS = 'vXKFLSJkYeEq2DrSZvkB' +MINIO_SECRET = 'uKTZT3x7C43WvPN9QTc99DiRkwddWZrG9Uh3JVlR' +MINIO_SECURE = True + +# S3 配置 +S3_ACCESS_KEY = "AKIAVD3OJIMF6UJFLSHZ" +S3_AWS_SECRET_ACCESS_KEY = "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" +S3_REGION_NAME = "ap-east-1" + +# redis 配置 +REDIS_HOST = "10.1.1.240" +REDIS_PORT = "6379" +REDIS_DB = "2" + +# rabbitmq config +RABBITMQ_PARAMS = { + "host": "18.167.251.121", + "port": 5672, + "credentials": pika.credentials.PlainCredentials(username='rabbit', password='123456'), + "virtual_host": "/" +} + +# milvus 配置 +MILVUS_URL = "http://10.1.1.240:19530" +MILVUS_TOKEN = "root:Milvus" +MILVUS_ALIAS = "default" +MILVUS_TABLE_KEYPOINT = "keypoint_cache_2" +MILVUS_TABLE_SEG = "seg_cache" + +# Mysql 配置 +DB_HOST = '18.167.251.121' # 数据库主机地址 +# DB_PORT = int( 33006) 
+DB_PORT = 33008 # 数据库端口 +DB_USERNAME = 'aida_con_python' # 数据库用户名 +DB_PASSWORD = '123456' # 数据库密码 +DB_NAME = 'aida' # 数据库库名 + +# openai +os.environ['SERPAPI_API_KEY'] = "a793513017b0718db7966207c31703d280d12435c982f1e67bbcbffa52e7632c" +OPENAI_STREAM = True +BUFFER_THRESHOLD = 6 # must be even number +SINGLE_TOKEN_THRESHOLD = 200 +TOKEN_THRESHOLD = 600 +OPENAI_TEMPERATURE = 0 + +# OPENAI_API_KEY = "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" +OPENAI_API_KEY = "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" +OPENAI_MODEL = "gpt-3.5-turbo-0613" +OPENAI_MODEL_LIST = {"gpt-3.5-turbo-0613", + "gpt-3.5-turbo-16k-0613", + "gpt-4-0314", + "gpt-4-32k-0314", + "gpt-4-0613", + "gpt-4-32k-0613", } + +# SR service config +SR_MODEL_NAME = "super_resolution" +SR_TRITON_URL = "10.1.1.240:10031" +SR_MINIO_BUCKET = "aida-users" +SR_RABBITMQ_QUEUES = f"SuperResolution{RABBITMQ_ENV}" + +# GenerateImage service config +FAST_GI_MODEL_URL = '10.1.1.243:10011' +FAST_GI_MODEL_NAME = 'stable_diffusion_xl' + +GI_MODEL_URL = '10.1.1.240:10061' +GI_MODEL_NAME = 'flux' + +GMV_MODEL_URL = '10.1.1.243:10081' +GMV_MODEL_NAME = 'multi_view' + +GMV_RABBITMQ_QUEUES = f"GenerateMultiView{RABBITMQ_ENV}" + +GI_MINIO_BUCKET = "aida-users" +GI_RABBITMQ_QUEUES = f"GenerateImage{RABBITMQ_ENV}" +GI_SYS_IMAGE_URL = "aida-sys-image/generate_image/white_image.jpg" + +# SLOGAN service config +SLOGAN_RABBITMQ_QUEUES = f"Slogan{RABBITMQ_ENV}" + +# Generate Single Logo service config +GSL_MODEL_URL = '10.1.1.243:10041' +GSL_MINIO_BUCKET = "aida-users" +GSL_MODEL_NAME = 'stable_diffusion_xl_transparent' +GEN_SINGLE_LOGO_RABBITMQ_QUEUES = f"GenSingleLogo{RABBITMQ_ENV}" + +# Generate Product service config +# GPI_RABBITMQ_QUEUES = os.getenv("GEN_PRODUCT_IMAGE_RABBITMQ_QUEUES", f"ToProductImage{RABBITMQ_ENV}") +# GPI_MODEL_NAME_OVERALL = 'sdxl_ensemble_all' +# GPI_MODEL_URL = '10.1.1.243:10051' + +# Generate Product service config 旧版product img 模型 +GPI_RABBITMQ_QUEUES = f"ToProductImage{RABBITMQ_ENV}" 
+BATCH_GPI_RABBITMQ_QUEUES = f"BatchToProductImage{RABBITMQ_ENV}" +GPI_MODEL_NAME_OVERALL = 'diffusion_ensemble_all' +GPI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_cnet' +GPI_MODEL_URL = '10.1.1.243:10051' + +# Generate Single Logo service config +GRI_RABBITMQ_QUEUES = f"Relight{RABBITMQ_ENV}" +BATCH_GRI_RABBITMQ_QUEUES = f"BatchRelight{RABBITMQ_ENV}" +GRI_MODEL_NAME_OVERALL = 'diffusion_relight_ensemble' +GRI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_relight' +GRI_MODEL_URL = '10.1.1.240:10051' + +# Pose Transform service config + +PS_RABBITMQ_QUEUES = f"PoseTransform{RABBITMQ_ENV}" +BATCH_PS_RABBITMQ_QUEUES = f"BatchPoseTransform{RABBITMQ_ENV}" +PT_MODEL_URL = '10.1.1.243:10061' + +# SEG service config +SEGMENTATION = { + "new_model_name": "seg_knet", + "name": "seg_ocrnet_hr18", + "input": "seg_input__0", + "output": "seg_output__0", +} +# ollama config +OLLAMA_URL = "http://10.1.1.240:11434/api/embeddings" + +# design batch +BATCH_DESIGN_RABBITMQ_QUEUES = f"DesignBatch{RABBITMQ_ENV}" + +# DESIGN config +DESIGN_MODEL_URL = '10.1.1.240:10000' +AIDA_CLOTHING = "aida-clothing" +KEYPOINT_RESULT_TABLE_FIELD_SET = ('neckline_left', 'neckline_right', 'shoulder_left', 'shoulder_right', 'armpit_left', 'armpit_right', + 'cuff_left_in', 'cuff_left_out', 'cuff_right_in', 'cuff_right_out', 'waistband_left', 'waistband_right') + +# DESIGN 预处理 +IF_DEBUG_SHOW = False + +# 优先级 +PRIORITY_DICT = { + 'earring_front': 99, + 'bag_front': 98, + 'hairstyle_front': 97, + 'outwear_front': 20, + 'tops_front': 19, + 'dress_front': 18, + 'blouse_front': 17, + 'skirt_front': 16, + 'trousers_front': 15, + 'bottoms_front': 14, + 'shoes_right': 1, + 'shoes_left': 1, + 'body': 0, + 'bottoms_back': -14, + 'trousers_back': -15, + 'skirt_back': -16, + 'blouse_back': -17, + 'dress_back': -18, + 'tops_back': -19, + 'outwear_back': -20, + 'hairstyle_back': -97, + 'bag_back': -98, + 'earring_back': -99, +} + +QWEN_API_KEY = "sk-f31c29e61ac2498ba5e307aaa6dc10e0" + +DB_CONFIG = { + "host": 
"18.167.251.121", + "port": 3306, + "user": "root", + "password": "QWa998345", + "database": "aida", + "charset": "utf8mb4" +} + +TABLE_CATEGORIES = { + "female_dress": "female/dress", + "female_outwear": "female/outwear", + "female_trousers": "female/trousers", + "female_skirt": "female/skirt", + "female_blouse": "female/blouse", + "male_tops": "male/tops", + "male_bottoms": "male/bottoms", + "male_outwear": "male/outwear" +} + +# --- ComfyUI 配置信息 --- +COMFYUI_SERVER_ADDRESS = "10.1.2.227:8080" # 替换为您的 ComfyUI 服务器地址 diff --git a/app/core/config.py b/app/core/config.py index 30ca5ef..77d4509 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -1,188 +1,95 @@ -import os - -import pika -from dotenv import load_dotenv -from pydantic import BaseSettings - -BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')) -load_dotenv(os.path.join(BASE_DIR, '.env')) +from pydantic import Field +from pydantic_settings import BaseSettings, SettingsConfigDict class Settings(BaseSettings): - PROJECT_NAME: str = 'FASTAPI BASE' - SECRET_KEY: str = '' - API_PREFIX: str = '' - BACKEND_CORS_ORIGINS: list[str] = ['*'] - DATABASE_URL: str = '' - ACCESS_TOKEN_EXPIRE_SECONDS: int = 60 * 60 * 24 * 7 # Token expired after 7 days - SECURITY_ALGORITHM: str = 'HS256' - LOGGING_CONFIG_FILE: str = os.path.join(BASE_DIR, 'logging_env.py') + """ + 应用配置类。Pydantic Settings 会自动从环境变量和 .env 文件中加载这些值。 + """ + model_config = SettingsConfigDict( + env_file='.env', + env_file_encoding='utf-8', + # extra='ignore' # 忽略环境变量中多余的键 + ) + # --- 服务端口配置信息 --- + PORT: int = Field(default=8001, description="") + # --- 服务环境 配置信息 --- + SERVE_ENV: str = Field(default='', description="") + # --- 开发状态 配置信息 --- + DEBUG: bool = Field(default=False, description="") + # --- 千问api 配置信息 --- + QWEN_API_KEY: str = Field(default="", description="") + # --- ComfyUI 配置信息 --- + COMFYUI_SERVER_ADDRESS: str = Field(default='', description="") -OSS = "minio" -DEBUG = False -if DEBUG: - LOGS_PATH = "logs/" - 
 CATEGORY_PATH = "service/attribute/config/descriptor/category/category_dis.csv" - SEG_CACHE_PATH = "../seg_cache/" - POSE_TRANSFORM_VIDEO_PATH = "../pose_transform_video/" - RECOMMEND_PATH_PREFIX = "service/recommend/" - CHROMADB_PATH = "./chromadb/" -else: - LOGS_PATH = "app/logs/" - CATEGORY_PATH = "app/service/attribute/config/descriptor/category/category_dis.csv" - SEG_CACHE_PATH = "/seg_cache/" - POSE_TRANSFORM_VIDEO_PATH = "/pose_transform_video/" - RECOMMEND_PATH_PREFIX = "app/service/recommend/" - CHROMADB_PATH = "/chromadb/" + # --- minio 配置信息 --- + MINIO_URL: str = Field(default='', description="") + MINIO_ACCESS: str = Field(default='', description="") + MINIO_SECRET: str = Field(default='', description="") + MINIO_SECURE: bool = Field(default=True, description="") -# RABBITMQ_ENV = "" # 生产环境 -RABBITMQ_ENV = os.getenv("RABBITMQ_ENV", "-prod") -# RABBITMQ_ENV = "-local" # 本地测试环境 + # --- redis 配置信息 --- + REDIS_HOST: str = Field(default='', description="") + REDIS_PORT: str = Field(default='', description="") + REDIS_DB: int = Field(default=0, description="") + + # --- mysql 配置信息 --- + MYSQL_HOST: str = Field(default='', description="") + MYSQL_PORT: int = Field(default=3306, description="") + MYSQL_USER: str = Field(default='', description="") + MYSQL_PASSWORD: str = Field(default='', description="") + MYSQL_DB: str = Field(default='', description="") + MYSQL_CHARSET: str = Field(default='utf8mb4', description="") + + # --- rabbit-mq 配置信息 --- + MQ_HOST: str = Field(default='', description="") + MQ_PORT: str = Field(default='', description="") + MQ_USERNAME: str = Field(default='', description="") + MQ_PASSWORD: str = Field(default='', description="") + MQ_VIRTUAL_HOST: str = Field(default='/', description="") + MQ_ENV: str = Field(default='', description="") + + # --- milvus 配置信息 --- + MILVUS_URL: str = Field(default='', description="") + MILVUS_TOKEN: str = Field(default='', description="") + MILVUS_ALIAS: str = Field(default='', description="") + + # --- 
ollama 配置信息 --- + CHROMADB_PATH: str = Field(default='', description="") + + # --- ollama 配置信息 --- + OLLAMA_URL: str = Field(default='', description="") + + # --- Design Callback Java 接口 --- + JAVA_STREAM_API_URL: str = Field(default='', description="") + + # --- 服务器IP --- + A6000_SERVICE_HOST: str = Field(default='', description="") + B_4_X_4090_SERVICE_HOST: str = Field(default='', description="") + + # --- 其他配置信息 以下均为Docker容器内配置--- + LOGS_PATH: str = Field(default="/logs/", description="") + CATEGORY_PATH: str = Field(default="/app/service/attribute/config/descriptor/category/category_dis.csv", description="") + SEG_CACHE_PATH: str = Field(default="/seg_cache/", description="") + RECOMMEND_PATH_PREFIX: str = Field(default="/app/service/recommend/", description="") -if RABBITMQ_ENV == "-dev": - JAVA_STREAM_API_URL = f"https://develop.api.aida.com.hk/api/third/party/receiveDesignResults" -elif RABBITMQ_ENV == "-prod": - JAVA_STREAM_API_URL = f"https://api.aida.com.hk/api/third/party/receiveDesignResults" settings = Settings() -# minio 配置 -MINIO_URL = "www.minio-api.aida.com.hk" -MINIO_ACCESS = 'vXKFLSJkYeEq2DrSZvkB' -MINIO_SECRET = 'uKTZT3x7C43WvPN9QTc99DiRkwddWZrG9Uh3JVlR' -MINIO_SECURE = True - -# S3 配置 -S3_ACCESS_KEY = "AKIAVD3OJIMF6UJFLSHZ" -S3_AWS_SECRET_ACCESS_KEY = "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" -S3_REGION_NAME = "ap-east-1" - -# redis 配置 -REDIS_HOST = "10.1.1.240" -REDIS_PORT = "6379" -REDIS_DB = "2" - -# rabbitmq config -RABBITMQ_PARAMS = { - "host": "18.167.251.121", - "port": 5672, - "credentials": pika.credentials.PlainCredentials(username='rabbit', password='123456'), - "virtual_host": "/" +"""Design 服务""" +# 推荐服装类别映射 +TABLE_CATEGORIES = { + "female_dress": "female/dress", + "female_outwear": "female/outwear", + "female_trousers": "female/trousers", + "female_skirt": "female/skirt", + "female_blouse": "female/blouse", + "male_tops": "male/tops", + "male_bottoms": "male/bottoms", + "male_outwear": "male/outwear" } - -# milvus 配置 -MILVUS_URL 
= "http://10.1.1.240:19530" -MILVUS_TOKEN = "root:Milvus" -MILVUS_ALIAS = "default" -MILVUS_TABLE_KEYPOINT = "keypoint_cache_2" -MILVUS_TABLE_SEG = "seg_cache" - -# Mysql 配置 -DB_HOST = '18.167.251.121' # 数据库主机地址 -# DB_PORT = int( 33006) -DB_PORT = 33008 # 数据库端口 -DB_USERNAME = 'aida_con_python' # 数据库用户名 -DB_PASSWORD = '123456' # 数据库密码 -DB_NAME = 'aida' # 数据库库名 - -# openai -os.environ['SERPAPI_API_KEY'] = "a793513017b0718db7966207c31703d280d12435c982f1e67bbcbffa52e7632c" -OPENAI_STREAM = True -BUFFER_THRESHOLD = 6 # must be even number -SINGLE_TOKEN_THRESHOLD = 200 -TOKEN_THRESHOLD = 600 -OPENAI_TEMPERATURE = 0 - -# OPENAI_API_KEY = "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" -OPENAI_API_KEY = "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" -OPENAI_MODEL = "gpt-3.5-turbo-0613" -OPENAI_MODEL_LIST = {"gpt-3.5-turbo-0613", - "gpt-3.5-turbo-16k-0613", - "gpt-4-0314", - "gpt-4-32k-0314", - "gpt-4-0613", - "gpt-4-32k-0613", } - -# SR service config -SR_MODEL_NAME = "super_resolution" -SR_TRITON_URL = "10.1.1.240:10031" -SR_MINIO_BUCKET = "aida-users" -SR_RABBITMQ_QUEUES = f"SuperResolution{RABBITMQ_ENV}" - -# GenerateImage service config -FAST_GI_MODEL_URL = '10.1.1.243:10011' -FAST_GI_MODEL_NAME = 'stable_diffusion_xl' - -GI_MODEL_URL = '10.1.1.240:10061' -GI_MODEL_NAME = 'flux' - -GMV_MODEL_URL = '10.1.1.243:10081' -GMV_MODEL_NAME = 'multi_view' - -GMV_RABBITMQ_QUEUES = f"GenerateMultiView{RABBITMQ_ENV}" - -GI_MINIO_BUCKET = "aida-users" -GI_RABBITMQ_QUEUES = f"GenerateImage{RABBITMQ_ENV}" -GI_SYS_IMAGE_URL = "aida-sys-image/generate_image/white_image.jpg" - -# SLOGAN service config -SLOGAN_RABBITMQ_QUEUES = f"Slogan{RABBITMQ_ENV}" - -# Generate Single Logo service config -GSL_MODEL_URL = '10.1.1.243:10041' -GSL_MINIO_BUCKET = "aida-users" -GSL_MODEL_NAME = 'stable_diffusion_xl_transparent' -GEN_SINGLE_LOGO_RABBITMQ_QUEUES = f"GenSingleLogo{RABBITMQ_ENV}" - -# Generate Product service config -# GPI_RABBITMQ_QUEUES = 
os.getenv("GEN_PRODUCT_IMAGE_RABBITMQ_QUEUES", f"ToProductImage{RABBITMQ_ENV}") -# GPI_MODEL_NAME_OVERALL = 'sdxl_ensemble_all' -# GPI_MODEL_URL = '10.1.1.243:10051' - -# Generate Product service config 旧版product img 模型 -GPI_RABBITMQ_QUEUES = f"ToProductImage{RABBITMQ_ENV}" -BATCH_GPI_RABBITMQ_QUEUES = f"BatchToProductImage{RABBITMQ_ENV}" -GPI_MODEL_NAME_OVERALL = 'diffusion_ensemble_all' -GPI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_cnet' -GPI_MODEL_URL = '10.1.1.243:10051' - -# Generate Single Logo service config -GRI_RABBITMQ_QUEUES = f"Relight{RABBITMQ_ENV}" -BATCH_GRI_RABBITMQ_QUEUES = f"BatchRelight{RABBITMQ_ENV}" -GRI_MODEL_NAME_OVERALL = 'diffusion_relight_ensemble' -GRI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_relight' -GRI_MODEL_URL = '10.1.1.240:10051' - -# Pose Transform service config - -PS_RABBITMQ_QUEUES = f"PoseTransform{RABBITMQ_ENV}" -BATCH_PS_RABBITMQ_QUEUES = f"BatchPoseTransform{RABBITMQ_ENV}" -PT_MODEL_URL = '10.1.1.243:10061' - -# SEG service config -SEGMENTATION = { - "new_model_name": "seg_knet", - "name": "seg_ocrnet_hr18", - "input": "seg_input__0", - "output": "seg_output__0", -} -# ollama config -OLLAMA_URL = "http://10.1.1.240:11434/api/embeddings" - -# design batch -BATCH_DESIGN_RABBITMQ_QUEUES = f"DesignBatch{RABBITMQ_ENV}" - -# DESIGN config -DESIGN_MODEL_URL = '10.1.1.240:10000' -AIDA_CLOTHING = "aida-clothing" -KEYPOINT_RESULT_TABLE_FIELD_SET = ('neckline_left', 'neckline_right', 'shoulder_left', 'shoulder_right', 'armpit_left', 'armpit_right', - 'cuff_left_in', 'cuff_left_out', 'cuff_right_in', 'cuff_right_out', 'waistband_left', 'waistband_right') - -# DESIGN 预处理 -IF_DEBUG_SHOW = False - -# 优先级 +# Design前后排优先级 PRIORITY_DICT = { 'earring_front': 99, 'bag_front': 98, @@ -208,28 +115,71 @@ PRIORITY_DICT = { 'bag_back': -98, 'earring_back': -99, } +# Design 关键点字段 +KEYPOINT_RESULT_TABLE_FIELD_SET = ('neckline_left', 'neckline_right', 'shoulder_left', 'shoulder_right', 'armpit_left', 'armpit_right', 'cuff_left_in', 'cuff_left_out', 
'cuff_right_in', 'cuff_right_out', 'waistband_left', 'waistband_right') +# milvus配置信息 +MILVUS_TABLE_KEYPOINT = "keypoint_cache_2" -QWEN_API_KEY = "sk-f31c29e61ac2498ba5e307aaa6dc10e0" +# ollama 地址 +OLLAMA_URL = f"http://{settings.A6000_SERVICE_HOST}:11434/api/embeddings" -DB_CONFIG = { - "host": "18.167.251.121", - "port": 3306, - "user": "root", - "password": "QWa998345", - "database": "aida", - "charset": "utf8mb4" -} +"""Triton Server Config""" +# Design +DESIGN_MODEL_URL = f'{settings.A6000_SERVICE_HOST}:10000' +DESIGN_MODEL_NAME = 'seg_knet' +# Generate Image +GI_MODEL_URL = f'{settings.A6000_SERVICE_HOST}:10061' +GI_MODEL_NAME = 'flux' +# Generate Single Logo +GSL_MODEL_URL = f'{settings.B_4_X_4090_SERVICE_HOST}:10041' +GSL_MODEL_NAME = 'stable_diffusion_xl_transparent' +# Generate Product (整套和单品) +GPI_MODEL_URL = f'{settings.B_4_X_4090_SERVICE_HOST}:10051' +GPI_MODEL_NAME_OVERALL = 'diffusion_ensemble_all' +GPI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_cnet' -TABLE_CATEGORIES = { - "female_dress": "female/dress", - "female_outwear": "female/outwear", - "female_trousers": "female/trousers", - "female_skirt": "female/skirt", - "female_blouse": "female/blouse", - "male_tops": "male/tops", - "male_bottoms": "male/bottoms", - "male_outwear": "male/outwear" -} +# 以下停用中...************* +# 多视角生成 +GMV_MODEL_URL = f'{settings.B_4_X_4090_SERVICE_HOST}:10081' +GMV_MODEL_NAME = 'multi_view' +# 超分 +SR_MODEL_NAME = "super_resolution" +SR_TRITON_URL = f"{settings.A6000_SERVICE_HOST}:10031" +# 打光 +GRI_MODEL_URL = f'{settings.A6000_SERVICE_HOST}:10051' +GRI_MODEL_NAME_OVERALL = 'diffusion_relight_ensemble' +GRI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_relight' +# agent 图片生成 +FAST_GI_MODEL_URL = f'{settings.B_4_X_4090_SERVICE_HOST}:10011' +FAST_GI_MODEL_NAME = 'stable_diffusion_xl' +# 图转视频 triton版 +PT_MODEL_URL = f'{settings.B_4_X_4090_SERVICE_HOST}:10061' -# --- ComfyUI 配置信息 --- -COMFYUI_SERVER_ADDRESS = "10.1.2.227:8080" # 替换为您的 ComfyUI 服务器地址 +# ************* + +"""MQ 
队列信息""" +# 生成图片 moodboard printboard sketchboard +GI_RABBITMQ_QUEUES = f"GenerateImage-{settings.SERVE_ENV}" +# 生成slogan +SLOGAN_RABBITMQ_QUEUES = f"Slogan-{settings.SERVE_ENV}" +# 转产品图 +GPI_RABBITMQ_QUEUES = f"ToProductImage-{settings.SERVE_ENV}" +# 产品图转视频 +PS_RABBITMQ_QUEUES = f"PoseTransform-{settings.SERVE_ENV}" + +# 以下停用中...************* +# 产品图打光 +GRI_RABBITMQ_QUEUES = f"Relight-{settings.SERVE_ENV}" +# 超分 +SR_RABBITMQ_QUEUES = f"SuperResolution-{settings.SERVE_ENV}" +# 生成多视图 +GMV_RABBITMQ_QUEUES = f"GenerateMultiView-{settings.SERVE_ENV}" +# 批量转产品图 +BATCH_GPI_RABBITMQ_QUEUES = f"BatchToProductImage-{settings.SERVE_ENV}" +# 批量打光 +BATCH_GRI_RABBITMQ_QUEUES = f"BatchRelight-{settings.SERVE_ENV}" +# 批量图片转视频 +BATCH_PS_RABBITMQ_QUEUES = f"BatchPoseTransform-{settings.SERVE_ENV}" +# 批量design +BATCH_DESIGN_RABBITMQ_QUEUES = f"DesignBatch-{settings.SERVE_ENV}" +# ************* diff --git a/app/core/mysql_config.py b/app/core/mysql_config.py new file mode 100644 index 0000000..1dd846a --- /dev/null +++ b/app/core/mysql_config.py @@ -0,0 +1,10 @@ +from app.core.config import settings + +DB_CONFIG = { + "host": settings.MYSQL_HOST, + "port": settings.MYSQL_PORT, + "user": settings.MYSQL_USER, + "password": settings.MYSQL_PASSWORD, + "database": settings.MYSQL_DB, + "charset": settings.MYSQL_CHARSET, +} diff --git a/app/core/rabbit_mq_config.py b/app/core/rabbit_mq_config.py new file mode 100644 index 0000000..e4634b0 --- /dev/null +++ b/app/core/rabbit_mq_config.py @@ -0,0 +1,10 @@ +# rabbitmq config +import pika +from app.core.config import settings + +RABBITMQ_PARAMS = { + "host": settings.MQ_HOST, + "port": settings.MQ_PORT, + "credentials": pika.credentials.PlainCredentials(username=settings.MQ_USERNAME, password=settings.MQ_PASSWORD), + "virtual_host": settings.MQ_VIRTUAL_HOST, +} diff --git a/app/design_batch/request_data/requests_data.json b/app/design_batch/request_data/requests_data.json index 1dba8d1..32e7c52 100644 --- 
a/app/design_batch/request_data/requests_data.json +++ b/app/design_batch/request_data/requests_data.json @@ -79,12 +79,8 @@ } ] } - ], "process_id": "87", - "tasks_id": , + "tasks_id": "" } - -//用 openai jsonl -// \ No newline at end of file diff --git a/app/main.py b/app/main.py index cbdce4a..e1c6f95 100644 --- a/app/main.py +++ b/app/main.py @@ -1,31 +1,40 @@ +# 1. 这里的顺序至关重要!必须在最顶端 +import sys + +try: + import asyncore +except ImportError: + import pyasyncore + + sys.modules['asyncore'] = pyasyncore import logging.config import uvicorn -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger from fastapi import FastAPI -from fastapi import HTTPException, Request +from fastapi import HTTPException from fastapi.responses import JSONResponse from app.api.api_route import router from app.core.config import settings from app.core.record_api_count import count_api_calls from app.schemas.response_template import ResponseModel -from app.service.recommend.service import load_resources from logging_env import LOGGER_CONFIG_DICT +from dotenv import load_dotenv +from starlette.middleware.cors import CORSMiddleware logging.config.dictConfig(LOGGER_CONFIG_DICT) logging.getLogger("pika").setLevel(logging.WARNING) -from starlette.middleware.cors import CORSMiddleware - logger = logging.getLogger(__name__) +load_dotenv() + def get_application() -> FastAPI: application = FastAPI( - title=settings.PROJECT_NAME, docs_url="/docs", redoc_url='/re-docs', - openapi_url=f"{settings.API_PREFIX}/openapi.json", + docs_url="/docs", + redoc_url='/re-docs', + openapi_url=f"/openapi.json", description=''' Base frame with FastAPI - Super Resolution API @@ -34,13 +43,13 @@ def get_application() -> FastAPI: ) application.add_middleware( CORSMiddleware, - allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS], + allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) 
application.middleware("http")(count_api_calls) - application.include_router(router=router, prefix=settings.API_PREFIX) + application.include_router(router=router) return application @@ -48,14 +57,12 @@ app = get_application() @app.exception_handler(HTTPException) -async def http_exception_handler(request: Request, exc: HTTPException): +async def http_exception_handler(exc: HTTPException): return JSONResponse( status_code=exc.status_code, content=ResponseModel(code=exc.status_code, msg=exc.detail, data=exc.detail).dict() ) - - if __name__ == '__main__': - uvicorn.run(app, host="0.0.0.0", port=8000) + uvicorn.run(app, host="0.0.0.0", port=settings.PORT) diff --git a/app/schemas/design.py b/app/schemas/design.py index 3fb6de7..6f0a633 100644 --- a/app/schemas/design.py +++ b/app/schemas/design.py @@ -1,4 +1,15 @@ -from pydantic import BaseModel +from typing import List, Optional + +from pydantic import BaseModel, Field + + +class SAMRequestModel(BaseModel): + user_id: int = Field(..., description="用户id, 必填字段") + image_path: str = Field(..., description="图片路径,必填字段") + type: str = Field(..., description="推理类型,必填字段") + points: Optional[List[List[float]]] = None + labels: Optional[List[int]] = None + box: Optional[List[int]] = None class DesignModel(BaseModel): diff --git a/app/service/attribute/service_att_recognition.py b/app/service/attribute/service_att_recognition.py index f93146e..c007184 100644 --- a/app/service/attribute/service_att_recognition.py +++ b/app/service/attribute/service_att_recognition.py @@ -1,22 +1,24 @@ #!/usr/bin/env python # -*- coding: UTF-8 -*- -import logging from pprint import pprint -import torch + import cv2 import mmcv import numpy as np import pandas as pd -from minio import Minio +import torch import tritonclient.http as httpclient -from app.core.config import * +from minio import Minio + +from app.core.config import settings, DESIGN_MODEL_URL from app.schemas.attribute_retrieve import AttributeRecognitionModel -from 
app.service.utils.oss_client import oss_get_image +from app.service.utils.new_oss_client import oss_get_image + +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) class AttributeRecognition: def __init__(self, const, request_data): - # self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) self.request_data = [] for i, sketch in enumerate(request_data): self.request_data.append( @@ -96,11 +98,12 @@ class AttributeRecognition: res = {**dict1, **dict2} return res - def get_image(self, url): + @staticmethod + def get_image(url): # response = self.minio_client.get_object(url.split("/", 1)[0], url.split("/", 1)[1]) # img = np.frombuffer(response.data, np.uint8) # 转成8位无符号整型 # img = cv2.imdecode(img, cv2.IMREAD_COLOR) # - img = oss_get_image(bucket=url.split("/", 1)[0], object_name=url.split("/", 1)[1], data_type="cv2") + img = oss_get_image(oss_client=minio_client, bucket=url.split("/", 1)[0], object_name=url.split("/", 1)[1], data_type="cv2") img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) return img diff --git a/app/service/attribute/service_category_recognition.py b/app/service/attribute/service_category_recognition.py index 7c277c9..5a04ba2 100644 --- a/app/service/attribute/service_category_recognition.py +++ b/app/service/attribute/service_category_recognition.py @@ -7,24 +7,25 @@ @Date :2023/9/16 18:31:08 @detail : """ +from minio import Minio from skimage import transform import cv2 import mmcv import numpy as np import pandas as pd -from minio import Minio import tritonclient.http as httpclient import torch -from app.core.config import * +from app.core.config import settings, DESIGN_MODEL_URL from app.schemas.attribute_retrieve import CategoryRecognitionModel -from app.service.utils.oss_client import oss_get_image +from app.service.utils.new_oss_client import oss_get_image + +minio_client = Minio(settings.MINIO_URL, 
access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) class CategoryRecognition: def __init__(self, request_data): - self.attr_type = pd.read_csv(CATEGORY_PATH) - # self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) + self.attr_type = pd.read_csv(settings.CATEGORY_PATH) self.request_data = [] self.triton_client = httpclient.InferenceServerClient(url=DESIGN_MODEL_URL) for sketch in request_data: @@ -46,13 +47,14 @@ class CategoryRecognition: preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0) return preprocessed_img - def get_image(self, url): + @staticmethod + def get_image(url): # Get data of an object. # Read data from response. # response = self.minio_client.get_object(url.split("/", 1)[0], url.split("/", 1)[1]) # img = np.frombuffer(response.data, np.uint8) # 转成8位无符号整型 # img = cv2.imdecode(img, cv2.IMREAD_COLOR) # 解码 - img = oss_get_image(bucket=url.split("/", 1)[0], object_name=url.split("/", 1)[1], data_type="cv2") + img = oss_get_image(oss_client=minio_client, bucket=url.split("/", 1)[0], object_name=url.split("/", 1)[1], data_type="cv2") img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) return img @@ -68,7 +70,7 @@ class CategoryRecognition: colattr = list(self.attr_type['labelName']) - task = self.attr_type['taskName'][0] + # self.attr_type['taskName'][0] maxsc = np.max(scores[0][:5]) indexs = np.argwhere(scores == maxsc)[:, 1] diff --git a/app/service/brand_dna/service.py b/app/service/brand_dna/service.py index 393d75a..148e1e9 100644 --- a/app/service/brand_dna/service.py +++ b/app/service/brand_dna/service.py @@ -9,15 +9,16 @@ import torch.nn.functional as F import tritonclient.http as httpclient from minio import Minio -from app.core.config import MINIO_URL, MINIO_ACCESS, MINIO_SECRET, MINIO_SECURE, DESIGN_MODEL_URL, CATEGORY_PATH +from app.core.config import DESIGN_MODEL_URL +from app.core.config import settings from app.schemas.brand_dna 
import BrandDnaModel -from app.service.attribute.config import local_debug_const, const +from app.service.attribute.config import const from app.service.utils.generate_uuid import generate_uuid from app.service.utils.new_oss_client import oss_upload_image, oss_get_image -logger = logging.getLogger() +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) -minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) +logger = logging.getLogger() class BrandDna: @@ -25,7 +26,7 @@ class BrandDna: self.sketch_bucket = "test" self.image_url = request_item.image_url self.is_brand_dna = request_item.is_brand_dna - self.attr_type = pd.read_csv(CATEGORY_PATH) + self.attr_type = pd.read_csv(settings.CATEGORY_PATH) # self.attr_type = pd.read_csv(r"E:\workspace\trinity_client_aida\app\service\attribute\config\descriptor\category\category_dis.csv") self.att_client = httpclient.InferenceServerClient(url=DESIGN_MODEL_URL) self.seg_client = httpclient.InferenceServerClient(url='10.1.1.243:30000') diff --git a/app/service/brand_dna/service_generate_brand_info.py b/app/service/brand_dna/service_generate_brand_info.py index 367a4d6..fa8d2e5 100644 --- a/app/service/brand_dna/service_generate_brand_info.py +++ b/app/service/brand_dna/service_generate_brand_info.py @@ -3,23 +3,25 @@ import logging import cv2 import numpy as np import tritonclient.grpc as grpcclient -from langchain.output_parsers import ResponseSchema, StructuredOutputParser +from langchain_classic.output_parsers import ResponseSchema, StructuredOutputParser from langchain_community.chat_models import ChatTongyi from langchain_core.prompts import PromptTemplate -# from langchain_openai import ChatOpenAI from minio import Minio from tritonclient.utils import np_to_triton_dtype -from app.core.config import GI_MODEL_URL, MINIO_URL, MINIO_ACCESS, MINIO_SECRET, MINIO_SECURE, GI_MODEL_NAME +from 
app.core.config import GI_MODEL_URL, GI_MODEL_NAME from app.schemas.brand_dna import GenerateBrandModel from app.service.utils.generate_uuid import generate_uuid from app.service.utils.new_oss_client import oss_upload_image +from app.core.config import settings + class GenerateBrandInfo: def __init__(self, request_data): # minio client init - self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) + self.generate_logo_prompt = None + self.minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) # user info init self.user_id = request_data.user_id @@ -55,7 +57,7 @@ class GenerateBrandInfo: return self.result_data def llm_generate_brand_info(self): - output = self.model(self._input.to_messages()) + output = self.model.invoke(self._input.to_messages()) brand_data = self.output_parser.parse(output.content) self.result_data = brand_data self.generate_logo_prompt = brand_data['brand_logo_prompt'] @@ -87,8 +89,8 @@ class GenerateBrandInfo: def upload_logo_image(self, image, object_name): try: _, img_byte_array = cv2.imencode('.jpg', image) - object_name = f'{self.user_id}/{self.category}/{object_name}' - req = oss_upload_image(oss_client=self.minio_client, bucket="aida-users", object_name=object_name, image_bytes=img_byte_array) + object_name = f'{self.user_id}/{self.category}/{object_name}.jpg' + oss_upload_image(oss_client=self.minio_client, bucket="aida-users", object_name=object_name, image_bytes=img_byte_array) image_url = f"aida-users/{object_name}" return image_url except Exception as e: diff --git a/app/service/brand_dna/test.py b/app/service/brand_dna/test.py deleted file mode 100644 index 966f76e..0000000 --- a/app/service/brand_dna/test.py +++ /dev/null @@ -1,32 +0,0 @@ -from dotenv import load_dotenv -from langchain.output_parsers import StructuredOutputParser, ResponseSchema -from langchain_core.prompts import PromptTemplate 
-from langchain_openai import ChatOpenAI - -# 加载.env文件的环境变量 -load_dotenv() - -# 创建一个大语言模型,model指定了大语言模型的种类 -model = ChatOpenAI(model="qwen2.5-14b-instruct") - -# 想要接收的响应模式 -response_schemas = [ - ResponseSchema(name="brand_name", description="Brand name."), - ResponseSchema(name="brand_slogan", description="Brand slogan."), - ResponseSchema(name="brand_logo_prompt", description="prompt required for brand logo generation.") -] -output_parser = StructuredOutputParser.from_response_schemas(response_schemas) -format_instructions = output_parser.get_format_instructions() -prompt = PromptTemplate( - template="你是一个时装品牌的设计师。根据用户输入提取出brand name,brand slogan,brand logo 描述。如果没有以上内容,需要你根据用户输入随意发挥。随后根据brand logo 描述生成一个prompt,这个prompt用于生成模型.\n{format_instructions}\n{question}", - input_variables=["question"], - partial_variables={"format_instructions": format_instructions} -) -_input = prompt.format_prompt(question="brand name: cat home") - -output = model(_input.to_messages()) -brand_data = output_parser.parse(output.content) - - -def generate_logo(bucket_name, object_name, prompt): - pass diff --git a/app/service/chat_robot/script/agents/agent_executor.py b/app/service/chat_robot/script/agents/agent_executor.py index cc69936..68460d9 100644 --- a/app/service/chat_robot/script/agents/agent_executor.py +++ b/app/service/chat_robot/script/agents/agent_executor.py @@ -3,27 +3,20 @@ import json import logging from typing import Any, Dict, List, Optional, Union, Tuple -from langchain.agents import AgentExecutor -from langchain.callbacks.manager import Callbacks, CallbackManager -from langchain.load.dump import dumpd -from langchain.schema import RUN_KEY, RunInfo +from langchain_classic.agents import AgentExecutor +from langchain_classic.schema import RUN_KEY from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.callbacks import Callbacks, CallbackManager +from langchain_core.load import dumpd +from langchain_core.outputs import RunInfo class 
CustomAgentExecutor(AgentExecutor): - def __call__( - self, - inputs: Union[Dict[str, Any], Any], - return_only_outputs: bool = False, - callbacks: Callbacks = None, - session_key: str = "", - *, - tags: Optional[List[str]] = None, - include_run_info: bool = False, - ) -> Dict[str, Any]: + def __call__(self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Callbacks = None, session_key: str = "", *, tags: Optional[List[str]] = None, include_run_info: bool = False, **kwargs) -> Dict[str, Any]: """Run the logic of this chain and add to output if desired. Args: + **kwargs: inputs: Dictionary of inputs, or single input if chain expects only one param. return_only_outputs: boolean for whether to return only outputs in the @@ -72,7 +65,7 @@ class CustomAgentExecutor(AgentExecutor): """Validate and prep outputs.""" self._validate_outputs(outputs) if self.memory is not None and outputs['need_record']: - self.memory.save_context(inputs, outputs, session_key) + self.memory.save_context(inputs, outputs) if return_only_outputs: return outputs else: @@ -95,7 +88,7 @@ class CustomAgentExecutor(AgentExecutor): ) inputs = {list(_input_keys)[0]: inputs} if self.memory is not None: - external_context = self.memory.load_memory_variables(inputs, session_key) + external_context = self.memory.load_memory_variables(inputs) inputs = dict(inputs, **external_context) self._validate_inputs(inputs) return inputs @@ -119,7 +112,8 @@ class CustomAgentExecutor(AgentExecutor): {return_value_key: observation}, "", ) - except: + except Exception as e: + print(e) pass # Invalid tools won't be in the map, so we return False. 
diff --git a/app/service/chat_robot/script/agents/conversational_functions_agent.py b/app/service/chat_robot/script/agents/conversational_functions_agent.py index eb362a7..6a42307 100644 --- a/app/service/chat_robot/script/agents/conversational_functions_agent.py +++ b/app/service/chat_robot/script/agents/conversational_functions_agent.py @@ -1,26 +1,15 @@ import json -import re +from dataclasses import dataclass from json import JSONDecodeError from typing import List, Tuple, Any, Union -from dataclasses import dataclass -from langchain.callbacks.manager import Callbacks -from langchain.agents import ( - OpenAIFunctionsAgent, -) -from langchain.schema import ( - AgentAction, - AgentFinish, - BaseMessage, - OutputParserException -) -from langchain.schema.messages import ( - AIMessage, - FunctionMessage -) -from langchain.tools import BaseTool, StructuredTool -# from langchain.tools.convert_to_openai import FunctionDescription -from langchain.utils.openai_functions import FunctionDescription +from langchain_classic.agents import OpenAIFunctionsAgent +from langchain_community.utils.ernie_functions import FunctionDescription +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.callbacks import Callbacks +from langchain_core.exceptions import OutputParserException +from langchain_core.messages import BaseMessage, AIMessage, FunctionMessage +from langchain_core.tools import BaseTool @dataclass @@ -76,7 +65,6 @@ def _create_function_message( content = observation return FunctionMessage( name=agent_action.tool, - content=content, ) @@ -177,6 +165,7 @@ class ConversationalFunctionsAgent(OpenAIFunctionsAgent): into it. Args: + callbacks: intermediate_steps: Steps the LLM has taken to date, along with observations **kwargs: User inputs. 
**kwargs: Including user's input string diff --git a/app/service/chat_robot/script/callbacks/openai_token_record_callback.py b/app/service/chat_robot/script/callbacks/openai_token_record_callback.py index 83d22a1..198fb14 100644 --- a/app/service/chat_robot/script/callbacks/openai_token_record_callback.py +++ b/app/service/chat_robot/script/callbacks/openai_token_record_callback.py @@ -2,18 +2,16 @@ from typing import Any, Dict from langchain_community.callbacks.openai_info import OpenAICallbackHandler -from langchain.schema import LLMResult from langchain_community.callbacks.openai_info import standardize_model_name, MODEL_COST_PER_1K_TOKENS, \ get_openai_token_cost_for_model - - -# from langchain.callbacks.openai_info import standardize_model_name, MODEL_COST_PER_1K_TOKENS, get_openai_token_cost_for_model +from langchain_core.outputs import LLMResult class OpenAITokenRecordCallbackHandler(OpenAICallbackHandler): need_record: bool = True response_type: str = "string" """Callback Handler that tracks OpenAI info and write to redis after agent finish""" + def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Collect token usage.""" if response.llm_output is None: @@ -22,7 +20,7 @@ class OpenAITokenRecordCallbackHandler(OpenAICallbackHandler): if "token_usage" not in response.llm_output: return None if "function_call" in response.generations[0][0].message.additional_kwargs: - if response.generations[0][0].message.additional_kwargs["function_call"]["name"] in ["sql_db_query", "sql_db_schema","tutorial_tool"]: + if response.generations[0][0].message.additional_kwargs["function_call"]["name"] in ["sql_db_query", "sql_db_schema", "tutorial_tool"]: self.need_record = False if response.generations[0][0].message.additional_kwargs["function_call"]["name"] == "sql_db_query": self.response_type = "image" @@ -39,6 +37,7 @@ class OpenAITokenRecordCallbackHandler(OpenAICallbackHandler): self.total_tokens += token_usage.get("total_tokens", 0) self.prompt_tokens += 
prompt_tokens self.completion_tokens += completion_tokens + return None def on_chain_end(self, outputs: Dict, **kwargs: Any) -> None: """Write token usage to redis.""" diff --git a/app/service/chat_robot/script/database.py b/app/service/chat_robot/script/database.py index bcc0b2c..934c82c 100644 --- a/app/service/chat_robot/script/database.py +++ b/app/service/chat_robot/script/database.py @@ -44,12 +44,17 @@ class CustomDatabase(SQLDatabase): final_str = "\n\n".join(tables) return final_str - def run(self, command: str, fetch: str = "all") -> str: + def run(self, command: str, fetch: str = "all", **kwargs) -> str: """Execute a SQL command and return a string representing the results. If the statement returns rows, a string of the results is returned. If the statement returns no rows, an empty string is returned. + Args: + command: + fetch: + **kwargs: + """ with self._engine.begin() as connection: if self._schema is not None: diff --git a/app/service/chat_robot/script/main.py b/app/service/chat_robot/script/main.py index 573fae5..1bfe322 100644 --- a/app/service/chat_robot/script/main.py +++ b/app/service/chat_robot/script/main.py @@ -1,15 +1,15 @@ import json import logging -from langchain.agents import Tool -from langchain.callbacks import FileCallbackHandler -from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder -from langchain.schema import SystemMessage, AIMessage -from langchain.utilities import SerpAPIWrapper +from langchain_community.utilities import SerpAPIWrapper +from langchain_core.callbacks import FileCallbackHandler +from langchain_core.messages import SystemMessage, AIMessage +from langchain_core.prompts import MessagesPlaceholder, HumanMessagePromptTemplate, ChatPromptTemplate +from langchain_core.tools import Tool from langchain_community.chat_models import ChatTongyi from loguru import logger -from app.core.config import * +from app.core.config import settings from 
app.service.chat_robot.script.agents import CustomAgentExecutor, ConversationalFunctionsAgent from app.service.chat_robot.script.database import CustomDatabase from app.service.chat_robot.script.memory import UserConversationBufferWindowMemory @@ -30,10 +30,10 @@ log_handler = FileCallbackHandler(logfile) # # callbacks=[OpenAICallbackHandler()] # ) -llm = ChatTongyi(api_key=QWEN_API_KEY) +llm = ChatTongyi(api_key=settings.QWEN_API_KEY) search = SerpAPIWrapper() -db = CustomDatabase.from_uri(f'mysql+pymysql://{DB_USERNAME}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/attribute_retrieval_V3', +db = CustomDatabase.from_uri(f'mysql+pymysql://{settings.DB_USERNAME}:{settings.DB_PASSWORD}@{settings.DB_HOST}:{settings.DB_PORT}/attribute_retrieval_V3', include_tables=['female_top', 'female_skirt', 'female_pants', 'female_dress', 'female_outwear', 'male_bottom', 'male_top', 'male_outwear'], engine_args={"pool_recycle": 7200}) @@ -43,11 +43,11 @@ tools = [ description="Can be used to perform Internet searches", func=search.run ), - QuerySQLDataBaseTool(db=db, return_direct=False), + QuerySQLDataBaseTool(db=db), InfoSQLDatabaseTool(db=db), ListSQLDatabaseTool(db=db), # QuerySQLCheckerTool(db=db, llm=OpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)), - QuerySQLCheckerTool(db=db, llm=ChatTongyi(temperature=0, api_key=QWEN_API_KEY)), + QuerySQLCheckerTool(db=db, llm=ChatTongyi(api_key=settings.QWEN_API_KEY)), # Tool( # name="tutorial_tool", # description="Utilize this tool to retrieve specific statements related to user guidance tutorials." 
@@ -133,5 +133,5 @@ def chat(post_data): 'completion_tokens': final_outputs['completion_tokens'], 'response_type': final_outputs["response_type"] } - logging.info(json.dumps(api_response)) + logging.info(json.dumps(api_response, indent=4)) return api_response diff --git a/app/service/chat_robot/script/memory/user_buffer_window.py b/app/service/chat_robot/script/memory/user_buffer_window.py index 1c70f02..d6315e0 100644 --- a/app/service/chat_robot/script/memory/user_buffer_window.py +++ b/app/service/chat_robot/script/memory/user_buffer_window.py @@ -3,13 +3,12 @@ from typing import Any, Dict, List, Tuple import json import redis +from langchain_classic.memory.chat_memory import BaseChatMemory +from langchain_classic.memory.utils import get_prompt_input_key +from langchain_core.messages import messages_from_dict, get_buffer_string, BaseMessage, HumanMessage, AIMessage, message_to_dict from redis import Redis -from langchain.memory.chat_memory import BaseChatMemory -from langchain.schema.messages import BaseMessage, get_buffer_string, HumanMessage, AIMessage -from langchain.schema.messages import _message_to_dict, messages_from_dict -from langchain.memory.utils import get_prompt_input_key -from app.core.config import * +from app.core.config import settings class UserConversationBufferWindowMemory(BaseChatMemory): @@ -24,8 +23,8 @@ class UserConversationBufferWindowMemory(BaseChatMemory): @classmethod def from_redis( cls, - host: str = REDIS_HOST, - port: int = REDIS_PORT, + host: str = settings.REDIS_HOST, + port: int = settings.REDIS_PORT, db: int = 3, **kwargs ): @@ -79,7 +78,7 @@ class UserConversationBufferWindowMemory(BaseChatMemory): return inputs[prompt_input_key], outputs[output_key] def add_message(self, key: str, message: BaseMessage) -> None: - self.redis_client.lpush(key, json.dumps(_message_to_dict(message))) + self.redis_client.lpush(key, json.dumps(message_to_dict(message))) def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str], key: 
str = "") -> None: """Save context from this conversation to buffer.""" diff --git a/app/service/chat_robot/script/service/CallQWen.py b/app/service/chat_robot/script/service/CallQWen.py index 5ab74ba..ea0c75b 100644 --- a/app/service/chat_robot/script/service/CallQWen.py +++ b/app/service/chat_robot/script/service/CallQWen.py @@ -5,10 +5,10 @@ from dashscope import Generation from retry import retry from urllib3.exceptions import NewConnectionError -from app.core.config import * +from app.core.config import settings from app.service.chat_robot.script.callbacks.qwen_callback_handler import QWenCallbackHandler from app.service.chat_robot.script.database import CustomDatabase -from app.service.chat_robot.script.prompt import FASHION_CHAT_BOT_PREFIX, TOOLS_FUNCTIONS_SUFFIX, TUTORIAL_TOOL_RETURN, \ +from app.service.chat_robot.script.prompt import TOOLS_FUNCTIONS_SUFFIX, TUTORIAL_TOOL_RETURN, \ GET_LANGUAGE_PREFIX, FASHION_CHAT_BOT_PREFIX_TEMP from app.service.search_image_with_text.service import query @@ -149,7 +149,7 @@ tools = [ } ] -db = CustomDatabase.from_uri(f'mysql+pymysql://{DB_USERNAME}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/attribute_retrieval_V3', +db = CustomDatabase.from_uri(f'mysql+pymysql://{settings.MYSQL_USER}:{settings.MYSQL_PASSWORD}@{settings.MYSQL_HOST}:{settings.MYSQL_PORT}/attribute_retrieval_V3', include_tables=['female_top', 'female_skirt', 'female_pants', 'female_dress', 'female_outwear', 'male_bottom', 'male_top', 'male_outwear'], engine_args={"pool_recycle": 7200}) @@ -159,7 +159,7 @@ qwen = QWenCallbackHandler() def search_from_internet(message): response = Generation.call( model='qwen-turbo', - api_key=QWEN_API_KEY, + api_key=settings.QWEN_API_KEY, messages=message, prompt='The output must be in English.Keep the final result under 200 words.' 
# tools=tools, @@ -190,7 +190,7 @@ def get_image_from_vector_db(gender, content): def get_response(messages): response = Generation.call( model='qwen-max', - api_key=QWEN_API_KEY, + api_key=settings.QWEN_API_KEY, messages=messages, tools=tools, # seed=random.randint(1, 10000), # 设置随机数种子seed,如果没有设置,则随机数种子默认为1234 @@ -203,7 +203,7 @@ def get_response(messages): def get_assistant_response(messages): response = Generation.call( model='qwen-max', - api_key=QWEN_API_KEY, + api_key=settings.QWEN_API_KEY, messages=messages, # seed=random.randint(1, 10000), # 设置随机数种子seed,如果没有设置,则随机数种子默认为1234 result_format='message', # 将输出设置为message形式 @@ -212,8 +212,10 @@ def get_assistant_response(messages): return response +global tool_info + + def call_with_messages(message): - global tool_info user_input = message print('\n') @@ -241,7 +243,7 @@ def call_with_messages(message): response_type = "chat" while flag and count <= 3: - first_response = get_response(messages) + first_response = get_response assistant_output = first_response.output.choices[0].message QWenCallbackHandler.on_llm_end(qwen, first_response.usage) print(f"\n大模型第 {count} 轮输出信息:{first_response}\n") @@ -260,7 +262,7 @@ def call_with_messages(message): ] tool_info['content'] = search_from_internet(message) flag = False - result_content = tool_info['content'].output.text + result_content = tool_info['content'] # 如果模型选择的工具是get_database_table # elif assistant_output.tool_calls[0]['function']['name'] == 'get_database_table': # tool_info = {"name": "get_database_table", "role": "tool", 'content': get_database_table()} diff --git a/app/service/chat_robot/script/tools/sql_tools.py b/app/service/chat_robot/script/tools/sql_tools.py index 89576d3..1f82616 100644 --- a/app/service/chat_robot/script/tools/sql_tools.py +++ b/app/service/chat_robot/script/tools/sql_tools.py @@ -2,21 +2,15 @@ """Tools for interacting with a SQL database.""" from typing import Any, Dict, Optional, Type -from pydantic import BaseModel, Extra, Field, 
root_validator - -from langchain.base_language import BaseLanguageModel -from langchain.callbacks.manager import ( - AsyncCallbackManagerForToolRun, - CallbackManagerForToolRun, -) -from langchain.chains.llm import LLMChain -from langchain.prompts import PromptTemplate +from langchain_community.tools.sql_database.prompt import QUERY_CHECKER +from langchain_community.tools.sql_database.tool import _QuerySQLCheckerToolInput # from langchain.sql_database import SQLDatabase from langchain_community.utilities import SQLDatabase -from langchain.tools.base import BaseTool -from langchain_community.tools.sql_database.prompt import QUERY_CHECKER - -from langchain_community.tools.sql_database.tool import QuerySQLCheckerTool, _QuerySQLCheckerToolInput +from langchain_core.callbacks import CallbackManagerForToolRun, AsyncCallbackManagerForToolRun +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import PromptTemplate +from langchain_core.tools import BaseTool +from pydantic import BaseModel, Extra, Field, root_validator class BaseSQLDatabaseTool(BaseModel): @@ -62,7 +56,7 @@ class QuerySQLDataBaseTool(BaseSQLDatabaseTool, BaseTool): "LIMIT 1'" "Example Input 2: 'SELECT img_name FROM top WHERE sleeve_length = 'Long' AND type = 'Blouse' " "order by rand() LIMIT 2'" - ) + ) def _run( self, @@ -95,9 +89,9 @@ class InfoSQLDatabaseTool(BaseSQLDatabaseTool, BaseTool): "Input to this tool is a comma-separated list of tables, output is the schema and sample rows for those tables." "There are eight tables covering eight fashion categories: female_top, female_pants, female_dress," "female_skirt, female_outwear, male_bottom, male_top, and male_outwear." 
- + "Example Input: 'female_outwear, male_top'" - ) + ) def _run( self, @@ -183,11 +177,11 @@ class QuerySQLCheckerTool(BaseSQLDatabaseTool, BaseTool): args_schema: Type[BaseModel] = _QuerySQLCheckerToolInput @root_validator(pre=True) - def initialize_llm_chain(cls, values: Dict[str, Any]) -> Dict[str, Any]: + def initialize_llm_chain(self, values: Dict[str, Any]) -> Dict[str, Any]: if "llm_chain" not in values: # from langchain.chains.llm import LLMChain - llm = values.get("llm") # type: ignore[arg-type] + llm = values.get("llm") # type: ignore[arg-type] prompt = PromptTemplate( template=QUERY_CHECKER, input_variables=["dialect", "query"] ) diff --git a/app/service/chat_robot/script/tools/tutorial_tool.py b/app/service/chat_robot/script/tools/tutorial_tool.py index c08eb9d..64c70ba 100644 --- a/app/service/chat_robot/script/tools/tutorial_tool.py +++ b/app/service/chat_robot/script/tools/tutorial_tool.py @@ -1,6 +1,6 @@ from typing import Any -from langchain.tools.base import BaseTool +from langchain_core.tools import BaseTool from app.service.chat_robot.script.prompt import TUTORIAL_TOOL_RETURN diff --git a/app/service/clothing_seg/service.py b/app/service/clothing_seg/service.py index 34ea7ee..e3cf83c 100644 --- a/app/service/clothing_seg/service.py +++ b/app/service/clothing_seg/service.py @@ -9,14 +9,14 @@ from PIL import Image from minio import Minio from tritonclient.utils import np_to_triton_dtype -from app.core.config import * +from app.core.config import settings from app.schemas.clothing_seg import ClothingSegModel from app.service.design_fast.utils.design_ensemble import get_seg_result from app.service.utils.decorator import RunTime from app.service.utils.generate_uuid import generate_uuid from app.service.utils.new_oss_client import oss_get_image, oss_upload_image -minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, 
secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) class ClothingSeg: @@ -64,9 +64,9 @@ class ClothingSeg: if image_type == "sketch": if len(image.shape) == 2: image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) - seg_mask = get_seg_result(1, image[:, :, :3]) + seg_mask = get_seg_result(image[:, :, :3]) else: - seg_mask = get_seg_result(1, image[:, :, :3]) + seg_mask = get_seg_result(image[:, :, :3]) temp = seg_mask != 0.0 mask = (255 * (temp + 0).astype(np.uint8)) x_min, y_min, x_max, y_max = get_bounding_box(mask) diff --git a/app/service/comfyui_I2V/flf2v_server.py b/app/service/comfyui_I2V/flf2v_server.py index 448f9ad..6a14550 100644 --- a/app/service/comfyui_I2V/flf2v_server.py +++ b/app/service/comfyui_I2V/flf2v_server.py @@ -12,7 +12,8 @@ from PIL import Image from minio import Minio, S3Error from moviepy.video.io.VideoFileClip import VideoFileClip -from app.core.config import MINIO_URL, MINIO_ACCESS, MINIO_SECRET, MINIO_SECURE, COMFYUI_SERVER_ADDRESS, PS_RABBITMQ_QUEUES, DEBUG +from app.core.config import PS_RABBITMQ_QUEUES +from app.core.config import settings from app.schemas.comfyui_i2v import ComfyuiFLF2VModel from app.service.generate_image.utils.mq import publish_status @@ -305,13 +306,14 @@ workflow_json = { class ComfyUIServerFLF2V: def __init__(self, request_data): + self.pose_transform_data = None self.start_image_url = request_data.start_image_url self.end_image_url = request_data.end_image_url self.prompt = request_data.prompt self.tasks_id = request_data.tasks_id self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:] self.server_status_data = {'tasks_id': self.tasks_id, 'status': 'PENDING', 'message': "pending", 'gif_url': '', 'video_url': '', 'image_url': ''} - self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) + self.minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) def 
get_result(self): workflow_json['6']['inputs']['text'] = self.prompt @@ -341,7 +343,7 @@ class ComfyUIServerFLF2V: # 1. 提交任务 prompt_response = self.queue_prompt(workflow_json, self.tasks_id) if not prompt_response: - return + return None prompt_id = prompt_response.get("prompt_id") logger.info(f" 任务已提交,Prompt ID: {prompt_id}") @@ -361,6 +363,7 @@ class ComfyUIServerFLF2V: } logger.info(file_list) return self.process_and_upload_comfyui_video(filename=file_list['filename'], subfolder=file_list['subfolder'], prompt_id=prompt_response['prompt_id']), prompt_id + return None def download_from_minio_in_memory(self, image_url): bucket = image_url.split('/')[0] @@ -391,8 +394,9 @@ class ComfyUIServerFLF2V: logger.error(f"❌ MinIO 下载过程中发生未知错误: {e}") return None, None - def upload_in_memory_file_to_comfyui(self, in_memory_file, filename): - upload_url = f"http://{COMFYUI_SERVER_ADDRESS}/upload/image" + @staticmethod + def upload_in_memory_file_to_comfyui(in_memory_file, filename): + upload_url = f"http://{settings.COMFYUI_SERVER_ADDRESS}/upload/image" data = { "overwrite": "true", @@ -430,7 +434,7 @@ class ComfyUIServerFLF2V: # 1. 从 ComfyUI 获取视频二进制数据 mp4_bytes = self.get_comfyui_video_bytes(filename, subfolder) if not mp4_bytes: - return + return None # 2. 
准备进行视频处理 # moviepy 不支持直接使用 bytes,需要将 bytes 写入一个 BytesIO 或临时文件 @@ -518,7 +522,7 @@ class ComfyUIServerFLF2V: self.pose_transform_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': f'aida-users/{GIF_OBJECT}', 'video_url': f'aida-users/{MP4_OBJECT}', 'image_url': f'aida-users/{FRAME_OBJECT}'} # 推送消息 - if not DEBUG: + if not settings.DEBUG: publish_status(json.dumps(self.pose_transform_data), PS_RABBITMQ_QUEUES) logger.info( f" [x] Sent to: {PS_RABBITMQ_QUEUES} data:@@@@ {json.dumps(self.pose_transform_data, indent=4)}") @@ -530,13 +534,14 @@ class ComfyUIServerFLF2V: return None # --- 辅助函数:提交任务到队列 --- - def queue_prompt(self, prompt, client_id): + @staticmethod + def queue_prompt(prompt, client_id): """向 ComfyUI 提交工作流提示。""" p = {"prompt": prompt, "client_id": client_id, "prompt_id": client_id} data = json.dumps(p).encode('utf-8') # 提交任务到 /prompt 端点 - response = requests.post(f"http://{COMFYUI_SERVER_ADDRESS}/prompt", data=data) + response = requests.post(f"http://{settings.COMFYUI_SERVER_ADDRESS}/prompt", data=data) # print(f"-------------{response.text}") # print(f"------------{client_id}") @@ -547,9 +552,10 @@ class ComfyUIServerFLF2V: logger.warning(response.text) return None - def poll_history(self, prompt_id, interval_seconds=5): + @staticmethod + def poll_history(prompt_id, interval_seconds=5): """步骤 2: 轮询 /history/{prompt_id} 检查任务是否完成""" - url = f"http://{COMFYUI_SERVER_ADDRESS}/history/{prompt_id}" + url = f"http://{settings.COMFYUI_SERVER_ADDRESS}/history/{prompt_id}" logger.info(f"⏳ 开始轮询状态 (间隔 {interval_seconds} 秒)...") @@ -574,7 +580,8 @@ class ComfyUIServerFLF2V: logger.info(f"⚠️ 轮询时发生错误: {e}") pass - def get_comfyui_video_bytes(self, filename: str, subfolder: str, file_type: str = "output"): + @staticmethod + def get_comfyui_video_bytes(filename: str, subfolder: str, file_type: str = "output"): """ 从 ComfyUI 的 /view 端点获取视频文件的二进制数据。 @@ -586,7 +593,7 @@ class ComfyUIServerFLF2V: 返回: - 视频文件的二进制内容 (bytes) 或 None。 """ - 
url = f"http://{COMFYUI_SERVER_ADDRESS}/view" + url = f"http://{settings.COMFYUI_SERVER_ADDRESS}/view" params = { "filename": filename, "subfolder": subfolder, diff --git a/app/service/comfyui_I2V/i2v_server.py b/app/service/comfyui_I2V/i2v_server.py index bcb34c4..0a48eda 100644 --- a/app/service/comfyui_I2V/i2v_server.py +++ b/app/service/comfyui_I2V/i2v_server.py @@ -12,8 +12,8 @@ from PIL import Image from minio import Minio, S3Error from moviepy.video.io.VideoFileClip import VideoFileClip -from app.core.config import MINIO_URL, MINIO_ACCESS, MINIO_SECRET, MINIO_SECURE, COMFYUI_SERVER_ADDRESS, PS_RABBITMQ_QUEUES, DEBUG -from app.schemas.comfyui_i2v import ComfyuiPose2VModel, ComfyuiI2VModel +from app.core.config import PS_RABBITMQ_QUEUES, settings +from app.schemas.comfyui_i2v import ComfyuiI2VModel from app.service.generate_image.utils.mq import publish_status logger = logging.getLogger() @@ -293,13 +293,14 @@ workflow_json = { class ComfyUIServerI2V: def __init__(self, request_data): + self.pose_transform_data = None self.image_url = request_data.image_url self.prompt = request_data.prompt self.tasks_id = request_data.tasks_id self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:] self.server_status_data = {'tasks_id': self.tasks_id, 'status': 'PENDING', 'message': "pending", 'gif_url': '', 'video_url': '', 'image_url': ''} - self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) + self.minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) def get_result(self): workflow_json['93']['inputs']['text'] = self.prompt @@ -319,7 +320,7 @@ class ComfyUIServerI2V: # 1. 
提交任务 prompt_response = self.queue_prompt(workflow_json, self.tasks_id) if not prompt_response: - return + return None prompt_id = prompt_response.get("prompt_id") logger.info(f" 任务已提交,Prompt ID: {prompt_id}") outputs = self.poll_history(prompt_id) @@ -339,6 +340,7 @@ class ComfyUIServerI2V: } logger.info(file_list) return self.process_and_upload_comfyui_video(filename=file_list['filename'], subfolder=file_list['subfolder'], prompt_id=prompt_response['prompt_id']), prompt_id + return None def download_from_minio_in_memory(self, image_url): bucket = image_url.split('/')[0] @@ -369,8 +371,9 @@ class ComfyUIServerI2V: logger.error(f"❌ MinIO 下载过程中发生未知错误: {e}") return None, None - def upload_in_memory_file_to_comfyui(self, in_memory_file, filename): - upload_url = f"http://{COMFYUI_SERVER_ADDRESS}/upload/image" + @staticmethod + def upload_in_memory_file_to_comfyui(in_memory_file, filename): + upload_url = f"http://{settings.COMFYUI_SERVER_ADDRESS}/upload/image" data = { "overwrite": "true", @@ -408,7 +411,7 @@ class ComfyUIServerI2V: # 1. 从 ComfyUI 获取视频二进制数据 mp4_bytes = self.get_comfyui_video_bytes(filename, subfolder) if not mp4_bytes: - return + return None # 2. 
准备进行视频处理 # moviepy 不支持直接使用 bytes,需要将 bytes 写入一个 BytesIO 或临时文件 @@ -496,7 +499,7 @@ class ComfyUIServerI2V: self.pose_transform_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': f'aida-users/{GIF_OBJECT}', 'video_url': f'aida-users/{MP4_OBJECT}', 'image_url': f'aida-users/{FRAME_OBJECT}'} # 推送消息 - if not DEBUG: + if not settings.DEBUG: publish_status(json.dumps(self.pose_transform_data), PS_RABBITMQ_QUEUES) logger.info( f" [x] Sent to: {PS_RABBITMQ_QUEUES} data:@@@@ {json.dumps(self.pose_transform_data, indent=4)}") @@ -508,13 +511,14 @@ class ComfyUIServerI2V: return None # --- 辅助函数:提交任务到队列 --- - def queue_prompt(self, prompt, client_id): + @staticmethod + def queue_prompt(prompt, client_id): """向 ComfyUI 提交工作流提示。""" p = {"prompt": prompt, "client_id": client_id, "prompt_id": client_id} data = json.dumps(p).encode('utf-8') # 提交任务到 /prompt 端点 - response = requests.post(f"http://{COMFYUI_SERVER_ADDRESS}/prompt", data=data) + response = requests.post(f"http://{settings.COMFYUI_SERVER_ADDRESS}/prompt", data=data) # print(f"-------------{response.text}") # print(f"------------{client_id}") @@ -525,9 +529,10 @@ class ComfyUIServerI2V: logger.warning(response.text) return None - def poll_history(self, prompt_id, interval_seconds=5): + @staticmethod + def poll_history(prompt_id, interval_seconds=5): """步骤 2: 轮询 /history/{prompt_id} 检查任务是否完成""" - url = f"http://{COMFYUI_SERVER_ADDRESS}/history/{prompt_id}" + url = f"http://{settings.COMFYUI_SERVER_ADDRESS}/history/{prompt_id}" logger.info(f"⏳ 开始轮询状态 (间隔 {interval_seconds} 秒)...") @@ -552,7 +557,8 @@ class ComfyUIServerI2V: logger.info(f"⚠️ 轮询时发生错误: {e}") pass - def get_comfyui_video_bytes(self, filename: str, subfolder: str, file_type: str = "output"): + @staticmethod + def get_comfyui_video_bytes(filename: str, subfolder: str, file_type: str = "output"): """ 从 ComfyUI 的 /view 端点获取视频文件的二进制数据。 @@ -564,7 +570,7 @@ class ComfyUIServerI2V: 返回: - 视频文件的二进制内容 (bytes) 或 None。 """ - url = 
f"http://{COMFYUI_SERVER_ADDRESS}/view" + url = f"http://{settings.COMFYUI_SERVER_ADDRESS}/view" params = { "filename": filename, "subfolder": subfolder, diff --git a/app/service/comfyui_I2V/pose2v_server.py b/app/service/comfyui_I2V/pose2v_server.py index d1db7bc..17e3fc5 100644 --- a/app/service/comfyui_I2V/pose2v_server.py +++ b/app/service/comfyui_I2V/pose2v_server.py @@ -13,7 +13,7 @@ from PIL import Image from minio import Minio, S3Error from moviepy.video.io.VideoFileClip import VideoFileClip -from app.core.config import REDIS_HOST, REDIS_PORT, REDIS_DB, MINIO_URL, MINIO_ACCESS, MINIO_SECRET, MINIO_SECURE, COMFYUI_SERVER_ADDRESS, PS_RABBITMQ_QUEUES, DEBUG +from app.core.config import settings from app.schemas.comfyui_i2v import ComfyuiPose2VModel from app.service.generate_image.utils.mq import publish_status @@ -371,11 +371,11 @@ class ComfyUIServerPose2V: self.pose_num = request_data.pose_id self.tasks_id = request_data.tasks_id self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:] - self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) self.pose_transform_data = {'tasks_id': self.tasks_id, 'status': 'PENDING', 'message': "pending", 'gif_url': '', 'video_url': '', 'image_url': ''} self.redis_client.set(self.tasks_id, json.dumps(self.pose_transform_data)) self.redis_client.expire(self.tasks_id, 600) - self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) + self.minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) def get_result(self): workflow_json['174']['inputs']['file'] = video_map[self.pose_num] @@ -389,7 +389,7 @@ class ComfyUIServerPose2V: # 1. 
提交任务 prompt_response = self.queue_prompt(workflow_json, self.tasks_id) if not prompt_response: - return + return None prompt_id = prompt_response.get("prompt_id") logger.info(f" 任务已提交,Prompt ID: {prompt_id}") @@ -411,6 +411,7 @@ class ComfyUIServerPose2V: } logger.info(file_list) return self.process_and_upload_comfyui_video(filename=file_list['filename'], subfolder=file_list['subfolder'], prompt_id=prompt_response['prompt_id']), prompt_id + return None def read_tasks_status(self): status_data = self.redis_client.get(self.tasks_id) @@ -492,8 +493,9 @@ class ComfyUIServerPose2V: except Exception as e: logger.error(f"❌ 发生未知错误: {e}") - def upload_in_memory_file_to_comfyui(self, in_memory_file, filename): - upload_url = f"http://{COMFYUI_SERVER_ADDRESS}/upload/image" + @staticmethod + def upload_in_memory_file_to_comfyui(in_memory_file, filename): + upload_url = f"http://{settings.COMFYUI_SERVER_ADDRESS}/upload/image" data = { "overwrite": "true", @@ -531,7 +533,7 @@ class ComfyUIServerPose2V: # 1. 从 ComfyUI 获取视频二进制数据 mp4_bytes = self.get_comfyui_video_bytes(filename, subfolder) if not mp4_bytes: - return + return None # 2. 准备进行视频处理 # moviepy 不支持直接使用 bytes,需要将 bytes 写入一个 BytesIO 或临时文件 @@ -619,10 +621,10 @@ class ComfyUIServerPose2V: self.pose_transform_data = {'tasks_id': self.tasks_id, 'status': 'SUCCESS', 'message': "success", 'gif_url': f'aida-users/{GIF_OBJECT}', 'video_url': f'aida-users/{MP4_OBJECT}', 'image_url': f'aida-users/{FRAME_OBJECT}'} # 推送消息 - if not DEBUG: - publish_status(json.dumps(self.pose_transform_data), PS_RABBITMQ_QUEUES) + if not settings.DEBUG: + publish_status(json.dumps(self.pose_transform_data), settings.COMFYUI_SERVER_ADDRESS) logger.info( - f" [x] Sent to: {PS_RABBITMQ_QUEUES} data:@@@@ {json.dumps(self.pose_transform_data, indent=4)}") + f" [x] Sent to: {settings.COMFYUI_SERVER_ADDRESS} data:@@@@ {json.dumps(self.pose_transform_data, indent=4)}") return "\n🎉 所有任务完成!" 
@@ -631,13 +633,15 @@ class ComfyUIServerPose2V: return None # --- 辅助函数:提交任务到队列 --- - def queue_prompt(self, prompt, client_id): + @staticmethod + def queue_prompt(prompt, client_id): """向 ComfyUI 提交工作流提示。""" p = {"prompt": prompt, "client_id": client_id, "prompt_id": client_id} data = json.dumps(p).encode('utf-8') # 提交任务到 /prompt 端点 - response = requests.post(f"http://{COMFYUI_SERVER_ADDRESS}/prompt", data=data) + # noinspection HttpUrlsUsage + response = requests.post(f"http://{settings.COMFYUI_SERVER_ADDRESS}/prompt", data=data) # print(f"-------------{response.text}") # print(f"------------{client_id}") @@ -648,9 +652,10 @@ class ComfyUIServerPose2V: logger.warning(response.text) return None - def poll_history(self, prompt_id, interval_seconds=5): + @staticmethod + def poll_history(prompt_id, interval_seconds=5): """步骤 2: 轮询 /history/{prompt_id} 检查任务是否完成""" - url = f"http://{COMFYUI_SERVER_ADDRESS}/history/{prompt_id}" + url = f"http://{settings.COMFYUI_SERVER_ADDRESS}/history/{prompt_id}" logger.info(f"⏳ 开始轮询状态 (间隔 {interval_seconds} 秒)...") @@ -675,7 +680,8 @@ class ComfyUIServerPose2V: logger.info(f"⚠️ 轮询时发生错误: {e}") pass - def get_comfyui_video_bytes(self, filename: str, subfolder: str, file_type: str = "output"): + @staticmethod + def get_comfyui_video_bytes(filename: str, subfolder: str, file_type: str = "output"): """ 从 ComfyUI 的 /view 端点获取视频文件的二进制数据。 @@ -687,7 +693,7 @@ class ComfyUIServerPose2V: 返回: - 视频文件的二进制内容 (bytes) 或 None。 """ - url = f"http://{COMFYUI_SERVER_ADDRESS}/view" + url = f"http://{settings.COMFYUI_SERVER_ADDRESS}/view" params = { "filename": filename, "subfolder": subfolder, diff --git a/app/service/design/core/layer.py b/app/service/design/core/layer.py deleted file mode 100644 index 0628851..0000000 --- a/app/service/design/core/layer.py +++ /dev/null @@ -1,116 +0,0 @@ -import logging - -import numpy as np -import cv2 -from matplotlib import pyplot as plt - -from PIL import Image - - -def show(img, win_name="temp"): - 
cv2.imshow(win_name, img) - cv2.waitKey(0) - - -def crop(img): - mid_point_h, mid_point_w = int(img.shape[0] / 2 + 30), int(img.shape[1] / 2) - img_roi = img[mid_point_h - 520: mid_point_h + 520, mid_point_w - 340: mid_point_w + 340] - return img_roi - - -class Layer(object): - def __init__(self): - self._layer = [] - - @property - def layer(self): - return self._layer - - def insert(self, layer_instance): - if layer_instance['name'] == 'body': - self._body = layer_instance - self._layer.append(layer_instance) - - def sort(self, priority): - self._layer.sort(key=lambda x: priority[x['name']]) - - # def merge(self, cfg): - # """ - # opencv shape order (height, width, channel) - # image coordinate system: - # |------------->x (width) - # | - # | - # | - # y (height) - # Returns: - # - # - # """ - # base_image = Image.new('RGBA', self._layer[1]['image'].size, (0, 0, 0, 0)) - # for layer in self._layer: - # y, x = layer['position'] - # base_image.paste(layer['image'], (x, y), layer['image']) - # # base_image.show() - # - # for x in self._layer: - # if np.all(x['mask'] == 0): - # continue - # # obtain region of interest about roi(roi) and item-image(roi_image, roi_mask) - # roi, roi_mask, roi_image, signal = self.get_roi(dst=dst, image=x) - # temp_bg = np.expand_dims(cv2.bitwise_not(roi_mask), axis=2).repeat(3, axis=2) - # tmp1 = (roi * (temp_bg / 255)).astype(np.uint8) - # temp_fg = np.expand_dims(roi_mask, axis=2).repeat(3, axis=2) - # tmp2 = (roi_image * (temp_fg / 255)).astype(np.uint8) - # - # roi[:] = cv2.add(tmp1, tmp2) - # # show(cv2.resize(dst, (int(dst.shape[1] * 0.5), int(dst.shape[0] * 0.5)), interpolation=cv2.INTER_AREA), - # # win_name=x.get('name')) - # # crop image and get the central part - # if cfg.get('basic')['self_template'] == False: - # dst_roi = crop(dst) - # else: - # dst_roi = dst - # return dst_roi, signal - # - # @staticmethod - # def get_roi(dst, image): - # signal = False - # dst_y, dst_x = dst.shape[:2] - # roi_height, roi_width = 
image['mask'].shape - # roi_y0, roi_x0 = image['position'] - # - # if roi_y0 < 0: - # roi_yin = 0 - # mask_yin = -roi_y0 - # signal = True - # else: - # roi_yin = roi_y0 - # mask_yin = 0 - # if roi_y0 + roi_height > dst_y: - # roi_yout = dst_y - # mask_yout = dst_y - roi_y0 - # signal = True - # else: - # roi_yout = roi_height + roi_y0 - # mask_yout = roi_height - # # x part - # if roi_x0 < 0: - # roi_xin = 0 - # mask_xin = -roi_x0 - # signal = True - # else: - # roi_xin = roi_x0 - # mask_xin = 0 - # if roi_x0 + roi_width > dst_x: - # roi_xout = dst_x - # mask_xout = dst_x - roi_x0 - # signal = True - # else: - # roi_xout = roi_width + roi_x0 - # mask_xout = roi_width - # - # roi = dst[roi_yin: roi_yout, roi_xin: roi_xout] - # roi_mask = image['mask'][mask_yin: mask_yout, mask_xin: mask_xout] - # roi_image = image['image'][mask_yin: mask_yout, mask_xin: mask_xout] - # return roi, roi_mask, roi_image, signal diff --git a/app/service/design/core/priority.py b/app/service/design/core/priority.py deleted file mode 100644 index dc111ea..0000000 --- a/app/service/design/core/priority.py +++ /dev/null @@ -1,45 +0,0 @@ -class Priority(object): - """Item layer priority levels. - """ - - def __init__(self, item_list): - self._priority = dict( - earring_front=99, - bag_front=98, - hairstyle_front=97, - outwear_front=20, - bottoms_front=19, - dress_front=18, - blouse_front=17, - skirt_front=16, - trousers_front=15, - tops_front=14, - shoes_right=1, - shoes_left=1, - body=0, - tops_back=-14, - trousers_back=-15, - skirt_back=-16, - blouse_back=-17, - dress_back=-18, - bottoms_back=-19, - outwear_back=-20, - hairstyle_back=-97, - bag_back=-98, - earring_back=-99, - ) - self.clothing_start_num = 10 - if not isinstance(item_list, list): - raise ValueError('item_list must be a list!') - for cate in item_list: - cate = cate.lower() - if cate not in ('outwear', 'dress', 'blouse', 'skirt', 'trousers', 'tops', 'bottoms'): - raise ValueError(f'Item type error. 
Cannot recognize {cate}') - for i, cate in enumerate(item_list): - cate = cate.lower() - self._priority[f'{cate}_front'] = self.clothing_start_num - i - self._priority[f'{cate}_back'] = -(self.clothing_start_num - i) - - @property - def priority(self): - return self._priority diff --git a/app/service/design/items/__init__.py b/app/service/design/items/__init__.py deleted file mode 100644 index e3e6bd5..0000000 --- a/app/service/design/items/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -from .builder import ITEMS, build_item -from .clothing import Clothing # 4.0 sec -from .body import Body -from .top import Top, Blouse, Outwear, Dress -from .bottom import Bottom, Trousers, Skirt -from .shoes import Shoes -from .bag import Bag -from .others import Hairstyle, Earring - -__all__ = [ - 'ITEMS', 'build_item', - 'Clothing', 'Body', - 'Top', 'Blouse', 'Outwear', 'Dress', - 'Bottom', 'Trousers', 'Skirt', - 'Shoes', 'Bag', 'Hairstyle', 'Earring' -] diff --git a/app/service/design/items/bag.py b/app/service/design/items/bag.py deleted file mode 100644 index 12b4c68..0000000 --- a/app/service/design/items/bag.py +++ /dev/null @@ -1,45 +0,0 @@ -import random - -from .builder import ITEMS -from .clothing import Clothing - - -@ITEMS.register_module() -class Bag(Clothing): - def __init__(self, **kwargs): - pipeline = [ - dict(type='LoadImageFromFile', path=kwargs['path'], color=kwargs['color']), - dict(type='KeypointDetection'), - dict(type='ContourDetection'), - dict(type='Painting'), - dict(type='Scaling'), - dict(type='Split'), - # dict(type='ImageShow', key=['image', 'mask', 'pattern_image']), - ] - kwargs.update(pipeline=pipeline) - super(Bag, self).__init__(**kwargs) - - @staticmethod - def calculate_start_point(keypoint_type, scale, clothes_point, body_point): - """ - align left - Args: - keypoint_type: string, "hand_point" - scale: float - clothes_point: dict{'left': [x1, y1, z1], 'right': [x2, y2, z2]} - body_point: dict, containing keypoint data of body figure - - Returns: 
- start_point: tuple (y', x') - x' = y_body - y1 * scale - y' = x_body - x1 * scale - """ - location = random.choice(seq=['left', 'right']) - if location == 'left': - side_indicator = f'{keypoint_type}_left' - else: - side_indicator = f'{keypoint_type}_right' - # clothes_point = {k: tuple(map(lambda x: int(scale * x), v[0: 2])) for k, v in clothes_point.items()} - start_point = (body_point[side_indicator][1] - int(int(clothes_point[keypoint_type].split("_")[1]) * scale), - body_point[side_indicator][0] - int(int(clothes_point[keypoint_type].split("_")[0]) * scale)) - return start_point diff --git a/app/service/design/items/body.py b/app/service/design/items/body.py deleted file mode 100644 index c336ae9..0000000 --- a/app/service/design/items/body.py +++ /dev/null @@ -1,36 +0,0 @@ -import cv2 - -from .builder import ITEMS -from .pipelines import Compose - - -@ITEMS.register_module() -class Body(object): - def __init__(self, **kwargs): - pipeline = [ - dict(type='LoadBodyImageFromFile', body_path=kwargs['body_path']), - # dict(type='ImageShow', key=['body_image', "body_mask"]) - ] - self.pipeline = Compose(pipeline) - self.result = dict() - - def process(self): - self.pipeline(self.result) - pass - - def organize(self, layer): - body_layer = dict(priority=0, - name=type(self).__name__.lower(), - image=self.result['body_image'], - image_url=self.result['image_url'], - mask_image=None, - mask_url=None, - sacle=1, - # mask=self.result['body_mask'], - position=(0, 0)) - layer.insert(body_layer) - - @staticmethod - def show(img): - cv2.imshow('', img) - cv2.waitKey(0) diff --git a/app/service/design/items/bottom.py b/app/service/design/items/bottom.py deleted file mode 100644 index e01ec02..0000000 --- a/app/service/design/items/bottom.py +++ /dev/null @@ -1,39 +0,0 @@ -from .builder import ITEMS -from .clothing import Clothing - - -@ITEMS.register_module() -class Bottom(Clothing): - def __init__(self, pipeline, **kwargs): - if pipeline is None: - pipeline = [ - 
dict(type='LoadImageFromFile', path=kwargs['path'], color=kwargs['color'], print_dict=kwargs['print']), - dict(type='KeypointDetection'), - dict(type='ContourDetection'), - # dict(type='Segmentation'), - dict(type='Painting', painting_flag=True), - dict(type='PrintPainting', print_flag=True), - dict(type='Scaling'), - dict(type='Split'), - # dict(type='ImageShow', key=['image', 'mask', 'pattern_image', 'print_image']), - ] - kwargs.update(pipeline=pipeline) - super(Bottom, self).__init__(**kwargs) - - -@ITEMS.register_module() -class Trousers(Bottom): - def __init__(self, pipeline=None, **kwargs): - super(Trousers, self).__init__(pipeline, **kwargs) - - -@ITEMS.register_module() -class Skirt(Bottom): - def __init__(self, pipeline=None, **kwargs): - super(Skirt, self).__init__(pipeline, **kwargs) - - -@ITEMS.register_module() -class Bottoms(Bottom): - def __init__(self, pipeline=None, **kwargs): - super(Bottoms, self).__init__(pipeline, **kwargs) diff --git a/app/service/design/items/builder.py b/app/service/design/items/builder.py deleted file mode 100644 index 26e04f1..0000000 --- a/app/service/design/items/builder.py +++ /dev/null @@ -1,9 +0,0 @@ -from mmcv.utils import Registry, build_from_cfg - -ITEMS = Registry('item') -PIPELINES = Registry('pipeline') - - -def build_item(cfg, default_args=None): - item = build_from_cfg(cfg, ITEMS, default_args) - return item diff --git a/app/service/design/items/clothing.py b/app/service/design/items/clothing.py deleted file mode 100644 index 953cecf..0000000 --- a/app/service/design/items/clothing.py +++ /dev/null @@ -1,100 +0,0 @@ -import cv2 - -from app.core.config import PRIORITY_DICT -from .builder import ITEMS -from .pipelines import Compose - - -@ITEMS.register_module() -class Clothing(object): - def __init__(self, pipeline, **kwargs): - self.pipeline = Compose(pipeline) - self.result = dict(name=type(self).__name__.lower(), **kwargs) - - def process(self): - self.pipeline(self.result) - - def apply_scale(self, img): - 
scale = self.result['scale'] - height, width = img.shape[0: 2] - if len(img.shape) > 2: - height, width = img.shape[0: 2] - scaled_img = cv2.resize(img, (int(width * scale), int(height * scale)), interpolation=cv2.INTER_AREA) - return scaled_img - - def organize(self, layer): - start_point = self.calculate_start_point(self.result['keypoint'], self.result['scale'], self.result['clothes_keypoint'], self.result['body_point_test'], self.result["offset"], self.result["resize_scale"]) - - front_layer = dict(priority=self.result.get("priority", None) if self.result.get("layer_order", False) else PRIORITY_DICT.get(f'{type(self).__name__.lower()}_front', None), - name=f'{type(self).__name__.lower()}_front', - image=self.result["front_image"], - # mask_image=self.result['front_mask_image'], - image_url=self.result['front_image_url'], - mask_url=self.result['mask_url'], - sacle=self.result['scale'], - clothes_keypoint=self.result['clothes_keypoint'], - position=start_point, - resize_scale=self.result["resize_scale"], - mask=cv2.resize(self.result['mask'], self.result["front_image"].size), - gradient_string=self.result['gradient_string'] if 'gradient_string' in self.result.keys() else "", - pattern_image_url=self.result['pattern_image_url'], - pattern_image=self.result['pattern_image'] - - ) - layer.insert(front_layer) - - back_layer = dict(priority=-self.result.get("priority", 0) if self.result.get("layer_order", False) else PRIORITY_DICT.get(f'{type(self).__name__.lower()}_back', None), - name=f'{type(self).__name__.lower()}_back', - image=self.result["back_image"], - # mask_image=self.result['back_mask_image'], - image_url=self.result['back_image_url'], - mask_url=self.result['mask_url'], - sacle=self.result['scale'], - clothes_keypoint=self.result['clothes_keypoint'], - position=start_point, - resize_scale=self.result["resize_scale"], - mask=cv2.resize(self.result['mask'], self.result["front_image"].size), - gradient_string=self.result['gradient_string'] if 
'gradient_string' in self.result.keys() else "", - pattern_image_url=self.result['pattern_image_url'], - ) - layer.insert(back_layer) - - @staticmethod - def calculate_start_point(keypoint_type, scale, clothes_point, body_point, offset, resize_scale): - """ - Align left - Args: - keypoint_type: string, "waistband" | "shoulder" | "ear_point" - scale: float - clothes_point: dict{'left': [x1, y1, z1], 'right': [x2, y2, z2]} - body_point: dict, containing keypoint data of body figure - - Returns: - start_point: tuple (x', y') - x' = y_body - y1 * scale + offset - y' = x_body - x1 * scale + offset - - """ - - side_indicator = f'{keypoint_type}_left' - - # if keypoint_type == "ear_point": - # start_point = (body_point[side_indicator][1] - int(int(clothes_point[side_indicator].split("_")[1]) * scale), - # body_point[side_indicator][0] - int(int(clothes_point[side_indicator].split("_")[0]) * scale)) - # else: - # start_point = ( - # int(body_point[side_indicator][1] + offset[1] - int(clothes_point[side_indicator].split("_")[0]) * scale), # y - # int(body_point[side_indicator][0] + offset[0] - int(clothes_point[side_indicator].split("_")[1]) * scale) # x - # ) - - # milvus_DB_keypoint_cache: - start_point = ( - int(body_point[side_indicator][1] + offset[1] - int(clothes_point[side_indicator][0]) * scale), # y - int(body_point[side_indicator][0] + offset[0] - int(clothes_point[side_indicator][1]) * scale) # x - ) - # start_point = ( - # int(body_point[side_indicator][1] + offset[1] - int(clothes_point[side_indicator].split("_")[0]) * scale), # y - # int(body_point[side_indicator][0] + offset[0] - int(clothes_point[side_indicator].split("_")[1]) * scale) # x - # ) - - return start_point diff --git a/app/service/design/items/others.py b/app/service/design/items/others.py deleted file mode 100644 index 5cb5796..0000000 --- a/app/service/design/items/others.py +++ /dev/null @@ -1,59 +0,0 @@ -from .builder import ITEMS -from .clothing import Clothing - - -@ITEMS.register_module() 
-class Hairstyle(Clothing): - def __init__(self, **kwargs): - pipeline = [ - dict(type='LoadImageFromFile', path=kwargs['path']), - dict(type='KeypointDetection'), - dict(type='ContourDetection'), - dict(type='Painting'), - dict(type='Scaling'), - dict(type='Split'), - # dict(type='ImageShow', key=['image', 'mask', 'pattern_image']), - ] - kwargs.update(pipeline=pipeline) - super(Hairstyle, self).__init__(**kwargs) - - @staticmethod - def calculate_start_point(keypoint_type, scale, clothes_point, body_point): - """ - align up - Args: - keypoint_type: string, "head_point" - scale: float - clothes_point: dict{'left': [x1, y1, z1], 'right': [x2, y2, z2]} - body_point: dict, containing keypoint data of body figure - - Returns: - start_point: tuple (x', y') - x' = y_body - y1 * scale - y' = x_body - x1 * scale - """ - side_indicator = f'{keypoint_type}_up' - # clothes_point = {k: tuple(map(lambda x: int(scale * x), v[0: 2])) for k, v in clothes_point.items()} - # logging.info(clothes_point[side_indicator]) - - start_point = ( - int(body_point[side_indicator][1] - int(clothes_point[side_indicator].split("_")[1] * scale)), - int(body_point[side_indicator][0] - int(clothes_point[side_indicator].split("_")[0] * scale)) - ) - return start_point - - -@ITEMS.register_module() -class Earring(Clothing): - def __init__(self, **kwargs): - pipeline = [ - dict(type='LoadImageFromFile', path=kwargs['path']), - dict(type='KeypointDetection'), - dict(type='ContourDetection'), - dict(type='Painting'), - dict(type='Scaling'), - dict(type='Split'), - # dict(type='ImageShow', key=['image', 'mask', 'pattern_image']), - ] - kwargs.update(pipeline=pipeline) - super(Earring, self).__init__(**kwargs) diff --git a/app/service/design/items/pipelines/__init__.py b/app/service/design/items/pipelines/__init__.py deleted file mode 100644 index 9abb09c..0000000 --- a/app/service/design/items/pipelines/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from .compose import Compose -from .loading import 
LoadImageFromFile, LoadBodyImageFromFile, ImageShow -from .keypoints import KeypointDetection -from .segmentation import Segmentation -from .painting import Painting, PrintPainting -from .scale import Scaling -from .contour_detection import ContourDetection -from .split import Split - -__all__ = [ - 'Compose', - 'LoadImageFromFile', 'LoadBodyImageFromFile', 'ImageShow', - 'KeypointDetection', - 'Segmentation', - 'Painting', 'PrintPainting', - 'Scaling', - 'ContourDetection', - 'split', -] diff --git a/app/service/design/items/pipelines/compose.py b/app/service/design/items/pipelines/compose.py deleted file mode 100644 index daf6977..0000000 --- a/app/service/design/items/pipelines/compose.py +++ /dev/null @@ -1,36 +0,0 @@ -import collections - -from mmcv.utils import build_from_cfg - -from ..builder import PIPELINES - - -@PIPELINES.register_module() -class Compose(object): - def __init__(self, transforms): - assert isinstance(transforms, collections.abc.Sequence) - self.transforms = [] - for transform in transforms: - if isinstance(transform, dict): - transform = build_from_cfg(transform, PIPELINES) - self.transforms.append(transform) - elif callable(transform): - self.transforms.append(transform) - else: - raise TypeError('transform must be callable or a dict') - - def __call__(self, data): - """Call function to apply transforms sequentially. - - Args: - data (dict): A result dict contains the data to transform. - - Returns: - dict: Transformed data. 
- """ - - for t in self.transforms: - data = t(data) - if data is None: - return None - return data diff --git a/app/service/design/items/pipelines/contour_detection.py b/app/service/design/items/pipelines/contour_detection.py deleted file mode 100644 index 487d2d6..0000000 --- a/app/service/design/items/pipelines/contour_detection.py +++ /dev/null @@ -1,59 +0,0 @@ -import cv2 -import numpy as np - -from ..builder import PIPELINES - - -@PIPELINES.register_module() -class ContourDetection(object): - def __init__(self): - # logging.info("ContourDetection run ") - pass - - # @ RunTime - def __call__(self, result): - # shoe diff - if result['name'] == 'shoes': - Contour = self.get_contours(result['image']) - Mask = np.zeros(result['image'].shape[:2], np.uint8) - for i in range(2): - Max_contour = Contour[i] - Epsilon = 0.001 * cv2.arcLength(Max_contour, True) - Approx = cv2.approxPolyDP(Max_contour, Epsilon, True) - cv2.drawContours(Mask, [Approx], -1, 255, -1) - if result['pre_mask'] is None: - result['mask'] = Mask - else: - result['mask'] = cv2.bitwise_and(Mask, result['pre_mask']) - else: - Contour = self.get_contours(result['image']) - Mask = np.zeros(result['image'].shape[:2], np.uint8) - if len(Contour): - Max_contour = Contour[0] - Epsilon = 0.001 * cv2.arcLength(Max_contour, True) - Approx = cv2.approxPolyDP(Max_contour, Epsilon, True) - cv2.drawContours(Mask, [Approx], -1, 255, -1) - else: - Mask = np.ones(result['image'].shape[:2], np.uint8) * 255 - # TODO 修复部分图片出现透明的情况 下版本上线 - # img2gray = cv2.cvtColor(result['image'], cv2.COLOR_BGR2GRAY) - # ret, Mask = cv2.threshold(img2gray, 126, 255, cv2.THRESH_BINARY) - # Mask = cv2.bitwise_not(Mask) - if result['pre_mask'] is None: - result['mask'] = Mask - else: - result['mask'] = cv2.bitwise_and(Mask, result['pre_mask']) - result['front_mask'] = result['mask'] - result['back_mask'] = result['mask'] - return result - - @staticmethod - def get_contours(image): - gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) - Edge = 
cv2.Canny(gray, 10, 150) - kernel = np.ones((5, 5), np.uint8) - Edge = cv2.dilate(Edge, kernel=kernel, iterations=1) - Edge = cv2.erode(Edge, kernel=kernel, iterations=1) - Contour, _ = cv2.findContours(Edge, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - Contour = sorted(Contour, key=cv2.contourArea, reverse=True) - return Contour diff --git a/app/service/design/items/pipelines/keypoints.py b/app/service/design/items/pipelines/keypoints.py deleted file mode 100644 index fded7de..0000000 --- a/app/service/design/items/pipelines/keypoints.py +++ /dev/null @@ -1,140 +0,0 @@ -import logging -import time - -import numpy as np -from pymilvus import MilvusClient - -from app.core.config import * -from app.service.utils.decorator import RunTime, ClassCallRunTime -from ..builder import PIPELINES -from ...utils.design_ensemble import get_keypoint_result - - -@PIPELINES.register_module() -class KeypointDetection(object): - """ - path here: abstract path - """ - - # def __init__(self): - # self.client = MilvusClient( - # uri="http://10.1.1.240:19530", - # token="root:Milvus", - # db_name=MILVUS_ALIAS - # ) - - # def __del__(self): - # start_time = time.time() - # self.client.close() - # print(f"client close time : {time.time() - start_time}") - - # @ClassCallRunTime - def __call__(self, result): - # logging.info("KeypointDetection run ") - if result['name'] in ['blouse', 'skirt', 'dress', 'outwear', 'trousers', 'tops', 'bottoms']: # 查询是否有数据 且类别相同 相同则直接读 不同则推理后更新 - # result['clothes_keypoint'] = self.infer_keypoint_result(result) - site = 'up' if result['name'] in ['blouse', 'outwear', 'dress', 'tops'] else 'down' - # keypoint_cache = search_keypoint_cache(result["image_id"], site) - - keypoint_cache = self.keypoint_cache(result, site) - # 取消向量查询 直接过模型推理 - # keypoint_cache = False - - if keypoint_cache is False: - keypoint_infer_result, site = self.infer_keypoint_result(result) - result['clothes_keypoint'] = self.save_keypoint_cache(result["image_id"], keypoint_infer_result, 
site) - else: - result['clothes_keypoint'] = keypoint_cache - return result - - @staticmethod - def infer_keypoint_result(result): - site = 'up' if result['name'] in ['blouse', 'outwear', 'dress', 'tops'] else 'down' - start_time = time.time() - keypoint_infer_result = get_keypoint_result(result["image"], site) # 推理结果 - # logging.info(f"infer keypoint time : {time.time() - start_time}") - return keypoint_infer_result, site - - @staticmethod - # @ RunTime - def save_keypoint_cache(keypoint_id, cache, site): - if site == "down": - zeros = np.zeros(20, dtype=int) - result = np.concatenate([zeros, cache.flatten()]) - else: - zeros = np.zeros(4, dtype=int) - result = np.concatenate([cache.flatten(), zeros]) - # 取消向量保存 直接拿结果 - data = [ - {"keypoint_id": keypoint_id, - "keypoint_site": site, - "keypoint_vector": result.tolist() - } - ] - try: - client = MilvusClient(uri=MILVUS_URL, token=MILVUS_TOKEN, db_name=MILVUS_ALIAS) - # start_time = time.time() - res = client.upsert(collection_name=MILVUS_TABLE_KEYPOINT, data=data) - # logging.info(f"save keypoint time : {time.time() - start_time}") - client.close() - return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist())) - except Exception as e: - logging.info(f"save keypoint cache milvus error : {e}") - return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist())) - - @staticmethod - def update_keypoint_cache(keypoint_id, infer_result, search_result, site): - if site == "up": - # 需要的是up 即推理出来的是up 那么查询的就是down - result = np.concatenate([infer_result.flatten(), search_result[-4:]]) - else: - # 需要的是down 即推理出来的是down 那么查询的就是up - result = np.concatenate([search_result[:20], infer_result.flatten()]) - data = [ - {"keypoint_id": keypoint_id, - "keypoint_site": "all", - "keypoint_vector": result.tolist() - } - ] - - try: - client = MilvusClient(uri=MILVUS_URL, token=MILVUS_TOKEN, db_name=MILVUS_ALIAS) - # connections.connect(alias=MILVUS_ALIAS, host=MILVUS_DB_HOST, 
port=MILVUS_PORT) - start_time = time.time() - # collection = Collection(MILVUS_TABLE_KEYPOINT) # Get an existing collection. - # mr = collection.upsert(data) - client.upsert( - collection_name=MILVUS_TABLE_KEYPOINT, - data=data - ) - # logging.info(f"save keypoint time : {time.time() - start_time}") - return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist())) - except Exception as e: - logging.info(f"save keypoint cache milvus error : {e}") - return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist())) - - # @ RunTime - def keypoint_cache(self, result, site): - try: - client = MilvusClient(uri=MILVUS_URL, token=MILVUS_TOKEN, db_name=MILVUS_ALIAS) - keypoint_id = result['image_id'] - res = client.query( - collection_name=MILVUS_TABLE_KEYPOINT, - # ids=[keypoint_id], - filter=f"keypoint_id == {keypoint_id}", - output_fields=['keypoint_vector', 'keypoint_site'] - ) - if len(res) == 0: - # 没有结果 直接推理拿结果 并保存 - keypoint_infer_result, site = self.infer_keypoint_result(result) - return self.save_keypoint_cache(result['image_id'], keypoint_infer_result, site) - elif res[0]["keypoint_site"] == "all" or res[0]["keypoint_site"] == site: - # 需要的类型和查询的类型一致,或者查询的类型为all 则直接返回查询的结果 - return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, np.array(res[0]['keypoint_vector']).astype(int).reshape(12, 2).tolist())) - elif res[0]["keypoint_site"] != site: - # 需要的类型和查询到的不一致,则更新类型为all - keypoint_infer_result, site = self.infer_keypoint_result(result) - return self.update_keypoint_cache(result["image_id"], keypoint_infer_result, res[0]['keypoint_vector'], site) - except Exception as e: - logging.info(f"search keypoint cache milvus error {e}") - return False diff --git a/app/service/design/items/pipelines/loading.py b/app/service/design/items/pipelines/loading.py deleted file mode 100644 index 04dc4d8..0000000 --- a/app/service/design/items/pipelines/loading.py +++ /dev/null @@ -1,134 +0,0 @@ -import cv2 - -from 
app.service.utils.oss_client import oss_get_image -from ..builder import PIPELINES - - -@PIPELINES.register_module() -class LoadImageFromFile(object): - def __init__(self, path, color=None, print_dict=None): - self.path = path - self.color = color - self.print_dict = print_dict - # self.minio_client = Minio(f"{MINIO_URL}", access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) - - # @ClassCallRunTime - def __call__(self, result): - result['image'], result['pre_mask'] = self.read_image(self.path) - result['gray'] = cv2.cvtColor(result['image'], cv2.COLOR_BGR2GRAY) - result['keypoint'] = self.get_keypoint(result['name']) - result['path'] = self.path - result['img_shape'] = result['image'].shape - result['ori_shape'] = result['image'].shape - result['color'] = self.color if self.color is not None else None - result['print_dict'] = self.print_dict - return result - - @staticmethod - def get_keypoint(name): - if name == 'blouse' or name == 'outwear' or name == 'dress' or name == 'tops': - keypoint = 'shoulder' - elif name == 'trousers' or name == 'skirt' or name == 'bottoms': - keypoint = 'waistband' - elif name == 'bag': - keypoint = 'hand_point' - elif name == 'shoes': - keypoint = 'toe' - elif name == 'hairstyle': - keypoint = 'head_point' - elif name == 'earring': - keypoint = 'ear_point' - else: - raise KeyError(f"{name} does not belong to item category list: blouse, outwear, dress, trousers, skirt, " - f"bag, shoes, hairstyle, earring.") - return keypoint - - @staticmethod - def read_image(image_path): - image_mask = None - image = oss_get_image(bucket=image_path.split("/", 1)[0], object_name=image_path.split("/", 1)[1], data_type="cv2") - if len(image.shape) == 2: - image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) - if image.shape[2] == 4: # 如果是四通道 mask - image_mask = image[:, :, 3] - image = image[:, :, :3] - - if image.shape[:2] <= (50, 50): - # 计算新尺寸 - new_size = (image.shape[1] * 2, image.shape[0] * 2) - # 调整大小 - image = cv2.resize(image, 
new_size, interpolation=cv2.INTER_LINEAR) - return image, image_mask - - -@PIPELINES.register_module() -class LoadBodyImageFromFile(object): - def __init__(self, body_path): - self.body_path = body_path - # self.minioClient = Minio(f"{MINIO_URL}", access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) - - # response = self.minioClient.get_object("aida-mannequins", "model_1693218345.2714431.png") - - # @ RunTime - def __call__(self, result): - result["image_url"] = result['body_path'] = self.body_path - result["name"] = "mannequin" - # if not result['image_url'].lower().endswith(".png"): - # bucket = self.body_path.split("/", 1)[0] - # object_name = self.body_path.split("/", 1)[1] - # new_object_name = f'{object_name[:object_name.rfind(".")]}.png' - # image = self.minioClient.get_object(bucket, object_name) - # image = Image.open(io.BytesIO(image.data)) - # image = image.convert("RGBA") - # data = image.getdata() - # # - # new_data = [] - # for item in data: - # if item[0] >= 230 and item[1] >= 230 and item[2] >= 230: - # new_data.append((255, 255, 255, 0)) - # else: - # new_data.append(item) - # image.putdata(new_data) - # image_data = io.BytesIO() - # image.save(image_data, format='PNG') - # image_data.seek(0) - # image_bytes = image_data.read() - # image_path = f"{bucket}/{self.minioClient.put_object(bucket, new_object_name, io.BytesIO(image_bytes), len(image_bytes), content_type='image/png').object_name}" - # self.body_path = image_path - # result["image_url"] = result['body_path'] = self.body_path - # response = self.minioClient.get_object(self.body_path.split("/", 1)[0], self.body_path.split("/", 1)[1]) - # put_image_time = time.time() - # result['body_image'] = Image.open(io.BytesIO(response.read())) - result['body_image'] = oss_get_image(bucket=self.body_path.split("/", 1)[0], object_name=self.body_path.split("/", 1)[1], data_type="PIL") - # logging.info(f"Image.open time is : {time.time() - put_image_time}") - return result - - 
-@PIPELINES.register_module() -class ImageShow(object): - def __init__(self, key): - self.key = key - - # @ RunTime - def __call__(self, result): - import matplotlib.pyplot as plt - if isinstance(self.key, list): - for key in self.key: - plt.imshow(result[key]) - plt.title(key) - plt.show() - elif isinstance(self.key, str): - img = self._resize_img(result[self.key]) - cv2.imshow(self.key, img) - cv2.waitKey(0) - else: - raise TypeError(f'key should be string but got type {type(self.key)}.') - return result - - @staticmethod - def _resize_img(img): - shape = img.shape - if shape[0] > 400 or shape[1] > 400: - ratio = min(400 / shape[0], 400 / shape[1]) - img = cv2.resize(img, (int(ratio * shape[1]), int(ratio * shape[0]))) - return img diff --git a/app/service/design/items/pipelines/painting.py b/app/service/design/items/pipelines/painting.py deleted file mode 100644 index 993697c..0000000 --- a/app/service/design/items/pipelines/painting.py +++ /dev/null @@ -1,605 +0,0 @@ -import logging -import random - -import cv2 -import numpy as np -from PIL import Image - -from app.service.utils.oss_client import oss_get_image -from ..builder import PIPELINES - -logger = logging.getLogger() - - -@PIPELINES.register_module() -class Painting(object): - def __init__(self, painting_flag=True): - self.painting_flag = painting_flag - - # @ClassCallRunTime - def __call__(self, result): - if result['name'] not in ['hairstyle', 'earring'] and self.painting_flag and result['color'] != 'none': - dim_image_h, dim_image_w = result['image'].shape[0:2] - if "gradient" in result.keys() and result['gradient'] != "": - bucket_name = result['gradient'].split('/')[0] - object_name = result['gradient'][result['gradient'].find('/') + 1:] - pattern = self.get_gradient(bucket_name=bucket_name, object_name=object_name) - resize_pattern = cv2.resize(pattern, (dim_image_w, dim_image_h), interpolation=cv2.INTER_AREA) - else: - pattern = self.get_pattern(result['color']) - resize_pattern = 
cv2.resize(pattern, (dim_image_w, dim_image_h), interpolation=cv2.INTER_AREA) - closed_mo = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2) - gray_mo = np.expand_dims(result['gray'], axis=2).repeat(3, axis=2) - get_image_fir = resize_pattern * (closed_mo / 255) * (gray_mo / 255) - result['pattern_image'] = get_image_fir.astype(np.uint8) - result['final_image'] = result['pattern_image'] - canvas = np.full_like(result['final_image'], 255) - temp_bg = np.expand_dims(cv2.bitwise_not(result['mask']), axis=2).repeat(3, axis=2) - tmp1 = (canvas * (temp_bg / 255)).astype(np.uint8) - temp_fg = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2) - tmp2 = (result['final_image'] * (temp_fg / 255)).astype(np.uint8) - result['single_image'] = cv2.add(tmp1, tmp2) - result['alpha'] = 100 / 255.0 - else: - closed_mo = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2) - get_image_fir = result['image'] * (closed_mo / 255) - result['pattern_image'] = get_image_fir.astype(np.uint8) - result['final_image'] = result['pattern_image'] - return result - - @staticmethod - def get_gradient(bucket_name, object_name): - # image_data = minio_client.get_object(bucket_name, object_name) - # image_data = s3.get_object(Bucket=bucket_name, Key=object_name)['Body'] - - # 从数据流中读取图像 - # image_bytes = image_data.read() - - # 将图像数据转换为numpy数组 - # image_array = np.asarray(bytearray(image_bytes), dtype=np.uint8) - - # 使用OpenCV解码图像数组 - # image = cv2.imdecode(image_array, cv2.IMREAD_COLOR) - image = oss_get_image(bucket=bucket_name, object_name=object_name, data_type="cv2") - if image.shape[2] == 4: - image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR) - return image - - @staticmethod - def crop_image(image, image_size_h, image_size_w): - x_offset = np.random.randint(low=0, high=int(image_size_h / 5) - 6) - y_offset = np.random.randint(low=0, high=int(image_size_w / 5) - 6) - image = image[x_offset: x_offset + image_size_h, y_offset: y_offset + image_size_w, :] - return image - - @staticmethod 
- def get_pattern(single_color): - if single_color is None: - raise False - R, G, B = single_color.split(' ') - pattern = np.zeros([1, 1, 3], np.uint8) - pattern[0, 0, 0] = int(B) - pattern[0, 0, 1] = int(G) - pattern[0, 0, 2] = int(R) - return pattern - - -@PIPELINES.register_module() -class PrintPainting(object): - def __init__(self, print_flag=True): - self.print_flag = print_flag - - # @ClassCallRunTime - def __call__(self, result): - single_print = result['print']['single'] - overall_print = result['print']['overall'] - element_print = result['print']['element'] - result['single_image'] = None - result['print_image'] = None - if overall_print['print_path_list']: - painting_dict = {'dim_image_h': result['pattern_image'].shape[0], 'dim_image_w': result['pattern_image'].shape[1]} - result['print_image'] = result['pattern_image'] - if "print_angle_list" in overall_print.keys() and overall_print['print_angle_list'][0] != 0: - painting_dict = self.painting_collection(painting_dict, overall_print, print_trigger=True) - painting_dict['tile_print'] = self.rotate_crop_image(img=painting_dict['tile_print'], angle=-overall_print['print_angle_list'][0], crop=True) - painting_dict['mask_inv_print'] = self.rotate_crop_image(img=painting_dict['mask_inv_print'], angle=-overall_print['print_angle_list'][0], crop=True) - - # resize 到sketch大小 - painting_dict['tile_print'] = self.resize_and_crop(img=painting_dict['tile_print'], target_width=painting_dict['dim_image_w'], target_height=painting_dict['dim_image_h']) - painting_dict['mask_inv_print'] = self.resize_and_crop(img=painting_dict['mask_inv_print'], target_width=painting_dict['dim_image_w'], target_height=painting_dict['dim_image_h']) - else: - painting_dict = self.painting_collection(painting_dict, overall_print, print_trigger=True, is_single=False) - result['print_image'] = self.printpaint(result, painting_dict, print_=True) - result['single_image'] = result['final_image'] = result['pattern_image'] = result['print_image'] 
- - if single_print['print_path_list']: - print_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8) - mask_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8) - for i in range(len(single_print['print_path_list'])): - image, image_mode = self.read_image(single_print['print_path_list'][i]) - if image_mode == "RGBA": - new_size = (int(image.width * single_print['print_scale_list'][i]), int(image.height * single_print['print_scale_list'][i])) - - mask = image.split()[3] - resized_source = image.resize(new_size) - resized_source_mask = mask.resize(new_size) - - rotated_resized_source = resized_source.rotate(-single_print['print_angle_list'][i]) - rotated_resized_source_mask = resized_source_mask.rotate(-single_print['print_angle_list'][i]) - - source_image_pil = Image.fromarray(cv2.cvtColor(print_background, cv2.COLOR_BGR2RGB)) - source_image_pil_mask = Image.fromarray(cv2.cvtColor(mask_background, cv2.COLOR_BGR2RGB)) - - source_image_pil.paste(rotated_resized_source, (int(single_print['location'][i][0]), int(single_print['location'][i][1])), rotated_resized_source) - source_image_pil_mask.paste(rotated_resized_source_mask, (int(single_print['location'][i][0]), int(single_print['location'][i][1])), rotated_resized_source_mask) - - print_background = cv2.cvtColor(np.array(source_image_pil), cv2.COLOR_RGBA2BGR) - mask_background = cv2.cvtColor(np.array(source_image_pil_mask), cv2.COLOR_RGBA2BGR) - ret, mask_background = cv2.threshold(mask_background, 124, 255, cv2.THRESH_BINARY) - else: - mask = self.get_mask_inv(image) - mask = np.expand_dims(mask, axis=2) - mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) - mask = cv2.bitwise_not(mask) - # 旋转后的坐标需要重新算 - rotate_mask, _ = self.img_rotate(mask, single_print['print_angle_list'][i], single_print['print_scale_list'][i]) - rotate_image, rotated_new_size = self.img_rotate(image, single_print['print_angle_list'][i], 
single_print['print_scale_list'][i]) - # x, y = int(result['print']['location'][i][0] - rotated_new_size[0] - (rotate_mask.shape[0] - image.shape[0]) / 2), int(result['print']['location'][i][1] - rotated_new_size[1] - (rotate_mask.shape[1] - image.shape[1]) / 2) - x, y = int(single_print['location'][i][0] - rotated_new_size[0]), int(single_print['location'][i][1] - rotated_new_size[1]) - - image_x = print_background.shape[1] - image_y = print_background.shape[0] - print_x = rotate_image.shape[1] - print_y = rotate_image.shape[0] - - # 有bug - # if x + print_x > image_x: - # rotate_image = rotate_image[:, :x + print_x - image_x] - # rotate_mask = rotate_mask[:, :x + print_x - image_x] - # # - # if y + print_y > image_y: - # rotate_image = rotate_image[:y + print_y - image_y] - # rotate_mask = rotate_mask[:y + print_y - image_y] - - # 不能是并行 - # 当前第一轮的if (108以及115)是判断有没有过下界和右界。第二轮的是判断左上有没有超出。 如果这个样子的话,先裁了右边,再左移,region就会有问题 - # 先挪 再判断 最后裁剪 - - # 如果print旋转了 或者 print贴边了 则需要判断 判断左界和上界是否小于0 - if x <= 0: - rotate_image = rotate_image[:, -x:] - rotate_mask = rotate_mask[:, -x:] - start_x = x = 0 - else: - start_x = x - - if y <= 0: - rotate_image = rotate_image[-y:, :] - rotate_mask = rotate_mask[-y:, :] - start_y = y = 0 - else: - start_y = y - - # ------------------ - # 如果print-size大于image-size 则需要裁剪print - - if x + print_x > image_x: - rotate_image = rotate_image[:, :image_x - x] - rotate_mask = rotate_mask[:, :image_x - x] - - if y + print_y > image_y: - rotate_image = rotate_image[:image_y - y, :] - rotate_mask = rotate_mask[:image_y - y, :] - - # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = cv2.bitwise_xor(mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]], rotate_mask) - # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = cv2.add(print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]], rotate_image) - - # 
mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = rotate_mask - # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = rotate_image - mask_background = self.stack_prin(mask_background, result['pattern_image'], rotate_mask, start_y, y, start_x, x) - print_background = self.stack_prin(print_background, result['pattern_image'], rotate_image, start_y, y, start_x, x) - - # gray_image = cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY) - # print_background = cv2.bitwise_and(print_background, print_background, mask=gray_image) - - print_mask = cv2.bitwise_and(result['mask'], cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY)) - img_fg = cv2.bitwise_or(print_background, print_background, mask=print_mask) - img_bg = cv2.bitwise_and(result['pattern_image'], result['pattern_image'], mask=cv2.bitwise_not(print_mask)) - mask_mo = np.expand_dims(print_mask, axis=2).repeat(3, axis=2) - gray_mo = np.expand_dims(result['gray'], axis=2).repeat(3, axis=2) - img_fg = (img_fg * (mask_mo / 255) * (gray_mo / 255)).astype(np.uint8) - result['final_image'] = cv2.add(img_bg, img_fg) - canvas = np.full_like(result['final_image'], 255) - temp_bg = np.expand_dims(cv2.bitwise_not(result['mask']), axis=2).repeat(3, axis=2) - tmp1 = (canvas * (temp_bg / 255)).astype(np.uint8) - temp_fg = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2) - tmp2 = (result['final_image'] * (temp_fg / 255)).astype(np.uint8) - result['single_image'] = cv2.add(tmp1, tmp2) - - if element_print['element_path_list']: - print_background = np.zeros((result['final_image'].shape[0], result['final_image'].shape[1], 3), dtype=np.uint8) - mask_background = np.zeros((result['final_image'].shape[0], result['final_image'].shape[1], 3), dtype=np.uint8) - for i in range(len(element_print['element_path_list'])): - image, image_mode = self.read_image(element_print['element_path_list'][i]) - if image_mode == "RGBA": - new_size = (int(image.width * 
element_print['element_scale_list'][i]), int(image.height * element_print['element_scale_list'][i])) - - mask = image.split()[3] - resized_source = image.resize(new_size) - resized_source_mask = mask.resize(new_size) - - rotated_resized_source = resized_source.rotate(-element_print['element_angle_list'][i]) - rotated_resized_source_mask = resized_source_mask.rotate(-element_print['element_angle_list'][i]) - - source_image_pil = Image.fromarray(cv2.cvtColor(print_background, cv2.COLOR_BGR2RGB)) - source_image_pil_mask = Image.fromarray(cv2.cvtColor(mask_background, cv2.COLOR_BGR2RGB)) - - source_image_pil.paste(rotated_resized_source, (int(element_print['location'][i][0]), int(element_print['location'][i][1])), rotated_resized_source) - source_image_pil_mask.paste(rotated_resized_source_mask, (int(element_print['location'][i][0]), int(element_print['location'][i][1])), rotated_resized_source_mask) - - print_background = cv2.cvtColor(np.array(source_image_pil), cv2.COLOR_RGBA2BGR) - mask_background = cv2.cvtColor(np.array(source_image_pil_mask), cv2.COLOR_RGBA2BGR) - else: - mask = self.get_mask_inv(image) - mask = np.expand_dims(mask, axis=2) - mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) - mask = cv2.bitwise_not(mask) - # 旋转后的坐标需要重新算 - rotate_mask, _ = self.img_rotate(mask, element_print['element_angle_list'][i], element_print['element_scale_list'][i]) - rotate_image, rotated_new_size = self.img_rotate(image, element_print['element_angle_list'][i], element_print['element_scale_list'][i]) - # x, y = int(result['print']['location'][i][0] - rotated_new_size[0] - (rotate_mask.shape[0] - image.shape[0]) / 2), int(result['print']['location'][i][1] - rotated_new_size[1] - (rotate_mask.shape[1] - image.shape[1]) / 2) - x, y = int(element_print['location'][i][0] - rotated_new_size[0]), int(element_print['location'][i][1] - rotated_new_size[1]) - - image_x = print_background.shape[1] - image_y = print_background.shape[0] - print_x = rotate_image.shape[1] - print_y = 
rotate_image.shape[0] - - # 有bug - # if x + print_x > image_x: - # rotate_image = rotate_image[:, :x + print_x - image_x] - # rotate_mask = rotate_mask[:, :x + print_x - image_x] - # # - # if y + print_y > image_y: - # rotate_image = rotate_image[:y + print_y - image_y] - # rotate_mask = rotate_mask[:y + print_y - image_y] - - # 不能是并行 - # 当前第一轮的if (108以及115)是判断有没有过下界和右界。第二轮的是判断左上有没有超出。 如果这个样子的话,先裁了右边,再左移,region就会有问题 - # 先挪 再判断 最后裁剪 - - # 如果print旋转了 或者 print贴边了 则需要判断 判断左界和上界是否小于0 - if x <= 0: - rotate_image = rotate_image[:, -x:] - rotate_mask = rotate_mask[:, -x:] - start_x = x = 0 - else: - start_x = x - - if y <= 0: - rotate_image = rotate_image[-y:, :] - rotate_mask = rotate_mask[-y:, :] - start_y = y = 0 - else: - start_y = y - - # ------------------ - # 如果print-size大于image-size 则需要裁剪print - - if x + print_x > image_x: - rotate_image = rotate_image[:, :image_x - x] - rotate_mask = rotate_mask[:, :image_x - x] - - if y + print_y > image_y: - rotate_image = rotate_image[:image_y - y, :] - rotate_mask = rotate_mask[:image_y - y, :] - - # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = cv2.bitwise_xor(mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]], rotate_mask) - # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = cv2.add(print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]], rotate_image) - - # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = rotate_mask - # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = rotate_image - mask_background = self.stack_prin(mask_background, result['pattern_image'], rotate_mask, start_y, y, start_x, x) - print_background = self.stack_prin(print_background, result['pattern_image'], rotate_image, start_y, y, start_x, x) - - # gray_image = cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY) - # print_background = 
cv2.bitwise_and(print_background, print_background, mask=gray_image) - - print_mask = cv2.bitwise_and(result['mask'], cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY)) - img_fg = cv2.bitwise_or(print_background, print_background, mask=print_mask) - # TODO element 丢失信息 - three_channel_image = cv2.merge([cv2.bitwise_not(print_mask), cv2.bitwise_not(print_mask), cv2.bitwise_not(print_mask)]) - img_bg = cv2.bitwise_and(result['final_image'], three_channel_image) - # mask_mo = np.expand_dims(print_mask, axis=2).repeat(3, axis=2) - # gray_mo = np.expand_dims(result['gray'], axis=2).repeat(3, axis=2) - # img_fg = (img_fg * (mask_mo / 255) * (gray_mo / 255)).astype(np.uint8) - result['final_image'] = cv2.add(img_bg, img_fg) - canvas = np.full_like(result['final_image'], 255) - temp_bg = np.expand_dims(cv2.bitwise_not(result['mask']), axis=2).repeat(3, axis=2) - tmp1 = (canvas * (temp_bg / 255)).astype(np.uint8) - temp_fg = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2) - tmp2 = (result['final_image'] * (temp_fg / 255)).astype(np.uint8) - result['single_image'] = cv2.add(tmp1, tmp2) - return result - - @staticmethod - def stack_prin(print_background, pattern_image, rotate_image, start_y, y, start_x, x): - temp_print = np.zeros((pattern_image.shape[0], pattern_image.shape[1], 3), dtype=np.uint8) - temp_print[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = rotate_image - img2gray = cv2.cvtColor(temp_print, cv2.COLOR_BGR2GRAY) - ret, mask_ = cv2.threshold(img2gray, 1, 255, cv2.THRESH_BINARY) - mask_inv = cv2.bitwise_not(mask_) - img1_bg = cv2.bitwise_and(print_background, print_background, mask=mask_inv) - img2_fg = cv2.bitwise_and(temp_print, temp_print, mask=mask_) - print_background = img1_bg + img2_fg - return print_background - - def painting_collection(self, painting_dict, print_dict, print_trigger=False, is_single=False): - if print_trigger: - print_ = self.get_print(print_dict) - painting_dict['Trigger'] = not is_single - 
painting_dict['location'] = print_['location'] - single_mask_inv_print = self.get_mask_inv(print_['image']) - dim_max = max(painting_dict['dim_image_h'], painting_dict['dim_image_w']) - dim_pattern = (int(dim_max * print_['scale'] / 5), int(dim_max * print_['scale'] / 5)) - if not is_single: - self.random_seed = random.randint(0, 1000) - # 如果print 模式为overall 且 有角度的话 , 组合的print为正方形,方便裁剪 - if "print_angle_list" in print_dict.keys() and print_dict['print_angle_list'][0] != 0: - painting_dict['mask_inv_print'] = self.tile_image(single_mask_inv_print, dim_pattern, print_['scale'], dim_max, dim_max, painting_dict['location'], trigger=True) - painting_dict['tile_print'] = self.tile_image(print_['image'], dim_pattern, print_['scale'], dim_max, dim_max, painting_dict['location'], trigger=True) - else: - painting_dict['mask_inv_print'] = self.tile_image(single_mask_inv_print, dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location'], trigger=True) - painting_dict['tile_print'] = self.tile_image(print_['image'], dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location'], trigger=True) - else: - painting_dict['mask_inv_print'] = self.tile_image(single_mask_inv_print, dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location']) - painting_dict['tile_print'] = self.tile_image(print_['image'], dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location']) - painting_dict['dim_print_h'], painting_dict['dim_print_w'] = dim_pattern - return painting_dict - - def tile_image(self, pattern, dim, scale, dim_image_h, dim_image_w, location, trigger=False): - tile = None - if not trigger: - tile = cv2.resize(pattern, dim, interpolation=cv2.INTER_AREA) - else: - resize_pattern = cv2.resize(pattern, dim, interpolation=cv2.INTER_AREA) - if len(pattern.shape) == 2: - tile 
= np.tile(resize_pattern, (int((5 + 1) / scale) + 4, int((5 + 1) / scale) + 4)) - if len(pattern.shape) == 3: - tile = np.tile(resize_pattern, (int((5 + 1) / scale) + 4, int((5 + 1) / scale) + 4, 1)) - tile = self.crop_image(tile, dim_image_h, dim_image_w, location, resize_pattern.shape) - return tile - - def get_mask_inv(self, print_): - if print_[0][0][0] == 255 and print_[0][0][1] == 255 and print_[0][0][2] == 255: - bg_color = cv2.cvtColor(print_, cv2.COLOR_BGR2LAB)[0][0] - print_tile = cv2.cvtColor(print_, cv2.COLOR_BGR2LAB) - bg_l, bg_a, bg_b = bg_color[0], bg_color[1], bg_color[2] - bg_L_high, bg_L_low = self.get_low_high_lab(bg_l, L=True) - bg_a_high, bg_a_low = self.get_low_high_lab(bg_a) - bg_b_high, bg_b_low = self.get_low_high_lab(bg_b) - lower = np.array([bg_L_low, bg_a_low, bg_b_low]) - upper = np.array([bg_L_high, bg_a_high, bg_b_high]) - mask_inv = cv2.inRange(print_tile, lower, upper) - return mask_inv - else: - # bg_color = cv2.cvtColor(print_, cv2.COLOR_BGR2LAB)[0][0] - # print_tile = cv2.cvtColor(print_, cv2.COLOR_BGR2LAB) - # bg_l, bg_a, bg_b = bg_color[0], bg_color[1], bg_color[2] - # bg_L_high, bg_L_low = self.get_low_high_lab(bg_l, L=True) - # bg_a_high, bg_a_low = self.get_low_high_lab(bg_a) - # bg_b_high, bg_b_low = self.get_low_high_lab(bg_b) - # lower = np.array([bg_L_low, bg_a_low, bg_b_low]) - # upper = np.array([bg_L_high, bg_a_high, bg_b_high]) - - # print_tile = cv2.cvtColor(print_, cv2.COLOR_BGR2LAB) - # mask_inv = cv2.cvtColor(print_tile, cv2.COLOR_BGR2GRAY) - - # mask_inv = cv2.cvtColor(print_, cv2.COLOR_BGR2GRAY) - mask_inv = np.zeros(print_.shape[:2], dtype=np.uint8) - return mask_inv - - @staticmethod - def printpaint(result, painting_dict, print_=False): - - if print_ and painting_dict['Trigger']: - print_mask = cv2.bitwise_and(result['mask'], cv2.bitwise_not(painting_dict['mask_inv_print'])) - img_fg = cv2.bitwise_and(painting_dict['tile_print'], painting_dict['tile_print'], mask=print_mask) - else: - print_mask = 
result['mask'] - img_fg = result['final_image'] - if print_ and not painting_dict['Trigger']: - index_ = None - try: - index_ = len(painting_dict['location']) - except: - assert f'there must be parameter of location if choose IfSingle' - - for i in range(index_): - start_h, start_w = int(painting_dict['location'][i][1]), int(painting_dict['location'][i][0]) - - length_h = min(start_h + painting_dict['dim_print_h'], img_fg.shape[0]) - length_w = min(start_w + painting_dict['dim_print_w'], img_fg.shape[1]) - - change_region = img_fg[start_h: length_h, start_w: length_w, :] - # problem in change_mask - change_mask = print_mask[start_h: length_h, start_w: length_w] - # get real part into change mask - _, change_mask = cv2.threshold(change_mask, 220, 255, cv2.THRESH_BINARY) - mask = cv2.bitwise_not(painting_dict['mask_inv_print']) - img_fg[start_h:start_h + painting_dict['dim_print_h'], start_w:start_w + painting_dict['dim_print_w'], :] = change_region - - clothes_mask_print = cv2.bitwise_not(print_mask) - - img_bg = cv2.bitwise_and(result['pattern_image'], result['pattern_image'], mask=clothes_mask_print) - mask_mo = np.expand_dims(print_mask, axis=2).repeat(3, axis=2) - gray_mo = np.expand_dims(result['gray'], axis=2).repeat(3, axis=2) - img_fg = (img_fg * (mask_mo / 255) * (gray_mo / 255)).astype(np.uint8) - print_image = cv2.add(img_bg, img_fg) - return print_image - - @staticmethod - def get_print(print_dict): - if 'print_scale_list' not in print_dict.keys() or print_dict['print_scale_list'][0] < 0.3: - print_dict['scale'] = 0.3 - else: - print_dict['scale'] = print_dict['print_scale_list'][0] - - bucket_name = print_dict['print_path_list'][0].split("/", 1)[0] - object_name = print_dict['print_path_list'][0].split("/", 1)[1] - image = oss_get_image(bucket=bucket_name, object_name=object_name, data_type="PIL") - # 判断图片格式,如果是RGBA 则贴在一张纯白图片上 防止透明转黑 - if image.mode == "RGBA": - new_background = Image.new('RGB', image.size, (255, 255, 255)) - new_background.paste(image, 
mask=image.split()[3]) - image = new_background - print_dict['image'] = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR) - return print_dict - - def crop_image(self, image, image_size_h, image_size_w, location, print_shape): - print_w = print_shape[1] - print_h = print_shape[0] - - random.seed(self.random_seed) - # logging.info(f'overall print location : {location}') - # x_offset = random.randint(0, image.shape[0] - image_size_h) - # y_offset = random.randint(0, image.shape[1] - image_size_w) - - # 1.拿到偏移量后和resize后的print宽高取余 得到真正偏移量 - x_offset = print_w - int(location[0][1] % print_w) - y_offset = print_w - int(location[0][0] % print_h) - - # y_offset = int(location[0][0]) - # x_offset = int(location[0][1]) - - if len(image.shape) == 2: - image = image[x_offset: x_offset + image_size_h, y_offset: y_offset + image_size_w] - elif len(image.shape) == 3: - image = image[x_offset: x_offset + image_size_h, y_offset: y_offset + image_size_w, :] - return image - - @staticmethod - def get_low_high_lab(Lab_value, L=False): - if L: - high = Lab_value + 30 if Lab_value + 30 < 255 else 255 - low = Lab_value - 30 if Lab_value - 30 > 0 else 0 - else: - high = Lab_value + 30 if Lab_value + 30 < 255 else 255 - low = Lab_value - 30 if Lab_value - 30 > 0 else 0 - return high, low - - @staticmethod - def img_rotate(image, angel, scale): - """顺时针旋转图像任意角度 - - Args: - image (np.array): [原始图像] - angel (float): [逆时针旋转的角度] - - Returns: - [array]: [旋转后的图像] - """ - - h, w = image.shape[:2] - center = (w // 2, h // 2) - # if type(angel) is not int: - # angel = 0 - M = cv2.getRotationMatrix2D(center, -angel, scale) - # 调整旋转后的图像长宽 - rotated_h = int((w * np.abs(M[0, 1]) + (h * np.abs(M[0, 0])))) - rotated_w = int((h * np.abs(M[0, 1]) + (w * np.abs(M[0, 0])))) - M[0, 2] += (rotated_w - w) // 2 - M[1, 2] += (rotated_h - h) // 2 - # 旋转图像 - rotated_img = cv2.warpAffine(image, M, (rotated_w, rotated_h)) - - return rotated_img, ((rotated_img.shape[1] - image.shape[1] * scale) // 2, 
(rotated_img.shape[0] - image.shape[0] * scale) // 2) - # return rotated_img, (0, 0) - - @staticmethod - def rotate_crop_image(img, angle, crop): - """ - angle: 旋转的角度 - crop: 是否需要进行裁剪,布尔向量 - """ - crop_image = lambda img, x0, y0, w, h: img[y0:y0 + h, x0:x0 + w] - w, h = img.shape[:2] - # 旋转角度的周期是360° - angle %= 360 - # 计算仿射变换矩阵 - M_rotation = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1) - # 得到旋转后的图像 - img_rotated = cv2.warpAffine(img, M_rotation, (w, h)) - - # 如果需要去除黑边 - if crop: - # 裁剪角度的等效周期是180° - angle_crop = angle % 180 - if angle > 90: - angle_crop = 180 - angle_crop - # 转化角度为弧度 - theta = angle_crop * np.pi / 180 - # 计算高宽比 - hw_ratio = float(h) / float(w) - # 计算裁剪边长系数的分子项 - tan_theta = np.tan(theta) - numerator = np.cos(theta) + np.sin(theta) * np.tan(theta) - - # 计算分母中和高宽比相关的项 - r = hw_ratio if h > w else 1 / hw_ratio - # 计算分母项 - denominator = r * tan_theta + 1 - # 最终的边长系数 - crop_mult = numerator / denominator - - # 得到裁剪区域 - w_crop = int(crop_mult * w) - h_crop = int(crop_mult * h) - x0 = int((w - w_crop) / 2) - y0 = int((h - h_crop) / 2) - - img_rotated = crop_image(img_rotated, x0, y0, w_crop, h_crop) - - return img_rotated - - @staticmethod - def read_image(image_url): - image = oss_get_image(bucket=image_url.split("/", 1)[0], object_name=image_url.split("/", 1)[1], data_type="cv2") - if image.shape[2] == 4: - image_rgb = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA) - image = Image.fromarray(image_rgb) - image_mode = "RGBA" - else: - image_mode = "RGB" - return image, image_mode - - @staticmethod - def resize_and_crop(img, target_width, target_height): - # 获取原始图像的尺寸 - original_height, original_width = img.shape[:2] - - # 计算目标尺寸的宽高比 - target_ratio = target_width / target_height - - # 计算原始图像的宽高比 - original_ratio = original_width / original_height - - # 调整尺寸 - if original_ratio > target_ratio: - # 原始图像更宽,按高度resize,然后裁剪宽度 - new_height = target_height - new_width = int(original_width * (target_height / original_height)) - resized_img = cv2.resize(img, 
(new_width, new_height)) - # 裁剪宽度 - start_x = (new_width - target_width) // 2 - cropped_img = resized_img[:, start_x:start_x + target_width] - else: - # 原始图像更高,按宽度resize,然后裁剪高度 - new_width = target_width - new_height = int(original_height * (target_width / original_width)) - resized_img = cv2.resize(img, (new_width, new_height)) - # 裁剪高度 - start_y = (new_height - target_height) // 2 - cropped_img = resized_img[start_y:start_y + target_height, :] - - return cropped_img diff --git a/app/service/design/items/pipelines/scale.py b/app/service/design/items/pipelines/scale.py deleted file mode 100644 index edd98c9..0000000 --- a/app/service/design/items/pipelines/scale.py +++ /dev/null @@ -1,57 +0,0 @@ -import math - -import cv2 - -from app.service.utils.decorator import ClassCallRunTime -from ..builder import PIPELINES - - -@PIPELINES.register_module() -class Scaling(object): - def __init__(self): - pass - - # @ClassCallRunTime - def __call__(self, result): - if result['keypoint'] in ['waistband', 'shoulder', 'head_point']: - # milvus_db_keypoint_cache - distance_clo = math.sqrt( - (int(result['clothes_keypoint'][result['keypoint'] + '_left'][0]) - int(result['clothes_keypoint'][result['keypoint'] + '_right'][0])) ** 2 - + - (int(result['clothes_keypoint'][result['keypoint'] + '_left'][1]) - int(result['clothes_keypoint'][result['keypoint'] + '_right'][1])) ** 2) - - distance_bdy = math.sqrt((int(result['body_point_test'][result['keypoint'] + '_left'][0]) - int(result['body_point_test'][result['keypoint'] + '_right'][0])) ** 2 + 1) - # distance_clo = math.sqrt( - # (int(result['clothes_keypoint'][result['keypoint'] + '_left'].split("_")[0]) - int(result['clothes_keypoint'][result['keypoint'] + '_right'].split("_")[0])) ** 2 - # + - # (int(result['clothes_keypoint'][result['keypoint'] + '_left'].split("_")[1]) - int(result['clothes_keypoint'][result['keypoint'] + '_right'].split("_")[1])) ** 2) - # - # distance_bdy = 
math.sqrt((int(result['body_point_test'][result['keypoint'] + '_left'][0]) - int(result['body_point_test'][result['keypoint'] + '_right'][0])) ** 2 + 1) - if distance_clo == 0: - result['scale'] = 1 - else: - result['scale'] = distance_bdy / distance_clo - elif result['keypoint'] == 'toe': - distance_bdy = math.sqrt( - (int(result['body_point_test']['foot_length'][0]) - int(result['body_point_test']['foot_length'][2])) ** 2 - + - (int(result['body_point_test']['foot_length'][1]) - int(result['body_point_test']['foot_length'][3])) ** 2 - ) - - Blur = cv2.GaussianBlur(result['gray'], (3, 3), 0) - Edge = cv2.Canny(Blur, 10, 200) - Edge = cv2.dilate(Edge, None) - Edge = cv2.erode(Edge, None) - Contour, _ = cv2.findContours(Edge, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - Contours = sorted(Contour, key=cv2.contourArea, reverse=True) - - Max_contour = Contours[0] - x, y, w, h = cv2.boundingRect(Max_contour) - width = w - distance_clo = width - result['scale'] = distance_bdy / distance_clo - elif result['keypoint'] == 'hand_point': - result['scale'] = result['scale_bag'] - elif result['keypoint'] == 'ear_point': - result['scale'] = result['scale_earrings'] - return result diff --git a/app/service/design/items/pipelines/segmentation.py b/app/service/design/items/pipelines/segmentation.py deleted file mode 100644 index 7ed43e5..0000000 --- a/app/service/design/items/pipelines/segmentation.py +++ /dev/null @@ -1,71 +0,0 @@ -import logging -import os - -import cv2 -import numpy as np - -from app.core.config import SEG_CACHE_PATH -from app.service.utils.decorator import ClassCallRunTime -from app.service.utils.oss_client import oss_get_image -from ..builder import PIPELINES -from ...utils.design_ensemble import get_seg_result - -logger = logging.getLogger() - - -@PIPELINES.register_module() -class Segmentation(object): - - @ClassCallRunTime - def __call__(self, result): - if "seg_mask_url" in result.keys() and result['seg_mask_url'] != "": - seg_mask = 
oss_get_image(bucket=result['seg_mask_url'].split('/')[0], object_name=result['seg_mask_url'][result['seg_mask_url'].find('/') + 1:], data_type="cv2") - seg_mask = cv2.resize(seg_mask, (result['img_shape'][1], result['img_shape'][0]), interpolation=cv2.INTER_NEAREST) - # 转换颜色空间为 RGB(OpenCV 默认是 BGR) - image_rgb = cv2.cvtColor(seg_mask, cv2.COLOR_BGR2RGB) - - r, g, b = cv2.split(image_rgb) - red_mask = r > g - green_mask = g > r - - # 创建红色和绿色掩码 - result['front_mask'] = np.array(red_mask, dtype=np.uint8) * 255 - result['back_mask'] = np.array(green_mask, dtype=np.uint8) * 255 - result['mask'] = result['front_mask'] + result['back_mask'] - else: - # 本地查询seg 缓存是否存在 - _, seg_result = self.load_seg_result(result["image_id"]) - result['seg_result'] = seg_result - if not _: - # 推理获得seg 结果 - seg_result = get_seg_result(result["image_id"], result['image'])[0] - self.save_seg_result(seg_result, result['image_id']) - # 处理前片后片 - temp_front = seg_result == 1.0 - result['front_mask'] = (255 * (temp_front + 0).astype(np.uint8)) - temp_back = seg_result == 2.0 - result['back_mask'] = (255 * (temp_back + 0).astype(np.uint8)) - result['mask'] = result['front_mask'] + result['back_mask'] - return result - - @staticmethod - def save_seg_result(seg_result, image_id): - file_path = f"{SEG_CACHE_PATH}{image_id}.npy" - try: - np.save(file_path, seg_result) - logger.debug(f"保存成功 :{os.path.abspath(file_path)}") - except Exception as e: - logger.error(f"保存失败: {e}") - - @staticmethod - def load_seg_result(image_id): - file_path = f"{SEG_CACHE_PATH}{image_id}.npy" - try: - seg_result = np.load(file_path) - return True, seg_result - except FileNotFoundError: - # logger.warning("文件不存在") - return False, None - except Exception as e: - logger.error(f"加载失败: {e}") - return False, None diff --git a/app/service/design/items/pipelines/split.py b/app/service/design/items/pipelines/split.py deleted file mode 100644 index 3485453..0000000 --- a/app/service/design/items/pipelines/split.py +++ /dev/null @@ 
-1,79 +0,0 @@ -import io -import logging - -import cv2 -import numpy as np -from PIL import Image -from cv2 import cvtColor, COLOR_BGR2RGBA - -from app.core.config import AIDA_CLOTHING -from app.service.utils.generate_uuid import generate_uuid -from app.service.utils.oss_client import oss_upload_image -from ..builder import PIPELINES -from ...utils.conversion_image import rgb_to_rgba -from ...utils.upload_image import upload_png_mask - - -@PIPELINES.register_module() -class Split(object): - """ - Split image into front and back layer according to the segmentation result - """ - - # @ClassCallRunTime - # KNet - def __call__(self, result): - try: - - if result['name'] in ('outwear', 'dress', 'blouse', 'skirt', 'trousers', 'tops', 'bottoms'): - front_mask = result['front_mask'] - back_mask = result['back_mask'] - rgba_image = rgb_to_rgba(result['final_image'], front_mask + back_mask) - new_size = (int(rgba_image.shape[1] * result["scale"] * result["resize_scale"][0]), int(rgba_image.shape[0] * result["scale"] * result["resize_scale"][1])) - rgba_image = cv2.resize(rgba_image, new_size) - result_front_image = np.zeros_like(rgba_image) - front_mask = cv2.resize(front_mask, new_size) - result_front_image[front_mask != 0] = rgba_image[front_mask != 0] - result_front_image_pil = Image.fromarray(cvtColor(result_front_image, COLOR_BGR2RGBA)) - result['front_image'], result["front_image_url"], _ = upload_png_mask(result_front_image_pil, f'{generate_uuid()}', mask=None) - - height, width = front_mask.shape - mask_image = np.zeros((height, width, 3)) - mask_image[front_mask != 0] = [0, 0, 255] - - if result["name"] in ('blouse', 'dress', 'outwear', 'tops'): - result_back_image = np.zeros_like(rgba_image) - back_mask = cv2.resize(back_mask, new_size) - result_back_image[back_mask != 0] = rgba_image[back_mask != 0] - result_back_image_pil = Image.fromarray(cvtColor(result_back_image, COLOR_BGR2RGBA)) - result['back_image'], result["back_image_url"], _ = 
upload_png_mask(result_back_image_pil, f'{generate_uuid()}', mask=None) - mask_image[back_mask != 0] = [0, 255, 0] - - rbga_mask = rgb_to_rgba(mask_image, front_mask + back_mask) - mask_pil = Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) - image_data = io.BytesIO() - mask_pil.save(image_data, format='PNG') - image_data.seek(0) - image_bytes = image_data.read() - req = oss_upload_image(bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) - result['mask_url'] = req.bucket_name + "/" + req.object_name - else: - rbga_mask = rgb_to_rgba(mask_image, front_mask) - mask_pil = Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) - image_data = io.BytesIO() - mask_pil.save(image_data, format='PNG') - image_data.seek(0) - image_bytes = image_data.read() - req = oss_upload_image(bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) - result['mask_url'] = req.bucket_name + "/" + req.object_name - result['back_image'] = None - result["back_image_url"] = None - # result["back_mask_url"] = None - # result['back_mask_image'] = None - # 创建中间图层 - result_pattern_image_rgba = rgb_to_rgba(result['pattern_image'], result['mask']) - result_pattern_image_pil = Image.fromarray(cvtColor(result_pattern_image_rgba, COLOR_BGR2RGBA)) - result['pattern_image'], result['pattern_image_url'], _ = upload_png_mask(result_pattern_image_pil, f'{generate_uuid()}') - return result - except Exception as e: - logging.warning(f"split runtime exception : {e} image_id : {result['image_id']}") diff --git a/app/service/design/items/shoes.py b/app/service/design/items/shoes.py deleted file mode 100644 index aa20d3c..0000000 --- a/app/service/design/items/shoes.py +++ /dev/null @@ -1,121 +0,0 @@ -import cv2 -import numpy as np -from PIL import Image - -from .builder import ITEMS -from .clothing import Clothing -from ..utils.conversion_image import rgb_to_rgba -from ..utils.upload_image import 
upload_png_mask -from ...utils.generate_uuid import generate_uuid - - -@ITEMS.register_module() -class Shoes(Clothing): - # TODO location of shoes has little mismatch - def __init__(self, **kwargs): - pipeline = [ - dict(type='LoadImageFromFile', path=kwargs['path'], color=kwargs['color']), - dict(type='KeypointDetection'), - dict(type='ContourDetection'), - dict(type='Painting'), - dict(type='Scaling'), - dict(type='Split'), - # dict(type='ImageShow', key=['image', 'mask', 'pattern_image']), - ] - kwargs.update(pipeline=pipeline) - super(Shoes, self).__init__(**kwargs) - - def organize(self, layer): - left_shoe_mask, right_shoe_mask = self.cut() - - left_layer = dict(name=f'{type(self).__name__.lower()}_left', - image=self.result['shoes_left'], - image_url=self.result['left_image_url'], - mask_url=self.result['left_mask_url'], - sacle=self.result['scale'], - clothes_keypoint=self.result['clothes_keypoint'], - position=self.calculate_start_point(self.result['keypoint'], - self.result['scale'], - self.result['clothes_keypoint'], - self.result['body_point'], - 'left')) - layer.insert(left_layer) - - right_layer = dict(name=f'{type(self).__name__.lower()}_right', - image=self.result['shoes_right'], - image_url=self.result['right_image_url'], - mask_url=self.result['right_mask_url'], - sacle=self.result['scale'], - clothes_keypoint=self.result['clothes_keypoint'], - position=self.calculate_start_point(self.result['keypoint'], - self.result['scale'], - self.result['clothes_keypoint'], - self.result['body_point'], - 'right')) - - layer.insert(right_layer) - - def cut(self): - """ - Cut shoes mask into two pieces - Returns: - """ - contour, _ = cv2.findContours(self.result['mask'], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - contours = sorted(contour, key=cv2.contourArea, reverse=True) - - bounding_boxes = [cv2.boundingRect(c) for c in contours[:2]] - (contours, bounding_boxes) = zip(*sorted(zip(contours[:2], bounding_boxes), key=lambda x: x[1][0], reverse=False)) - - 
epsilon_left = 0.001 * cv2.arcLength(contours[0], True) - - approx_left = cv2.approxPolyDP(contours[0], epsilon_left, True) - mask_left = np.zeros(self.result['final_image'].shape[:2], np.uint8) - cv2.drawContours(mask_left, [approx_left], -1, 255, -1) - item_mask_left = cv2.GaussianBlur(mask_left, (5, 5), 0) - - rgba_image = rgb_to_rgba((self.result['final_image'].shape[0], self.result['final_image'].shape[1]), self.result['final_image'], item_mask_left) - result_image = np.zeros_like(rgba_image) - result_image[self.result['front_mask'] != 0] = rgba_image[self.result['front_mask'] != 0] - result_left_image_pil = Image.fromarray(result_image, 'RGBA') - result_left_image_pil = result_left_image_pil.resize((int(result_left_image_pil.width * self.result["scale"]), int(result_left_image_pil.height * self.result["scale"])), Image.LANCZOS) - self.result['shoes_left'], self.result["left_image_url"], self.result["left_mask_url"] = upload_png_mask(result_left_image_pil, f"{generate_uuid()}") - - epsilon_right = 0.001 * cv2.arcLength(contours[1], True) - approx_right = cv2.approxPolyDP(contours[1], epsilon_right, True) - mask_right = np.zeros(self.result['final_image'].shape[:2], np.uint8) - cv2.drawContours(mask_right, [approx_right], -1, 255, -1) - item_mask_right = cv2.GaussianBlur(mask_right, (5, 5), 0) - - rgba_image = rgb_to_rgba((self.result['final_image'].shape[0], self.result['final_image'].shape[1]), self.result['final_image'], item_mask_right) - result_image = np.zeros_like(rgba_image) - result_image[self.result['front_mask'] != 0] = rgba_image[self.result['front_mask'] != 0] - result_right_image_pil = Image.fromarray(result_image, 'RGBA') - result_right_image_pil = result_right_image_pil.resize((int(result_right_image_pil.width * self.result["scale"]), int(result_right_image_pil.height * self.result["scale"])), Image.LANCZOS) - self.result['shoes_right'], self.result["right_image_url"], self.result["right_mask_url"] = upload_png_mask(result_right_image_pil, 
f"{generate_uuid()}") - - return item_mask_left, item_mask_right - - @staticmethod - def calculate_start_point(keypoint_type, scale, clothes_point, body_point, location): - """ - left shoes align left - right shoes align right - Args: - keypoint_type: string, "toe" - scale: float - clothes_point: dict{'left': [x1, y1, z1], 'right': [x2, y2, z2]} - body_point: dict, containing keypoint data of body figure - location: string, indicates whether the start point belongs to right or left shoe - - Returns: - start_point: tuple (x', y') - x' = y_body - y1 * scale - y' = x_body - x1 * scale - """ - if location not in ['left', 'right']: - raise KeyError(f'location value must be left or right but got {location}') - side_indicator = f'{keypoint_type}_{location}' - # clothes_point = {k: tuple(map(lambda x: int(scale * x), v[0: 2])) for k, v in clothes_point.items()} - start_point = (body_point[side_indicator][1] - int(int(clothes_point[side_indicator].split("_")[1]) * scale), - body_point[side_indicator][0] - int(int(clothes_point[side_indicator].split("_")[0]) * scale)) - return start_point diff --git a/app/service/design/items/top.py b/app/service/design/items/top.py deleted file mode 100644 index fc0d2a5..0000000 --- a/app/service/design/items/top.py +++ /dev/null @@ -1,46 +0,0 @@ -from .builder import ITEMS -from .clothing import Clothing - - -@ITEMS.register_module() -class Top(Clothing): - def __init__(self, pipeline, **kwargs): - if pipeline is None: - pipeline = [ - dict(type='LoadImageFromFile', path=kwargs['path'], color=kwargs['color'], print_dict=kwargs['print']), - dict(type='KeypointDetection'), - # dict(type='ContourDetection'), - dict(type='Segmentation'), - dict(type='Painting', painting_flag=True), - dict(type='PrintPainting', print_flag=True), - # dict(type='ImageShow', key=['image', 'mask', 'seg_visualize', 'pattern_image']), - dict(type='Scaling'), - dict(type='Split'), - ] - kwargs.update(pipeline=pipeline) - super(Top, self).__init__(**kwargs) - - 
-@ITEMS.register_module() -class Blouse(Top): - def __init__(self, pipeline=None, **kwargs): - super(Blouse, self).__init__(pipeline, **kwargs) - - -@ITEMS.register_module() -class Outwear(Top): - def __init__(self, pipeline=None, **kwargs): - super(Outwear, self).__init__(pipeline, **kwargs) - - -@ITEMS.register_module() -class Dress(Top): - def __init__(self, pipeline=None, **kwargs): - super(Dress, self).__init__(pipeline, **kwargs) - - -# Men's clothing -@ITEMS.register_module() -class Tops(Top): - def __init__(self, pipeline=None, **kwargs): - super(Tops, self).__init__(pipeline, **kwargs) diff --git a/app/service/design/service.py b/app/service/design/service.py deleted file mode 100644 index ba7e987..0000000 --- a/app/service/design/service.py +++ /dev/null @@ -1,197 +0,0 @@ -import concurrent.futures -import io - -import cv2 - -from app.core.config import PRIORITY_DICT -from app.service.design.core.layer import Layer -from app.service.design.items import build_item -from app.service.design.utils.redis_utils import Redis -from app.service.design.utils.synthesis_item import synthesis, synthesis_single -from app.service.utils.decorator import RunTime -from app.service.utils.oss_client import oss_upload_image - - -def process_item(item, layers): - # logging.info("process running.........") - item.process() - item.organize(layers) - if item.result['name'] == "mannequin": - return item.result['body_image'].size - - -def update_progress(process_id, total): - r = Redis() - progress = r.read(key=process_id) - if progress and total != 1: - if int(progress) <= 100: - r.write(key=process_id, value=int(progress) + int(100 / total)) - else: - r.write(key=process_id, value=99) - return progress - elif total == 1: - r.write(key=process_id, value=100) - return progress - else: - r.write(key=process_id, value=int(100 / total)) - return progress - - -def final_progress(process_id): - r = Redis() - progress = r.read(key=process_id) - r.write(key=process_id, value=100) - return 
progress - - -@RunTime -def generate(request_data): - return_response = {} - return_png_mask = [] - request_data = request_data.dict() - assert "process_id" in request_data.keys(), "Need process_id parameters" - - objects = request_data['objects'] - # insert_keypoint_cache(objects) - process_id = request_data['process_id'] - with concurrent.futures.ThreadPoolExecutor() as executor: - # 提交每个对象的处理任务 - futures = {executor.submit(process_object, cfg, process_id, len(objects)): obj for obj, cfg in enumerate(objects)} - # 获取处理结果 - for future in concurrent.futures.as_completed(futures): - obj = futures[future] - return_response[obj] = future.result()[0] - return_png_mask.extend(future.result()[1]) - # upload_results = process_images(return_png_mask) - final_progress(process_id) - return return_response - - -def process_object(cfg, process_id, total): - uploaded_images = [] - basic_info = cfg.get('basic') - items_response = { - 'layers': [] - } - if cfg.get('basic')['single_overall'] == 'overall': - basic_info['debug'] = False - items = [build_item(x, default_args=basic_info) for x in cfg.get('items')] - layers = Layer() - body_size = None - futures = [] - for item in items: - futures = [process_item(item, layers)] - for future in futures: - if future is not None: - body_size = future - # 是否自定义排序 - if basic_info.get('layer_order', False): - layers = sorted(layers.layer, key=lambda s: s.get("priority", float('inf'))) - else: - layers = sorted(layers.layer, key=lambda x: PRIORITY_DICT.get(x['name'], float('inf'))) - # 上传所有图片 - # for layer in layers: - # if 'image' in layer.keys() and layer['image'] is not None: - # uploaded_images.append({'image_obj': layer['image'], 'image_url': layer['image_url'], 'image_type': 'image'}) - # if 'pattern_image' in layer.keys() and layer['pattern_image'] is not None: - # uploaded_images.append({'image_obj': layer['pattern_image'], 'image_url': layer['pattern_image_url'], 'image_type': 'pattern_image'}) - # if 'mask' in layer.keys() and 
layer['mask'] is not None and layer['mask_url'] is not None: - # uploaded_images.append({'image_obj': layer['mask'], 'image_url': layer['mask_url'], 'image_type': 'mask'}) - layers, new_size = update_base_size_priority(layers, body_size) - # 合成 - items_response['synthesis_url'] = synthesis(layers, new_size, basic_info) - - for lay in layers: - items_response['layers'].append({ - 'image_category': lay['name'], - 'position': lay['position'], - 'priority': lay.get("priority", None), - 'resize_scale': lay['resize_scale'] if "resize_scale" in lay.keys() else None, - 'image_size': lay['image'] if lay['image'] is None else lay['image'].size, - 'gradient_string': lay['gradient_string'] if 'gradient_string' in lay.keys() else "", - 'mask_url': lay['mask_url'], - 'image_url': lay['image_url'] if 'image_url' in lay.keys() else None, - 'pattern_image_url': lay['pattern_image_url'] if 'pattern_image_url' in lay.keys() else None, - - # 'image': lay['image'], - # 'mask_image': lay['mask_image'], - }) - elif cfg.get('basic')['single_overall'] == 'single': - assert cfg.get('basic')['switch_category'] in [x['type'] for x in cfg.get('items')], "Lack of switch_category parameters " - basic_info['debug'] = False - for item in cfg.get('items'): - if item['type'] == cfg.get('basic')['switch_category']: - item = build_item(item, default_args=cfg.get('basic')) - item.process() - items_response['layers'].append({ - 'image_category': f"{item.result['name']}_front", - 'image_size': item.result['back_image'].size if item.result['back_image'] else None, - 'position': None, - 'priority': 0, - 'image_url': item.result['front_image_url'], - 'mask_url': item.result['mask_url'], - "gradient_string": item.result['gradient_string'] if 'gradient_string' in item.result.keys() else "", - 'pattern_image_url': item.result['pattern_image_url'] if 'pattern_image_url' in item.result.keys() else None, - - }) - items_response['layers'].append({ - 'image_category': f"{item.result['name']}_back", - 'image_size': 
item.result['front_image'].size if item.result['front_image'] else None, - 'position': None, - 'priority': 0, - 'image_url': item.result['back_image_url'], - 'mask_url': item.result['mask_url'], - "gradient_string": item.result['gradient_string'] if 'gradient_string' in item.result.keys() else "", - 'pattern_image_url': item.result['pattern_image_url'] if 'pattern_image_url' in item.result.keys() else None, - - }) - items_response['synthesis_url'] = synthesis_single(item.result['front_image'], item.result['back_image']) - break - update_progress(process_id, total) - return items_response, uploaded_images - - -@RunTime -def process_images(images): - with concurrent.futures.ThreadPoolExecutor() as executor: - results = list(executor.map(upload_images, images)) - # results = [] - # for image in images: - # results.append(upload_images(image)) - return results - - -# @RunTime -def upload_images(image_obj): - bucket_name = image_obj['image_url'].split("/", 1)[0] - object_name = image_obj['image_url'].split("/", 1)[1] - if image_obj['image_type'] == 'image' or image_obj['image_type'] == 'pattern_image': - image_data = io.BytesIO() - image_obj['image_obj'].save(image_data, format='PNG') - image_data.seek(0) - image_bytes = image_data.read() - req = oss_upload_image(bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) - return image_obj['image_url'] - else: - mask_inverted = cv2.bitwise_not(image_obj['image_obj']) - # 将掩模的3通道转换为4通道,白色部分不透明,黑色部分透明 - rgba_image = cv2.cvtColor(mask_inverted, cv2.COLOR_BGR2BGRA) - rgba_image[rgba_image[:, :, 0] == 0] = [0, 0, 0, 0] - req = oss_upload_image(bucket=bucket_name, object_name=object_name, image_bytes=cv2.imencode('.png', rgba_image)[1]) - return image_obj['image_url'] - - -def update_base_size_priority(layers, size): - # 计算透明背景图片的宽度 - min_x = min(info['position'][1] for info in layers) - x_list = [] - for info in layers: - if info['image'] is not None: - x_list.append(info['position'][1] + info['image'].width) - 
max_x = max(x_list) - new_width = max_x - min_x - new_height = 700 - # 更新坐标 - for info in layers: - info['adaptive_position'] = (info['position'][0], info['position'][1] - min_x) - return layers, (new_width, new_height) diff --git a/app/service/design/utils/conversion_image.py b/app/service/design/utils/conversion_image.py deleted file mode 100644 index 11e39ae..0000000 --- a/app/service/design/utils/conversion_image.py +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env python -# -*- coding: UTF-8 -*- -""" -@Project :trinity_client -@File :conversion_image.py -@Author :周成融 -@Date :2023/8/21 10:40:29 -@detail : -""" -import numpy as np - - -# def rgb_to_rgba(rgb_size, rgb_image, mask): -# alpha_channel = np.full(rgb_size, 255, dtype=np.uint8) -# # 创建四通道的结果图像 -# rgba_image = np.dstack((rgb_image, alpha_channel)) -# alpha_channel = np.where(mask > 0, 255, 0) -# # 更新RGBA图像的透明度通道 -# rgba_image[:, :, 3] = alpha_channel -# return rgba_image - -def rgb_to_rgba(rgb_image, mask): - # 创建全透明的alpha通道 - alpha_channel = np.where(mask > 0, 255, 0).astype(np.uint8) - # 合并RGB图像和alpha通道 - rgba_image = np.dstack((rgb_image, alpha_channel)) - return rgba_image - - -if __name__ == '__main__': - image = open("") diff --git a/app/service/design/utils/design_ensemble.py b/app/service/design/utils/design_ensemble.py deleted file mode 100644 index f4f6a34..0000000 --- a/app/service/design/utils/design_ensemble.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/env python -# -*- coding: UTF-8 -*- -""" -@Project :trinity_client -@File :design_ensemble.py -@Author :周成融 -@Date :2023/8/16 19:36:21 -@detail :发起请求 获取推理结果 -""" -import logging - -import cv2 -import mmcv -import numpy as np -import torch -import torch.nn.functional as F -import tritonclient.http as httpclient - -from app.core.config import * - -""" - keypoint - 预处理 推理 后处理 -""" - - -def keypoint_preprocess(img_path): - img = mmcv.imread(img_path) - img_scale = (256, 256) - h, w = img.shape[:2] - img = cv2.resize(img, img_scale) - w_scale = 
img_scale[0] / w - h_scale = img_scale[1] / h - img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) - preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0) - return preprocessed_img, (w_scale, h_scale) - - -# @ RunTime -# 推理 -def get_keypoint_result(image, site): - keypoint_result = None - try: - image, scale_factor = keypoint_preprocess(image) - client = httpclient.InferenceServerClient(url=DESIGN_MODEL_URL) - transformed_img = image.astype(np.float32) - inputs = [httpclient.InferInput(f"input", transformed_img.shape, datatype="FP32")] - inputs[0].set_data_from_numpy(transformed_img, binary_data=True) - outputs = [httpclient.InferRequestedOutput(f"output", binary_data=True)] - results = client.infer(model_name=f"keypoint_{site}_ocrnet_hr18", inputs=inputs, outputs=outputs) - inference_output = torch.from_numpy(results.as_numpy(f'output')) - keypoint_result = keypoint_postprocess(inference_output, scale_factor) - except Exception as e: - logging.warning(f"get_keypoint_result : {e}") - return keypoint_result - - -def keypoint_postprocess(output, scale_factor): - max_indices = torch.argmax(output.view(output.size(0), output.size(1), -1), dim=2).unsqueeze(dim=2) - max_coords = torch.cat((max_indices / output.size(3), max_indices % output.size(3)), dim=2) - segment_result = max_coords.numpy() - scale_factor = [1 / x for x in scale_factor[::-1]] - scale_matrix = np.diag(scale_factor) - nan = np.isinf(scale_matrix) - scale_matrix[nan] = 0 - return np.ceil(np.dot(segment_result, scale_matrix) * 4) - - -""" - seg - 预处理 推理 后处理 -""" - - -# KNet -def seg_preprocess(img_path): - img = mmcv.imread(img_path) - ori_shape = img.shape[:2] - img_scale_w, img_scale_h = ori_shape - if ori_shape[0] > 1024: - img_scale_w = 1024 - if ori_shape[1] > 1024: - img_scale_h = 1024 - # 如果图片size任意一边 大于 1024, 则会resize 成1024 - if ori_shape != (img_scale_w, img_scale_h): - # mmcv.imresize(img, img_scale_h, 
img_scale_w) # 老代码 引以为戒!哈哈哈~ h和w写反了 - img = cv2.resize(img, (img_scale_h, img_scale_w)) - img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) - preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0) - return preprocessed_img, ori_shape - - -# @ RunTime -def get_seg_result(image_id, image): - image, ori_shape = seg_preprocess(image) - client = httpclient.InferenceServerClient(url=f"{DESIGN_MODEL_URL}") - transformed_img = image.astype(np.float32) - # 输入集 - inputs = [ - httpclient.InferInput(SEGMENTATION['input'], transformed_img.shape, datatype="FP32") - ] - inputs[0].set_data_from_numpy(transformed_img, binary_data=True) - # 输出集 - outputs = [ - httpclient.InferRequestedOutput(SEGMENTATION['output'], binary_data=True), - ] - results = client.infer(model_name=SEGMENTATION['new_model_name'], inputs=inputs, outputs=outputs) - # 推理 - # 取结果 - inference_output1 = results.as_numpy(SEGMENTATION['output']) - seg_result = seg_postprocess(int(image_id), inference_output1, ori_shape) - return seg_result - - -# no cache -def seg_postprocess(image_id, output, ori_shape): - seg_logit = F.interpolate(torch.tensor(output).float(), size=ori_shape, scale_factor=None, mode='bilinear', align_corners=False) - seg_pred = seg_logit.cpu().numpy() - return seg_pred[0] - - -def key_point_show(image_path, key_point_result=None): - img = cv2.imread(image_path) - points_list = key_point_result - point_size = 1 - point_color = (0, 0, 255) # BGR - thickness = 4 # 可以为 0 、4、8 - for point in points_list: - cv2.circle(img, point[::-1], point_size, point_color, thickness) - cv2.imshow("0", img) - cv2.waitKey(0) - - -if __name__ == '__main__': - image = cv2.imread("9070101c-e5be-49b5-9602-4113a968969b.png") - a = get_keypoint_result(image, "up") - new_list = [] - print(list) - for i in a[0]: - new_list.append((int(i[0]), int(i[1]))) - key_point_show("9070101c-e5be-49b5-9602-4113a968969b.png", new_list) - # a = 
get_seg_result(1, image) - print(a) diff --git a/app/service/design/utils/redis_utils.py b/app/service/design/utils/redis_utils.py deleted file mode 100644 index 012fbe0..0000000 --- a/app/service/design/utils/redis_utils.py +++ /dev/null @@ -1,99 +0,0 @@ -import redis - -from app.core.config import REDIS_HOST, REDIS_PORT - - -class Redis(object): - """ - redis数据库操作 - """ - - @staticmethod - def _get_r(): - host = REDIS_HOST - port = REDIS_PORT - db = 0 - r = redis.StrictRedis(host, port, db) - return r - - @classmethod - def write(cls, key, value, expire=None): - """ - 写入键值对 - """ - # 判断是否有过期时间,没有就设置默认值 - if expire: - expire_in_seconds = expire - else: - expire_in_seconds = 100 - r = cls._get_r() - r.set(key, value, ex=expire_in_seconds) - - @classmethod - def read(cls, key): - """ - 读取键值对内容 - """ - r = cls._get_r() - value = r.get(key) - return value.decode('utf-8') if value else value - - @classmethod - def hset(cls, name, key, value): - """ - 写入hash表 - """ - r = cls._get_r() - r.hset(name, key, value) - - @classmethod - def hget(cls, name, key): - """ - 读取指定hash表的键值 - """ - r = cls._get_r() - value = r.hget(name, key) - return value.decode('utf-8') if value else value - - @classmethod - def hgetall(cls, name): - """ - 获取指定hash表所有的值 - """ - r = cls._get_r() - return r.hgetall(name) - - @classmethod - def delete(cls, *names): - """ - 删除一个或者多个 - """ - r = cls._get_r() - r.delete(*names) - - @classmethod - def hdel(cls, name, key): - """ - 删除指定hash表的键值 - """ - r = cls._get_r() - r.hdel(name, key) - - @classmethod - def expire(cls, name, expire=None): - """ - 设置过期时间 - """ - if expire: - expire_in_seconds = expire - else: - expire_in_seconds = 100 - r = cls._get_r() - r.expire(name, expire_in_seconds) - - -if __name__ == '__main__': - redis_client = Redis() - # print(redis_client.write(key="1230", value=0)) - redis_client.write(key="1230", value=10) - # print(redis_client.read(key="1230")) diff --git a/app/service/design/utils/synthesis_item.py 
b/app/service/design/utils/synthesis_item.py deleted file mode 100644 index 03df2d9..0000000 --- a/app/service/design/utils/synthesis_item.py +++ /dev/null @@ -1,181 +0,0 @@ -#!/usr/bin/env python -# -*- coding: UTF-8 -*- -""" -@Project :trinity_client -@File :synthesis_item.py -@Author :周成融 -@Date :2023/8/26 14:13:04 -@detail : -""" -import io -import logging - -import cv2 -import numpy as np -from PIL import Image - -from app.service.utils.generate_uuid import generate_uuid -from app.service.utils.oss_client import oss_upload_image - - -def positioning(all_mask_shape, mask_shape, offset): - all_start = 0 - all_end = 0 - mask_start = 0 - mask_end = 0 - if offset == 0: - all_start = 0 - all_end = min(all_mask_shape, mask_shape) - - mask_start = 0 - mask_end = min(all_mask_shape, mask_shape) - elif offset > 0: - all_start = min(offset, all_mask_shape) - all_end = min(offset + mask_shape, all_mask_shape) - - mask_start = 0 - mask_end = 0 if offset > all_mask_shape else min(all_mask_shape - offset, mask_shape) - elif offset < 0: - if abs(offset) > mask_shape: - all_start = 0 - all_end = 0 - else: - all_start = 0 - if mask_shape - abs(offset) > all_mask_shape: - all_end = min(mask_shape - abs(offset), all_mask_shape) - else: - all_end = mask_shape - abs(offset) - - if abs(offset) > mask_shape: - mask_start = mask_shape - mask_end = mask_shape - else: - mask_start = abs(offset) - if mask_shape - abs(offset) >= all_mask_shape: - mask_end = all_mask_shape + abs(offset) - else: - mask_end = mask_shape - return all_start, all_end, mask_start, mask_end - - -# @RunTime -def synthesis(data, size, basic_info): - # 创建底图 - base_image = Image.new('RGBA', size, (0, 0, 0, 0)) - try: - all_mask_shape = (size[1], size[0]) - body_mask = None - for d in data: - if d['name'] == 'body': - # 创建一个新的宽高透明图像, 把模特贴上去获取mask - transparent_image = Image.new("RGBA", size, (0, 0, 0, 0)) - transparent_image.paste(d['image'], (d['adaptive_position'][1], d['adaptive_position'][0]), d['image']) # 
此处可变数组会被paste篡改值,所以使用下标获取position - body_mask = np.array(transparent_image.split()[3]) - - # 根据新的坐标获取新的肩点 - left_shoulder = [x + y for x, y in zip(basic_info['body_point_test']['shoulder_left'], [d['adaptive_position'][1], d['adaptive_position'][0]])] - right_shoulder = [x + y for x, y in zip(basic_info['body_point_test']['shoulder_right'], [d['adaptive_position'][1], d['adaptive_position'][0]])] - body_mask[:min(left_shoulder[1], right_shoulder[1]), left_shoulder[0]:right_shoulder[0]] = 255 - _, binary_body_mask = cv2.threshold(body_mask, 127, 255, cv2.THRESH_BINARY) - top_outer_mask = np.array(binary_body_mask) - bottom_outer_mask = np.array(binary_body_mask) - - top = True - bottom = True - i = len(data) - while i: - i -= 1 - if top and data[i]['name'] in ["blouse_front", "outwear_front", "dress_front", "tops_front"]: - top = False - mask_shape = data[i]['mask'].shape - y_offset, x_offset = data[i]['adaptive_position'] - # 初始化叠加区域的起始和结束位置 - all_y_start, all_y_end, mask_y_start, mask_y_end = positioning(all_mask_shape=all_mask_shape[0], mask_shape=mask_shape[0], offset=y_offset) - all_x_start, all_x_end, mask_x_start, mask_x_end = positioning(all_mask_shape=all_mask_shape[1], mask_shape=mask_shape[1], offset=x_offset) - # 将叠加区域赋值为相应的像素值 - _, sketch_mask = cv2.threshold(data[i]['mask'], 127, 255, cv2.THRESH_BINARY) - background = np.zeros_like(top_outer_mask) - background[all_y_start:all_y_end, all_x_start:all_x_end] = sketch_mask[mask_y_start:mask_y_end, mask_x_start:mask_x_end] - top_outer_mask = background + top_outer_mask - elif bottom and data[i]['name'] in ["trousers_front", "skirt_front", "bottoms_front", "dress_front"]: - bottom = False - mask_shape = data[i]['mask'].shape - y_offset, x_offset = data[i]['adaptive_position'] - # 初始化叠加区域的起始和结束位置 - all_y_start, all_y_end, mask_y_start, mask_y_end = positioning(all_mask_shape=all_mask_shape[0], mask_shape=mask_shape[0], offset=y_offset) - all_x_start, all_x_end, mask_x_start, mask_x_end = 
positioning(all_mask_shape=all_mask_shape[1], mask_shape=mask_shape[1], offset=x_offset) - # 将叠加区域赋值为相应的像素值 - _, sketch_mask = cv2.threshold(data[i]['mask'], 127, 255, cv2.THRESH_BINARY) - background = np.zeros_like(top_outer_mask) - background[all_y_start:all_y_end, all_x_start:all_x_end] = sketch_mask[mask_y_start:mask_y_end, mask_x_start:mask_x_end] - bottom_outer_mask = background + bottom_outer_mask - elif bottom is False and top is False: - break - - all_mask = cv2.bitwise_or(top_outer_mask, bottom_outer_mask) - - for layer in data: - if layer['image'] is not None: - if layer['name'] != "body": - test_image = Image.new('RGBA', size, (0, 0, 0, 0)) - test_image.paste(layer['image'], (layer['adaptive_position'][1], layer['adaptive_position'][0]), layer['image']) - mask_data = np.where(all_mask > 0, 255, 0).astype(np.uint8) - mask_alpha = Image.fromarray(mask_data) - cropped_image = Image.composite(test_image, Image.new("RGBA", test_image.size, (255, 255, 255, 0)), mask_alpha) - base_image.paste(test_image, (0, 0), cropped_image) # test_image 已经按照坐标贴到最大宽值的图片上 坐着这里坐标为00 - else: - base_image.paste(layer['image'], (layer['adaptive_position'][1], layer['adaptive_position'][0]), layer['image']) - - result_image = base_image - - image_data = io.BytesIO() - result_image.save(image_data, format='PNG') - image_data.seek(0) - - # oss upload - image_bytes = image_data.read() - bucket_name = "aida-results" - object_name = f'result_{generate_uuid()}.png' - req = oss_upload_image(bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) - return f"{bucket_name}/{object_name}" - # return f"aida-results/{minio_client.put_object('aida-results', f'result_{generate_uuid()}.png', io.BytesIO(image_bytes), len(image_bytes), content_type='image/png').object_name}" - - # object_name = f'result_{generate_uuid()}.png' - # response = s3.put_object(Bucket="aida-results", Key=object_name, Body=data, ContentType='image/png') - # object_url = f"aida-results/{object_name}" - # if 
response['ResponseMetadata']['HTTPStatusCode'] == 200: - # return object_url - # else: - # return "" - - except Exception as e: - logging.warning(f"synthesis runtime exception : {e}") - - -def synthesis_single(front_image, back_image): - result_image = None - if front_image: - result_image = front_image - if back_image: - result_image.paste(back_image, (0, 0), back_image) - - # with io.BytesIO() as output: - # result_image.save(output, format='PNG') - # data = output.getvalue() - # object_name = f'result_{generate_uuid()}.png' - # response = s3.put_object(Bucket="aida-results", Key=object_name, Body=data, ContentType='image/png') - # object_url = f"aida-results/{object_name}" - # if response['ResponseMetadata']['HTTPStatusCode'] == 200: - # return object_url - # else: - # return "" - image_data = io.BytesIO() - result_image.save(image_data, format='PNG') - image_data.seek(0) - image_bytes = image_data.read() - # return f"aida-results/{minio_client.put_object('aida-results', f'result_{generate_uuid()}.png', io.BytesIO(image_bytes), len(image_bytes), content_type='image/png').object_name}" - # oss upload - bucket_name = 'aida-results' - object_name = f'result_{generate_uuid()}.png' - req = oss_upload_image(bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) - return f"{bucket_name}/{object_name}" diff --git a/app/service/design_batch/design_batch_celery.py b/app/service/design_batch/design_batch_celery.py index f7be62e..2eff696 100644 --- a/app/service/design_batch/design_batch_celery.py +++ b/app/service/design_batch/design_batch_celery.py @@ -4,7 +4,7 @@ import threading from celery import Celery from minio import Minio -from app.core.config import * +from app.core.config import settings from app.service.design_batch.item import BodyItem, TopItem, BottomItem, OthersItem from app.service.design_batch.utils.MQ import publish_status from app.service.design_batch.utils.organize import organize_body, organize_clothing, organize_others @@ -12,12 +12,12 
@@ from app.service.design_batch.utils.save_json import oss_upload_json from app.service.design_batch.utils.synthesis_item import update_base_size_priority, synthesis, synthesis_single id_lock = threading.Lock() -celery_app = Celery('tasks', broker=f'amqp://rabbit:123456@18.167.251.121:5672//', backend='rpc://', BROKER_CONNECTION_RETRY_ON_STARTUP=True) +celery_app = Celery('tasks', broker=f'amqp://{settings.MQ_USERNAME}:{settings.MQ_PASSWORD}@{settings.MQ_HOST}:{settings.MQ_PORT}//', backend='rpc://') celery_app.conf.worker_log_format = '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s' celery_app.conf.worker_hijack_root_logger = False logging.getLogger('pika').setLevel(logging.WARNING) logger = logging.getLogger() -minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) print("start") @@ -51,10 +51,12 @@ def process_layer(item, layers): front_layer, back_layer = organize_others(item) layers.append(front_layer) layers.append(back_layer) + return None else: front_layer, back_layer = organize_clothing(item) layers.append(front_layer) layers.append(back_layer) + return None @celery_app.task @@ -76,12 +78,11 @@ def batch_design(objects_data, tasks_id, json_name): for item in object['items']: item_results.append(process_item(item, basic)) layers = [] - body_size = None for item in item_results: - body_size = process_layer(item, layers) + process_layer(item, layers) layers = sorted(layers, key=lambda s: s.get("priority", float('inf'))) - layers, new_size = update_base_size_priority(layers, body_size) + layers, new_size = update_base_size_priority(layers) for lay in layers: items_response['layers'].append({ diff --git a/app/service/design_batch/pipeline/back_perspective.py b/app/service/design_batch/pipeline/back_perspective.py index 5ddd37c..825a3cb 100644 --- 
a/app/service/design_batch/pipeline/back_perspective.py +++ b/app/service/design_batch/pipeline/back_perspective.py @@ -18,11 +18,11 @@ class BackPerspective: result['back_perspective_url'] = file_path return result else: - seg_result = get_seg_result("1", result['image'])[0] + seg_result = get_seg_result(result['image'])[0] elif result['name'] in ['blouse', 'outwear', 'dress', 'tops']: seg_result = result['seg_result'] else: - seg_result = get_seg_result("1", result['image'])[0] + seg_result = get_seg_result(result['image'])[0] m = self.thicken_contours_and_display(seg_result, thickness=10, color=(0, 0, 0)) back_sketch = result['image'].copy() @@ -34,7 +34,8 @@ class BackPerspective: result['back_perspective_url'] = f"{resp.bucket_name}/{resp.object_name}" return result - def thicken_contours_and_display(self, mask, thickness=10, color=(0, 0, 0)): + @staticmethod + def thicken_contours_and_display(mask, thickness=10, color=(0, 0, 0)): mask = mask.astype(np.uint8) * 255 # 查找轮廓 contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) @@ -48,9 +49,9 @@ class BackPerspective: # 在空白图像上绘制白色的轮廓 cv2.drawContours(blank, [contour], -1, 255, thickness=thick) # 找到轮廓的中心(可以用重心等方法近似) - M = cv2.moments(contour) - cx = int(M['m10'] / M['m00']) - cy = int(M['m01'] / M['m00']) + m = cv2.moments(contour) + cx = int(m['m10'] / m['m00']) + cy = int(m['m01'] / m['m00']) # 进行距离变换,离中心越近的值越小 dist_transform = cv2.distanceTransform(255 - blank, cv2.DIST_L2, 5) # 根据距离变换的值来决定是否保留像素,离中心近的像素更容易被保留 diff --git a/app/service/design_batch/pipeline/color.py b/app/service/design_batch/pipeline/color.py index d6c84e4..a7928db 100644 --- a/app/service/design_batch/pipeline/color.py +++ b/app/service/design_batch/pipeline/color.py @@ -79,9 +79,9 @@ class Color: def get_pattern(single_color): if single_color is None: raise False - R, G, B = single_color.split(' ') + r, g, b = single_color.split(' ') pattern = np.zeros([1, 1, 3], np.uint8) - pattern[0, 0, 0] = int(B) - pattern[0, 0, 
1] = int(G) - pattern[0, 0, 2] = int(R) + pattern[0, 0, 0] = int(b) + pattern[0, 0, 1] = int(g) + pattern[0, 0, 2] = int(r) return pattern diff --git a/app/service/design_batch/pipeline/keypoint.py b/app/service/design_batch/pipeline/keypoint.py index 73d7586..2b2607a 100644 --- a/app/service/design_batch/pipeline/keypoint.py +++ b/app/service/design_batch/pipeline/keypoint.py @@ -3,7 +3,7 @@ import logging import numpy as np from pymilvus import MilvusClient -from app.core.config import * +from app.core.config import KEYPOINT_RESULT_TABLE_FIELD_SET, MILVUS_TABLE_KEYPOINT, settings from app.service.design_fast.utils.design_ensemble import get_keypoint_result from app.service.utils.decorator import ClassCallRunTime, RunTime @@ -21,12 +21,12 @@ class KeyPoint: def __call__(self, result): if result['name'] in ['blouse', 'skirt', 'dress', 'outwear', 'trousers', 'tops', 'bottoms']: # 查询是否有数据 且类别相同 相同则直接读 不同则推理后更新 # result['clothes_keypoint'] = self.infer_keypoint_result(result) - site = 'up' if result['name'] in ['blouse', 'outwear', 'dress', 'tops'] else 'down' + # 'up' if result['name'] in ['blouse', 'outwear', 'dress', 'tops'] else 'down' # keypoint_cache = search_keypoint_cache(result["image_id"], site) # keypoint_cache = self.keypoint_cache(result, site) keypoint_cache = False # 取消向量查询 直接过模型推理 - if keypoint_cache is False: + if not keypoint_cache: keypoint_infer_result, site = self.infer_keypoint_result(result) result['clothes_keypoint'] = self.save_keypoint_cache(result["image_id"], keypoint_infer_result, site) else: @@ -55,8 +55,8 @@ class KeyPoint: } ] try: - client = MilvusClient(uri=MILVUS_URL, token=MILVUS_TOKEN, db_name=MILVUS_ALIAS) - res = client.upsert(collection_name=MILVUS_TABLE_KEYPOINT, data=data) + client = MilvusClient(uri=settings.MILVUS_URL, token=settings.MILVUS_TOKEN, db_name=settings.MILVUS_ALIAS) + client.upsert(collection_name=MILVUS_TABLE_KEYPOINT, data=data) client.close() return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 
2).astype(int).tolist())) except Exception as e: @@ -79,7 +79,7 @@ class KeyPoint: ] try: - client = MilvusClient(uri=MILVUS_URL, token=MILVUS_TOKEN, db_name=MILVUS_ALIAS) + client = MilvusClient(uri=settings.MILVUS_URL, token=settings.MILVUS_TOKEN, db_name=settings.MILVUS_ALIAS) client.upsert( collection_name=MILVUS_TABLE_KEYPOINT, data=data @@ -92,7 +92,7 @@ class KeyPoint: @RunTime def keypoint_cache(self, result, site): try: - client = MilvusClient(uri=MILVUS_URL, token=MILVUS_TOKEN, db_name=MILVUS_ALIAS) + client = MilvusClient(uri=settings.MILVUS_URL, token=settings.MILVUS_TOKEN, db_name=settings.MILVUS_ALIAS) keypoint_id = result['image_id'] res = client.query( collection_name=MILVUS_TABLE_KEYPOINT, diff --git a/app/service/design_batch/pipeline/loading.py b/app/service/design_batch/pipeline/loading.py index 332f29a..90166d1 100644 --- a/app/service/design_batch/pipeline/loading.py +++ b/app/service/design_batch/pipeline/loading.py @@ -1,9 +1,6 @@ -import io import logging import cv2 -import numpy as np -from PIL import Image from app.service.utils.new_oss_client import oss_get_image diff --git a/app/service/design_batch/pipeline/print_painting.py b/app/service/design_batch/pipeline/print_painting.py index 1534f9c..fde05fb 100644 --- a/app/service/design_batch/pipeline/print_painting.py +++ b/app/service/design_batch/pipeline/print_painting.py @@ -9,6 +9,7 @@ from app.service.utils.new_oss_client import oss_get_image class PrintPainting: def __init__(self, minio_client): + self.random_seed = None self.minio_client = minio_client def __call__(self, result): @@ -408,7 +409,7 @@ class PrintPainting: change_mask = print_mask[start_h: length_h, start_w: length_w] # get real part into change mask _, change_mask = cv2.threshold(change_mask, 220, 255, cv2.THRESH_BINARY) - mask = cv2.bitwise_not(painting_dict['mask_inv_print']) + cv2.bitwise_not(painting_dict['mask_inv_print']) img_fg[start_h:start_h + painting_dict['dim_print_h'], start_w:start_w + 
painting_dict['dim_print_w'], :] = change_region clothes_mask_print = cv2.bitwise_not(print_mask) diff --git a/app/service/design_batch/pipeline/segmentation.py b/app/service/design_batch/pipeline/segmentation.py index 0c9c51e..9d619d7 100644 --- a/app/service/design_batch/pipeline/segmentation.py +++ b/app/service/design_batch/pipeline/segmentation.py @@ -4,7 +4,7 @@ import os import cv2 import numpy as np -from app.core.config import SEG_CACHE_PATH +from app.core.config import settings from app.service.design_fast.utils.design_ensemble import get_seg_result from app.service.utils.decorator import ClassCallRunTime from app.service.utils.new_oss_client import oss_get_image @@ -36,11 +36,11 @@ class Segmentation: # preview 过模型 不缓存 if "preview_submit" in result.keys() and result['preview_submit'] == "preview": # 推理获得seg 结果 - seg_result = get_seg_result(result["image_id"], result['image']) + seg_result = get_seg_result(result['image']) # submit 过模型 缓存 elif "preview_submit" in result.keys() and result['preview_submit'] == "submit": # 推理获得seg 结果 - seg_result = get_seg_result(result["image_id"], result['image']) + seg_result = get_seg_result(result['image']) self.save_seg_result(seg_result, result['image_id']) # null 正常流程 加载本地缓存 无缓存则过模型 else: @@ -49,7 +49,7 @@ class Segmentation: # 判断缓存和实际图片size是否相同 if not _ or result["image"].shape[:2] != seg_result.shape: # 推理获得seg 结果 - seg_result = get_seg_result(result["image_id"], result['image']) + seg_result = get_seg_result(result['image']) self.save_seg_result(seg_result, result['image_id']) result['seg_result'] = seg_result @@ -63,7 +63,7 @@ class Segmentation: @staticmethod def save_seg_result(seg_result, image_id): - file_path = f"{SEG_CACHE_PATH}{image_id}.npy" + file_path = f"{settings.SEG_CACHE_PATH}{image_id}.npy" try: np.save(file_path, seg_result) logger.debug(f"保存成功 :{os.path.abspath(file_path)}") @@ -72,7 +72,7 @@ class Segmentation: @staticmethod def load_seg_result(image_id): - file_path = 
f"{SEG_CACHE_PATH}{image_id}.npy" + file_path = f"{settings.SEG_CACHE_PATH}{image_id}.npy" # logger.info(f"load seg file name is :{SEG_CACHE_PATH}{image_id}.npy") try: seg_result = np.load(file_path) diff --git a/app/service/design_batch/pipeline/split.py b/app/service/design_batch/pipeline/split.py index 288381a..cc32e05 100644 --- a/app/service/design_batch/pipeline/split.py +++ b/app/service/design_batch/pipeline/split.py @@ -4,9 +4,7 @@ import logging import cv2 import numpy as np from PIL import Image -from cv2 import cvtColor, COLOR_BGR2RGBA -from app.core.config import AIDA_CLOTHING from app.service.design_fast.utils.conversion_image import rgb_to_rgba from app.service.design_fast.utils.transparent import sketch_to_transparent from app.service.design_fast.utils.upload_image import upload_png_mask @@ -40,7 +38,7 @@ class Split(object): result_front_image = np.zeros_like(rgba_image) front_mask = cv2.resize(front_mask, new_size) result_front_image[front_mask != 0] = rgba_image[front_mask != 0] - result_front_image_pil = Image.fromarray(cvtColor(result_front_image, COLOR_BGR2RGBA)) + result_front_image_pil = Image.fromarray(cv2.cvtColor(result_front_image, cv2.COLOR_BGR2RGBA)) if 'transparent' in result.keys(): # 用户自选区域transparent transparent = result['transparent'] @@ -98,21 +96,21 @@ class Split(object): result_back_image = np.zeros_like(rgba_image) back_mask = cv2.resize(back_mask, new_size) result_back_image[back_mask != 0] = rgba_image[back_mask != 0] - result_back_image_pil = Image.fromarray(cvtColor(result_back_image, COLOR_BGR2RGBA)) + result_back_image_pil = Image.fromarray(cv2.cvtColor(result_back_image, cv2.COLOR_BGR2RGBA)) result['back_image'], result["back_image_url"], _ = upload_png_mask(self.minio_client, result_back_image_pil, f'{generate_uuid()}', mask=None) mask_image[back_mask != 0] = [0, 255, 0] rbga_mask = rgb_to_rgba(mask_image, front_mask + back_mask) - mask_pil = Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) + 
mask_pil = Image.fromarray(cv2.cvtColor(rbga_mask.astype(np.uint8), cv2.COLOR_BGR2RGBA)) image_data = io.BytesIO() mask_pil.save(image_data, format='PNG') image_data.seek(0) image_bytes = image_data.read() - req = oss_upload_image(oss_client=self.minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) + req = oss_upload_image(oss_client=self.minio_client, bucket="aida-clothing", object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) result['mask_url'] = req.bucket_name + "/" + req.object_name # 创建中间图层 result_pattern_image_rgba = rgb_to_rgba(result['pattern_image'], result['mask']) - result_pattern_image_pil = Image.fromarray(cvtColor(result_pattern_image_rgba, COLOR_BGR2RGBA)) + result_pattern_image_pil = Image.fromarray(cv2.cvtColor(result_pattern_image_rgba, cv2.COLOR_BGR2RGBA)) result['pattern_image'], result['pattern_image_url'], _ = upload_png_mask(self.minio_client, result_pattern_image_pil, f'{generate_uuid()}') return result except Exception as e: diff --git a/app/service/design_batch/utils/MQ.py b/app/service/design_batch/utils/MQ.py index 4fc839b..42cea41 100644 --- a/app/service/design_batch/utils/MQ.py +++ b/app/service/design_batch/utils/MQ.py @@ -2,16 +2,17 @@ import json import pika -from app.core.config import RABBITMQ_PARAMS, BATCH_DESIGN_RABBITMQ_QUEUES +from app.core.config import settings +from app.core.rabbit_mq_config import RABBITMQ_PARAMS def publish_status(task_id, progress, result): connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) channel = connection.channel() - channel.queue_declare(queue=BATCH_DESIGN_RABBITMQ_QUEUES, durable=True) + channel.queue_declare(queue=settings.BATCH_DESIGN_RABBITMQ_QUEUES, durable=True) message = {'task_id': task_id, 'progress': progress, "result": result} channel.basic_publish(exchange='', - routing_key=BATCH_DESIGN_RABBITMQ_QUEUES, + routing_key=settings.BATCH_DESIGN_RABBITMQ_QUEUES, 
body=json.dumps(message), properties=pika.BasicProperties( delivery_mode=2, diff --git a/app/service/design_batch/utils/design_ensemble.py b/app/service/design_batch/utils/design_ensemble.py index f4f6a34..193da0e 100644 --- a/app/service/design_batch/utils/design_ensemble.py +++ b/app/service/design_batch/utils/design_ensemble.py @@ -16,7 +16,7 @@ import torch import torch.nn.functional as F import tritonclient.http as httpclient -from app.core.config import * +from app.core.config import DESIGN_MODEL_URL, DESIGN_MODEL_NAME """ keypoint @@ -91,29 +91,29 @@ def seg_preprocess(img_path): # @ RunTime -def get_seg_result(image_id, image): +def get_seg_result(image): image, ori_shape = seg_preprocess(image) client = httpclient.InferenceServerClient(url=f"{DESIGN_MODEL_URL}") transformed_img = image.astype(np.float32) # 输入集 inputs = [ - httpclient.InferInput(SEGMENTATION['input'], transformed_img.shape, datatype="FP32") + httpclient.InferInput(DESIGN_MODEL_NAME, transformed_img.shape, datatype="FP32") ] inputs[0].set_data_from_numpy(transformed_img, binary_data=True) # 输出集 outputs = [ - httpclient.InferRequestedOutput(SEGMENTATION['output'], binary_data=True), + httpclient.InferRequestedOutput("seg_input__0", binary_data=True), ] - results = client.infer(model_name=SEGMENTATION['new_model_name'], inputs=inputs, outputs=outputs) + results = client.infer(model_name=DESIGN_MODEL_NAME, inputs=inputs, outputs=outputs) # 推理 # 取结果 - inference_output1 = results.as_numpy(SEGMENTATION['output']) - seg_result = seg_postprocess(int(image_id), inference_output1, ori_shape) + inference_output1 = results.as_numpy("seg_input__0") + seg_result = seg_postprocess(inference_output1, ori_shape) return seg_result # no cache -def seg_postprocess(image_id, output, ori_shape): +def seg_postprocess(output, ori_shape): seg_logit = F.interpolate(torch.tensor(output).float(), size=ori_shape, scale_factor=None, mode='bilinear', align_corners=False) seg_pred = seg_logit.cpu().numpy() return 
seg_pred[0] diff --git a/app/service/design_batch/utils/organize.py b/app/service/design_batch/utils/organize.py index 0550419..877b4a5 100644 --- a/app/service/design_batch/utils/organize.py +++ b/app/service/design_batch/utils/organize.py @@ -98,6 +98,8 @@ def calculate_start_point(keypoint_type, scale, clothes_point, body_point, offse """ Align left Args: + offset: + resize_scale: keypoint_type: string, "waistband" | "shoulder" | "ear_point" scale: float clothes_point: dict{'left': [x1, y1, z1], 'right': [x2, y2, z2]} diff --git a/app/service/design_batch/utils/progress.py b/app/service/design_batch/utils/progress.py index 0f2c9cf..e970639 100644 --- a/app/service/design_batch/utils/progress.py +++ b/app/service/design_batch/utils/progress.py @@ -1,6 +1,6 @@ import logging -from app.service.design_fast.utils.redis_utils import Redis +from app.service.utils.redis_utils import Redis logger = logging.getLogger(__name__) diff --git a/app/service/design_batch/utils/redis_utils.py b/app/service/design_batch/utils/redis_utils.py deleted file mode 100644 index 012fbe0..0000000 --- a/app/service/design_batch/utils/redis_utils.py +++ /dev/null @@ -1,99 +0,0 @@ -import redis - -from app.core.config import REDIS_HOST, REDIS_PORT - - -class Redis(object): - """ - redis数据库操作 - """ - - @staticmethod - def _get_r(): - host = REDIS_HOST - port = REDIS_PORT - db = 0 - r = redis.StrictRedis(host, port, db) - return r - - @classmethod - def write(cls, key, value, expire=None): - """ - 写入键值对 - """ - # 判断是否有过期时间,没有就设置默认值 - if expire: - expire_in_seconds = expire - else: - expire_in_seconds = 100 - r = cls._get_r() - r.set(key, value, ex=expire_in_seconds) - - @classmethod - def read(cls, key): - """ - 读取键值对内容 - """ - r = cls._get_r() - value = r.get(key) - return value.decode('utf-8') if value else value - - @classmethod - def hset(cls, name, key, value): - """ - 写入hash表 - """ - r = cls._get_r() - r.hset(name, key, value) - - @classmethod - def hget(cls, name, key): - """ - 
读取指定hash表的键值 - """ - r = cls._get_r() - value = r.hget(name, key) - return value.decode('utf-8') if value else value - - @classmethod - def hgetall(cls, name): - """ - 获取指定hash表所有的值 - """ - r = cls._get_r() - return r.hgetall(name) - - @classmethod - def delete(cls, *names): - """ - 删除一个或者多个 - """ - r = cls._get_r() - r.delete(*names) - - @classmethod - def hdel(cls, name, key): - """ - 删除指定hash表的键值 - """ - r = cls._get_r() - r.hdel(name, key) - - @classmethod - def expire(cls, name, expire=None): - """ - 设置过期时间 - """ - if expire: - expire_in_seconds = expire - else: - expire_in_seconds = 100 - r = cls._get_r() - r.expire(name, expire_in_seconds) - - -if __name__ == '__main__': - redis_client = Redis() - # print(redis_client.write(key="1230", value=0)) - redis_client.write(key="1230", value=10) - # print(redis_client.read(key="1230")) diff --git a/app/service/design_batch/utils/synthesis_item.py b/app/service/design_batch/utils/synthesis_item.py index 272ab23..2b03077 100644 --- a/app/service/design_batch/utils/synthesis_item.py +++ b/app/service/design_batch/utils/synthesis_item.py @@ -13,9 +13,12 @@ import logging import cv2 import numpy as np from PIL import Image - +from minio import Minio +from app.core.config import settings from app.service.utils.generate_uuid import generate_uuid -from app.service.utils.oss_client import oss_upload_image +from app.service.utils.new_oss_client import oss_upload_image + +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) def positioning(all_mask_shape, mask_shape, offset): @@ -136,7 +139,7 @@ def synthesis(data, size, basic_info): image_bytes = image_data.read() bucket_name = "aida-results" object_name = f'result_{generate_uuid()}.png' - req = oss_upload_image(bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) + oss_upload_image(oss_client=minio_client, bucket=bucket_name, object_name=object_name, 
image_bytes=image_bytes) return f"{bucket_name}/{object_name}" # return f"aida-results/{minio_client.put_object('aida-results', f'result_{generate_uuid()}.png', io.BytesIO(image_bytes), len(image_bytes), content_type='image/png').object_name}" @@ -177,11 +180,11 @@ def synthesis_single(front_image, back_image): # oss upload bucket_name = 'aida-results' object_name = f'result_{generate_uuid()}.png' - req = oss_upload_image(bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) + oss_upload_image(oss_client=minio_client, bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) return f"{bucket_name}/{object_name}" -def update_base_size_priority(layers, size): +def update_base_size_priority(layers): # 计算透明背景图片的宽度 min_x = min(info['position'][1] for info in layers) x_list = [] diff --git a/app/service/design_batch/utils/upload_image.py b/app/service/design_batch/utils/upload_image.py index 2c79f9f..3e3dd2c 100644 --- a/app/service/design_batch/utils/upload_image.py +++ b/app/service/design_batch/utils/upload_image.py @@ -12,7 +12,6 @@ import logging import cv2 -from app.core.config import * from app.service.utils.new_oss_client import oss_upload_image @@ -25,15 +24,15 @@ def upload_png_mask(minio_client, front_image, object_name, mask=None): # 将掩模的3通道转换为4通道,白色部分不透明,黑色部分透明 rgba_image = cv2.cvtColor(mask_inverted, cv2.COLOR_BGR2BGRA) rgba_image[rgba_image[:, :, 0] == 0] = [0, 0, 0, 0] - req = oss_upload_image(oss_client=minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{object_name}.png", image_bytes=cv2.imencode('.png', rgba_image)[1]) - mask_url = f"{AIDA_CLOTHING}/mask/mask_{object_name}.png" + req = oss_upload_image(oss_client=minio_client, bucket="aida-clothing", object_name=f"mask/mask_{object_name}.png", image_bytes=cv2.imencode('.png', rgba_image)[1]) + mask_url = f"aida-clothing/mask/mask_{object_name}.png" image_data = io.BytesIO() front_image.save(image_data, format='PNG') image_data.seek(0) image_bytes = image_data.read() 
- req = oss_upload_image(oss_client=minio_client, bucket=AIDA_CLOTHING, object_name=f"image/image_{object_name}.png", image_bytes=image_bytes) - image_url = f"{AIDA_CLOTHING}/image/image_{object_name}.png" + req = oss_upload_image(oss_client=minio_client, bucket="aida-clothing", object_name=f"image/image_{object_name}.png", image_bytes=image_bytes) + image_url = f"aida-clothing/image/image_{object_name}.png" return front_image, image_url, mask_url except Exception as e: logging.warning(f"upload_png_mask runtime exception : {e}") diff --git a/app/service/design_fast/design_generate.py b/app/service/design_fast/design_generate.py index eb0b8f9..43a21da 100644 --- a/app/service/design_fast/design_generate.py +++ b/app/service/design_fast/design_generate.py @@ -5,36 +5,60 @@ import time import requests from minio import Minio -from app.core.config import * -from app.service.design_fast.item import BodyItem, TopItem, BottomItem, OthersItem +from app.core.config import settings +from app.service.design_fast.item import BodyItem, TopItem, BottomItem, OthersItem, TopMergeItem, BottomMergeItem, OthersMergeItem from app.service.design_fast.utils.organize import organize_body, organize_clothing, organize_others from app.service.design_fast.utils.progress import final_progress, update_progress -from app.service.design_fast.utils.synthesis_item import synthesis, synthesis_single, update_base_size_priority +from app.service.design_fast.utils.synthesis_item import synthesis, synthesis_single, update_base_size_priority, merge from app.service.utils.decorator import RunTime id_lock = threading.Lock() logger = logging.getLogger() -minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) -def process_item(item, basic): - # 处理project中单个item - if item['type'] == "Body": - body_server = 
BodyItem(data=item, basic=basic, minio_client=minio_client) - item_data = body_server.process() - elif item['type'].lower() in ['blouse', 'outwear', 'dress', 'tops']: - top_server = TopItem(data=item, basic=basic, minio_client=minio_client) - item_data = top_server.process() - elif item['type'].lower() in ['skirt', 'trousers', 'bottoms']: - bottom_server = BottomItem(data=item, basic=basic, minio_client=minio_client) - item_data = bottom_server.process() - elif item['type'].lower() in ['others']: - bottom_server = OthersItem(data=item, basic=basic, minio_client=minio_client) - item_data = bottom_server.process() +def process_item(item, basic, design_type): + # 1. 定义映射配置 + # key 为 item_type 的小写,value 为对应的处理类 + DESIGN_MAP = { + 'body': BodyItem, + 'blouse': TopItem, 'outwear': TopItem, + 'dress': TopItem, 'tops': TopItem, + 'skirt': BottomItem, 'trousers': BottomItem, + 'bottoms': BottomItem, + 'others': OthersItem + } + + MERGE_MAP = { + 'body_merge': BodyItem, + 'blouse_merge': TopMergeItem, 'outwear_merge': TopMergeItem, + 'dress_merge': TopMergeItem, 'tops_merge': TopMergeItem, + 'skirt_merge': BottomMergeItem, 'trousers_merge': BottomMergeItem, + 'bottoms_merge': BottomMergeItem, + 'others_merge': OthersMergeItem + } + + # 2. 根据 design_type 选择映射表 + mapping = MERGE_MAP if design_type == 'merge' else DESIGN_MAP + + if design_type == 'merge': + item_type_key = f"{item['type'].lower()}_merge" + elif design_type == 'default': + item_type_key = item['type'].lower() else: - raise NotImplementedError(f"Item type {item['type']} not implemented") + item_type_key = item['type'].lower() + + handler_class = mapping.get(item_type_key) + + if not handler_class: + raise NotImplementedError(f"Item type {item['type']} not implemented for design_type={design_type}") + + # 4. 
统一实例化并执行 + # 注意:这里假设所有 Item 类构造函数签名一致 + server = handler_class(data=item, basic=basic, minio_client=minio_client) + item_data = server.process() return item_data @@ -44,14 +68,16 @@ def process_layer(item, layers): body_layer = organize_body(item) layers.append(body_layer) return item['body_image'].size - elif item['name'] == 'others': + elif item['name'] in ['others', 'others_merge']: front_layer, back_layer = organize_others(item) layers.append(front_layer) layers.append(back_layer) + return None else: front_layer, back_layer = organize_clothing(item) layers.append(front_layer) layers.append(back_layer) + return None @RunTime @@ -68,17 +94,17 @@ def design_generate(request_data): nonlocal active_threads basic = object['basic'] items_response = {'layers': [], 'objectSign': object['objectSign'] if 'objectSign' in object.keys() else ""} + design_type = basic.get('design_type', "default") if basic['single_overall'] == "overall": item_results = [] for item in object['items']: - item_results.append(process_item(item, basic)) + item_results.append(process_item(item, basic, design_type)) layers = [] - body_size = None for item in item_results: - body_size = process_layer(item, layers) + process_layer(item, layers) layers = sorted(layers, key=lambda s: s.get("priority", float('inf'))) - layers, new_size = update_base_size_priority(layers, body_size) + layers, new_size = update_base_size_priority(layers) # pattern_overall_image_url 、 pattern_print_image_url for lay in layers: items_response['layers'].append({ @@ -92,12 +118,19 @@ def design_generate(request_data): 'image_url': lay['image_url'] if 'image_url' in lay.keys() else None, 'pattern_overall_image_url': lay['pattern_overall_image_url'] if 'pattern_overall_image_url' in lay.keys() else None, 'pattern_print_image_url': lay['pattern_print_image_url'] if 'pattern_print_image_url' in lay.keys() else None, - + 'transpose': lay.get('transpose', None), + 'rotate': lay.get('rotate', None), # 'back_perspective_url': 
lay['back_perspective_url'] if 'back_perspective_url' in lay.keys() else None, }) - items_response['synthesis_url'] = synthesis(layers, new_size, basic) + if basic.get('design_type') == 'default': + items_response['synthesis_url'] = synthesis(layers, new_size, basic) + elif basic.get('design_type') == 'merge': + items_response['synthesis_url'] = merge(layers, new_size, basic) + else: + items_response['synthesis_url'] = synthesis(layers, new_size, basic) + else: - item_result = process_item(object['items'][0], basic) + item_result = process_item(object['items'][0], basic, design_type) items_response['layers'].append({ 'image_category': f"{item_result['name']}_front", 'image_size': item_result['back_image'].size if item_result['back_image'] else None, @@ -149,8 +182,9 @@ def design_generate_v2(request_data): request_id = request_data.requestId threads = [] - def process_object(step, object, callback_url): + def process_object(object, callback_url): basic = object['basic'] + design_type = basic.get('design_type', "default") items_response = { 'layers': [], 'objectSign': object['objectSign'] if 'objectSign' in object.keys() else "", @@ -159,14 +193,13 @@ def design_generate_v2(request_data): if basic['single_overall'] == "overall": item_results = [] for item in object['items']: - item_results.append(process_item(item, basic)) + item_results.append(process_item(item, basic, design_type)) layers = [] - body_size = None for item in item_results: - body_size = process_layer(item, layers) + process_layer(item, layers) layers = sorted(layers, key=lambda s: s.get("priority", float('inf'))) - layers, new_size = update_base_size_priority(layers, body_size) + layers, new_size = update_base_size_priority(layers) for lay in layers: items_response['layers'].append({ @@ -185,7 +218,7 @@ def design_generate_v2(request_data): }) items_response['synthesis_url'] = synthesis(layers, new_size, basic) else: - item_result = process_item(object['items'][0], basic) + item_result = 
process_item(object['items'][0], basic, design_type) items_response['layers'].append({ 'image_category': f"{item_result['name']}_front", 'image_size': item_result['back_image'].size if item_result['back_image'] else None, @@ -229,7 +262,7 @@ def design_generate_v2(request_data): logger.info(response.text) for step, object in enumerate(objects_data): - t = threading.Thread(target=process_object, args=(step, object, callback_url)) + t = threading.Thread(target=process_object, args=(object, callback_url)) threads.append(t) t.start() diff --git a/app/service/design_fast/item.py b/app/service/design_fast/item.py index dcad5f0..b629698 100644 --- a/app/service/design_fast/item.py +++ b/app/service/design_fast/item.py @@ -7,6 +7,7 @@ class BaseItem: self.result['name'] = data['type'].lower() self.result.pop("type") self.result.update(basic) + self.result['design_type'] = basic.get('design_type', None) class OthersItem(BaseItem): @@ -14,13 +15,7 @@ class OthersItem(BaseItem): super().__init__(data, basic) self.Others_pipeline = [ LoadImage(minio_client), - # KeyPoint(), - # ContourDetection(), Segmentation(minio_client), - # BackPerspective(minio_client), - Color(minio_client), - NoSegPrintPainting(minio_client), - PrintPainting(minio_client), Scaling(), Split(minio_client) ] @@ -74,6 +69,65 @@ class BottomItem(BaseItem): return self.result +"""merge""" + + +class OthersMergeItem(BaseItem): + def __init__(self, data, basic, minio_client): + super().__init__(data, basic) + self.Others_pipeline = [ + LoadImage(minio_client), + # KeyPoint(), + # ContourDetection(), + Segmentation(minio_client), + # BackPerspective(minio_client), + Color(minio_client), + # NoSegPrintPainting(minio_client), + # PrintPainting(minio_client), + Scaling(), + Split(minio_client) + ] + + def process(self): + for item in self.Others_pipeline: + self.result = item(self.result) + return self.result + + +class TopMergeItem(BaseItem): + def __init__(self, data, basic, minio_client): + 
super().__init__(data, basic) + self.top_pipeline = [ + LoadImage(minio_client), + KeyPoint(), + Segmentation(minio_client), + Scaling(), + Split(minio_client) + ] + + def process(self): + for item in self.top_pipeline: + self.result = item(self.result) + return self.result + + +class BottomMergeItem(BaseItem): + def __init__(self, data, basic, minio_client): + super().__init__(data, basic) + self.bottom_pipeline = [ + LoadImage(minio_client), + KeyPoint(), + Segmentation(minio_client), + Scaling(), + Split(minio_client) + ] + + def process(self): + for item in self.bottom_pipeline: + self.result = item(self.result) + return self.result + + class BodyItem(BaseItem): def __init__(self, data, basic, minio_client): super().__init__(data, basic) diff --git a/app/service/design/model_process_service.py b/app/service/design_fast/model_process_service.py similarity index 58% rename from app/service/design/model_process_service.py rename to app/service/design_fast/model_process_service.py index 076e04d..e97cb4b 100644 --- a/app/service/design/model_process_service.py +++ b/app/service/design_fast/model_process_service.py @@ -1,13 +1,18 @@ import io -from app.service.utils.oss_client import oss_get_image, oss_upload_image +from minio import Minio +from app.core.config import settings + +from app.service.utils.new_oss_client import oss_get_image, oss_upload_image + +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) def model_transpose(image_path): bucket = image_path.split("/", 1)[0] object_name = image_path.split("/", 1)[1] new_object_name = f'{object_name[:object_name.rfind(".")]}.png' - image = oss_get_image(bucket=bucket, object_name=object_name, data_type="PIL") + image = oss_get_image(oss_client=minio_client, bucket=bucket, object_name=object_name, data_type="PIL") image = image.convert("RGBA") data = image.getdata() # @@ -23,6 +28,6 @@ def model_transpose(image_path): 
image.save(image_data, format='PNG') image_data.seek(0) image_bytes = image_data.read() - oss_upload_image(bucket=bucket, object_name=new_object_name, image_bytes=image_bytes) + oss_upload_image(oss_client=minio_client, bucket=bucket, object_name=new_object_name, image_bytes=image_bytes) image_path = f"{bucket}/{new_object_name}" return image_path diff --git a/app/service/design_fast/pipeline/back_perspective.py b/app/service/design_fast/pipeline/back_perspective.py index 5ddd37c..a1022d4 100644 --- a/app/service/design_fast/pipeline/back_perspective.py +++ b/app/service/design_fast/pipeline/back_perspective.py @@ -18,11 +18,11 @@ class BackPerspective: result['back_perspective_url'] = file_path return result else: - seg_result = get_seg_result("1", result['image'])[0] + seg_result = get_seg_result(result['image'])[0] elif result['name'] in ['blouse', 'outwear', 'dress', 'tops']: seg_result = result['seg_result'] else: - seg_result = get_seg_result("1", result['image'])[0] + seg_result = get_seg_result(result['image'])[0] m = self.thicken_contours_and_display(seg_result, thickness=10, color=(0, 0, 0)) back_sketch = result['image'].copy() @@ -34,7 +34,8 @@ class BackPerspective: result['back_perspective_url'] = f"{resp.bucket_name}/{resp.object_name}" return result - def thicken_contours_and_display(self, mask, thickness=10, color=(0, 0, 0)): + @staticmethod + def thicken_contours_and_display(mask, thickness=10, color=(0, 0, 0)): mask = mask.astype(np.uint8) * 255 # 查找轮廓 contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) @@ -48,9 +49,9 @@ class BackPerspective: # 在空白图像上绘制白色的轮廓 cv2.drawContours(blank, [contour], -1, 255, thickness=thick) # 找到轮廓的中心(可以用重心等方法近似) - M = cv2.moments(contour) - cx = int(M['m10'] / M['m00']) - cy = int(M['m01'] / M['m00']) + m = cv2.moments(contour) + # cx = int(m['m10'] / m['m00']) + # cy = int(m['m01'] / m['m00']) # 进行距离变换,离中心越近的值越小 dist_transform = cv2.distanceTransform(255 - blank, cv2.DIST_L2, 5) # 
根据距离变换的值来决定是否保留像素,离中心近的像素更容易被保留 diff --git a/app/service/design_fast/pipeline/color.py b/app/service/design_fast/pipeline/color.py index 99ba8eb..a184125 100644 --- a/app/service/design_fast/pipeline/color.py +++ b/app/service/design_fast/pipeline/color.py @@ -81,9 +81,9 @@ class Color: def get_pattern(single_color): if single_color is None: raise False - R, G, B = single_color.split(' ') + r, g, b = single_color.split(' ') pattern = np.zeros([1, 1, 3], np.uint8) - pattern[0, 0, 0] = int(B) - pattern[0, 0, 1] = int(G) - pattern[0, 0, 2] = int(R) + pattern[0, 0, 0] = int(b) + pattern[0, 0, 1] = int(g) + pattern[0, 0, 2] = int(r) return pattern diff --git a/app/service/design_fast/pipeline/keypoint.py b/app/service/design_fast/pipeline/keypoint.py index 73d7586..51f1fbc 100644 --- a/app/service/design_fast/pipeline/keypoint.py +++ b/app/service/design_fast/pipeline/keypoint.py @@ -1,9 +1,9 @@ import logging import numpy as np -from pymilvus import MilvusClient +# from pymilvus import MilvusClient -from app.core.config import * +from app.core.config import KEYPOINT_RESULT_TABLE_FIELD_SET, MILVUS_TABLE_KEYPOINT, settings from app.service.design_fast.utils.design_ensemble import get_keypoint_result from app.service.utils.decorator import ClassCallRunTime, RunTime @@ -21,12 +21,12 @@ class KeyPoint: def __call__(self, result): if result['name'] in ['blouse', 'skirt', 'dress', 'outwear', 'trousers', 'tops', 'bottoms']: # 查询是否有数据 且类别相同 相同则直接读 不同则推理后更新 # result['clothes_keypoint'] = self.infer_keypoint_result(result) - site = 'up' if result['name'] in ['blouse', 'outwear', 'dress', 'tops'] else 'down' + # 'up' if result['name'] in ['blouse', 'outwear', 'dress', 'tops'] else 'down' # keypoint_cache = search_keypoint_cache(result["image_id"], site) # keypoint_cache = self.keypoint_cache(result, site) keypoint_cache = False # 取消向量查询 直接过模型推理 - if keypoint_cache is False: + if not keypoint_cache: keypoint_infer_result, site = self.infer_keypoint_result(result) 
result['clothes_keypoint'] = self.save_keypoint_cache(result["image_id"], keypoint_infer_result, site) else: @@ -54,63 +54,64 @@ class KeyPoint: "keypoint_vector": result.tolist() } ] - try: - client = MilvusClient(uri=MILVUS_URL, token=MILVUS_TOKEN, db_name=MILVUS_ALIAS) - res = client.upsert(collection_name=MILVUS_TABLE_KEYPOINT, data=data) - client.close() - return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist())) - except Exception as e: - logger.info(f"save keypoint cache milvus error : {e}") - return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist())) + return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist())) - @staticmethod - def update_keypoint_cache(keypoint_id, infer_result, search_result, site): - if site == "up": - # 需要的是up 即推理出来的是up 那么查询的就是down - result = np.concatenate([infer_result.flatten(), search_result[-4:]]) - else: - # 需要的是down 即推理出来的是down 那么查询的就是up - result = np.concatenate([search_result[:20], infer_result.flatten()]) - data = [ - {"keypoint_id": keypoint_id, - "keypoint_site": "all", - "keypoint_vector": result.tolist() - } - ] + # try: + # client = MilvusClient(uri=settings.MILVUS_URL, token=settings.MILVUS_TOKEN, db_name=settings.MILVUS_ALIAS) + # client.upsert(collection_name=MILVUS_TABLE_KEYPOINT, data=data) + # client.close() + # except Exception as e: + # logger.info(f"save keypoint cache milvus error : {e}") + # return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist())) - try: - client = MilvusClient(uri=MILVUS_URL, token=MILVUS_TOKEN, db_name=MILVUS_ALIAS) - client.upsert( - collection_name=MILVUS_TABLE_KEYPOINT, - data=data - ) - return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist())) - except Exception as e: - logger.info(f"save keypoint cache milvus error : {e}") - return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 
2).astype(int).tolist())) + # @staticmethod + # def update_keypoint_cache(keypoint_id, infer_result, search_result, site): + # if site == "up": + # # 需要的是up 即推理出来的是up 那么查询的就是down + # result = np.concatenate([infer_result.flatten(), search_result[-4:]]) + # else: + # # 需要的是down 即推理出来的是down 那么查询的就是up + # result = np.concatenate([search_result[:20], infer_result.flatten()]) + # data = [ + # {"keypoint_id": keypoint_id, + # "keypoint_site": "all", + # "keypoint_vector": result.tolist() + # } + # ] + # + # try: + # client = MilvusClient(uri=settings.MILVUS_URL, token=settings.MILVUS_TOKEN, db_name=settings.MILVUS_ALIAS) + # client.upsert( + # collection_name=MILVUS_TABLE_KEYPOINT, + # data=data + # ) + # return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist())) + # except Exception as e: + # logger.info(f"save keypoint cache milvus error : {e}") + # return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, result.reshape(12, 2).astype(int).tolist())) - @RunTime - def keypoint_cache(self, result, site): - try: - client = MilvusClient(uri=MILVUS_URL, token=MILVUS_TOKEN, db_name=MILVUS_ALIAS) - keypoint_id = result['image_id'] - res = client.query( - collection_name=MILVUS_TABLE_KEYPOINT, - # ids=[keypoint_id], - filter=f"keypoint_id == {keypoint_id}", - output_fields=['keypoint_vector', 'keypoint_site'] - ) - if len(res) == 0: - # 没有结果 直接推理拿结果 并保存 - keypoint_infer_result, site = self.infer_keypoint_result(result) - return self.save_keypoint_cache(result['image_id'], keypoint_infer_result, site) - elif res[0]["keypoint_site"] == "all" or res[0]["keypoint_site"] == site: - # 需要的类型和查询的类型一致,或者查询的类型为all 则直接返回查询的结果 - return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, np.array(res[0]['keypoint_vector']).astype(int).reshape(12, 2).tolist())) - elif res[0]["keypoint_site"] != site: - # 需要的类型和查询到的不一致,则更新类型为all - keypoint_infer_result, site = self.infer_keypoint_result(result) - return self.update_keypoint_cache(result["image_id"], keypoint_infer_result, 
res[0]['keypoint_vector'], site) - except Exception as e: - logger.info(f"search keypoint cache milvus error {e}") - return False + # @RunTime + # def keypoint_cache(self, result, site): + # try: + # client = MilvusClient(uri=settings.MILVUS_URL, token=settings.MILVUS_TOKEN, db_name=settings.MILVUS_ALIAS) + # keypoint_id = result['image_id'] + # res = client.query( + # collection_name=MILVUS_TABLE_KEYPOINT, + # # ids=[keypoint_id], + # filter=f"keypoint_id == {keypoint_id}", + # output_fields=['keypoint_vector', 'keypoint_site'] + # ) + # if len(res) == 0: + # # 没有结果 直接推理拿结果 并保存 + # keypoint_infer_result, site = self.infer_keypoint_result(result) + # return self.save_keypoint_cache(result['image_id'], keypoint_infer_result, site) + # elif res[0]["keypoint_site"] == "all" or res[0]["keypoint_site"] == site: + # # 需要的类型和查询的类型一致,或者查询的类型为all 则直接返回查询的结果 + # return dict(zip(KEYPOINT_RESULT_TABLE_FIELD_SET, np.array(res[0]['keypoint_vector']).astype(int).reshape(12, 2).tolist())) + # elif res[0]["keypoint_site"] != site: + # # 需要的类型和查询到的不一致,则更新类型为all + # keypoint_infer_result, site = self.infer_keypoint_result(result) + # return self.update_keypoint_cache(result["image_id"], keypoint_infer_result, res[0]['keypoint_vector'], site) + # except Exception as e: + # logger.info(f"search keypoint cache milvus error {e}") + # return False diff --git a/app/service/design_fast/pipeline/loading.py b/app/service/design_fast/pipeline/loading.py index 7cc4296..88662fa 100644 --- a/app/service/design_fast/pipeline/loading.py +++ b/app/service/design_fast/pipeline/loading.py @@ -1,10 +1,7 @@ -import io import logging -import os - +from skimage.morphology import skeletonize import cv2 import numpy as np -from PIL import Image from app.service.utils.new_oss_client import oss_get_image @@ -38,41 +35,26 @@ class LoadImage: return cls.name def __call__(self, result): + if result.get("merge_image_path"): + result['merge_image'], _ = self.read_image(result['merge_image_path']) result['image'], 
result['pre_mask'] = self.read_image(result['path']) - # if 'extract_lines' in result.keys(): - # if result['extract_lines']: - # result['gray'] = self.get_lines(cv2.cvtColor(result['image'], cv2.COLOR_BGR2GRAY), result['path']) - # else: - # result['gray'] = cv2.cvtColor(result['image'], cv2.COLOR_BGR2GRAY) - # else: - # result['gray'] = cv2.cvtColor(result['image'], cv2.COLOR_BGR2GRAY) - - result['gray'] = self.get_lines(cv2.cvtColor(result['image'], cv2.COLOR_BGR2GRAY), result['path']) + result['gray'] = self.get_lines(cv2.cvtColor(result['image'], cv2.COLOR_BGR2GRAY)) result['keypoint'] = self.get_keypoint(result['name']) result['img_shape'] = result['image'].shape result['ori_shape'] = result['image'].shape return result - def get_lines(self, img, path): + @staticmethod + def get_lines(img): binary = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 25, 10) - - # 步骤2:细化边缘(可选,让线条更干净) - # kernel = np.ones((1, 1), np.uint8) - # clean = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel) - - thinned = cv2.ximgproc.thinning(binary, thinningType=cv2.ximgproc.THINNING_ZHANGSUEN) # thinning算法细化线条 - mask = thinned > 0 + binary_bool = binary > 0 + skeleton = skeletonize(binary_bool, method='zhang') + mask = skeleton result = np.ones_like(img) * 255 result[mask] = img[mask] - - # 步骤3:反转回 白底黑线 - # lines = cv2.bitwise_not(thinned) - # cv2.imwrite(os.path.join('/home/user/PycharmProjects/trinity_client_aida/test/lines_original_result_5', f"Original_{path.replace('/', '-')}.png"), img) - # cv2.imwrite(os.path.join('/home/user/PycharmProjects/trinity_client_aida/test/lines_original_result_5', f"Line_{path.replace('/', '-')}.png"), result) - return result def read_image(self, image_path): @@ -93,19 +75,19 @@ class LoadImage: @staticmethod def get_keypoint(name): - if name == 'blouse' or name == 'outwear' or name == 'dress' or name == 'tops': + if name in ['blouse', 'outwear', 'dress', 'tops', 'blouse_merge', 'outwear_merge', 'dress_merge', 
'tops_merge']: keypoint = 'shoulder' - elif name == 'trousers' or name == 'skirt' or name == 'bottoms': + elif name in ['trousers', 'skirt', 'bottoms', 'trousers_merge', 'skirt_merge', 'bottoms_merge']: keypoint = 'waistband' - elif name == 'bag': + elif name in ['bag', 'bag_merge']: keypoint = 'hand_point' - elif name == 'shoes': + elif name in ['shoes', 'shoes_merge']: keypoint = 'toe' - elif name == 'hairstyle': + elif name in ['hairstyle', 'hairstyle_merge']: keypoint = 'head_point' - elif name == 'earring': + elif name in ['earring', 'earring_merge']: keypoint = 'ear_point' - elif name == 'others': + elif name in ['others', 'others_merge']: keypoint = "others" else: raise KeyError(f"{name} does not belong to item category list: blouse, outwear, dress, trousers, skirt, " diff --git a/app/service/design_fast/pipeline/no_seg_print_painting.py b/app/service/design_fast/pipeline/no_seg_print_painting.py index dcceaba..b6449db 100644 --- a/app/service/design_fast/pipeline/no_seg_print_painting.py +++ b/app/service/design_fast/pipeline/no_seg_print_painting.py @@ -20,16 +20,8 @@ class NoSegPrintPainting: if overall_print['print_path_list']: painting_dict = {'dim_image_h': result['pattern_image'].shape[0], 'dim_image_w': result['pattern_image'].shape[1]} - if "print_angle_list" in overall_print.keys() and overall_print['print_angle_list'][0] != 0: - painting_dict = self.painting_collection(painting_dict, overall_print, print_trigger=True) - painting_dict['tile_print'] = self.rotate_crop_image(img=painting_dict['tile_print'], angle=-overall_print['print_angle_list'][0], crop=True) - painting_dict['mask_inv_print'] = self.rotate_crop_image(img=painting_dict['mask_inv_print'], angle=-overall_print['print_angle_list'][0], crop=True) - - # resize 到sketch大小 - painting_dict['tile_print'] = self.resize_and_crop(img=painting_dict['tile_print'], target_width=painting_dict['dim_image_w'], target_height=painting_dict['dim_image_h']) - painting_dict['mask_inv_print'] = 
self.resize_and_crop(img=painting_dict['mask_inv_print'], target_width=painting_dict['dim_image_w'], target_height=painting_dict['dim_image_h']) - else: - painting_dict = self.painting_collection(painting_dict, overall_print, print_trigger=True, is_single=False) + # 获取平铺 + 旋转 的overall print + painting_dict = self.painting_collection(painting_dict, overall_print) result['no_seg_sketch_overall'] = result['no_seg_sketch_print'] = self.printpaint(result, painting_dict, print_=True) result['pattern_image'] = result['no_seg_sketch_overall'] @@ -150,7 +142,6 @@ class NoSegPrintPainting: temp_fg = np.expand_dims(result['mask'], axis=2).repeat(3, axis=2) tmp2 = (result['final_image'] * (temp_fg / 255)).astype(np.uint8) result['no_seg_sketch_print'] = cv2.add(tmp1, tmp2) - return result @staticmethod @@ -165,27 +156,21 @@ class NoSegPrintPainting: print_background = img1_bg + img2_fg return print_background - def painting_collection(self, painting_dict, print_dict, print_trigger=False, is_single=False): - if print_trigger: - print_ = self.get_print(print_dict) - painting_dict['Trigger'] = not is_single - painting_dict['location'] = print_['location'] - single_mask_inv_print = self.get_mask_inv(print_['image']) - dim_max = max(painting_dict['dim_image_h'], painting_dict['dim_image_w']) - dim_pattern = (int(dim_max * print_['scale'] / 5), int(dim_max * print_['scale'] / 5)) - if not is_single: - self.random_seed = random.randint(0, 1000) - # 如果print 模式为overall 且 有角度的话 , 组合的print为正方形,方便裁剪 - if "print_angle_list" in print_dict.keys() and print_dict['print_angle_list'][0] != 0: - painting_dict['mask_inv_print'] = self.tile_image(single_mask_inv_print, dim_pattern, print_['scale'], dim_max, dim_max, painting_dict['location'], trigger=True) - painting_dict['tile_print'] = self.tile_image(print_['image'], dim_pattern, print_['scale'], dim_max, dim_max, painting_dict['location'], trigger=True) - else: - painting_dict['mask_inv_print'] = self.tile_image(single_mask_inv_print, 
dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location'], trigger=True) - painting_dict['tile_print'] = self.tile_image(print_['image'], dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location'], trigger=True) - else: - painting_dict['mask_inv_print'] = self.tile_image(single_mask_inv_print, dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location']) - painting_dict['tile_print'] = self.tile_image(print_['image'], dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location']) - painting_dict['dim_print_h'], painting_dict['dim_print_w'] = dim_pattern + def painting_collection(self, painting_dict, print_dict): + print_ = self.get_print(print_dict) + painting_dict['location'] = print_['location'] + dim_max = max(painting_dict['dim_image_h'], painting_dict['dim_image_w']) + dim_pattern = (int(dim_max * print_['scale'] / 5), int(dim_max * print_['scale'] / 5)) + gap = print_dict.get('gap', [[0, 0]])[0] + painting_dict['tile_print'] = tile_image(pattern=print_['image'], + dim=dim_pattern, + gap_x=gap[0], + gap_y=gap[1], + canvas_h=painting_dict['dim_image_h'], + canvas_w=painting_dict['dim_image_w'], + location=painting_dict['location'], + angle=45) + painting_dict['mask_inv_print'] = np.zeros(painting_dict['tile_print'].shape[:2], dtype=np.uint8) return painting_dict def tile_image(self, pattern, dim, scale, dim_image_h, dim_image_w, location, trigger=False): @@ -219,33 +204,32 @@ class NoSegPrintPainting: @staticmethod def printpaint(result, painting_dict, print_=False): - - if print_ and painting_dict['Trigger']: + if print_: print_mask = cv2.bitwise_and(result['mask'], cv2.bitwise_not(painting_dict['mask_inv_print'])) img_fg = cv2.bitwise_and(painting_dict['tile_print'], painting_dict['tile_print'], mask=print_mask) else: print_mask = 
result['mask'] img_fg = result['final_image'] - if print_ and not painting_dict['Trigger']: - index_ = None - try: - index_ = len(painting_dict['location']) - except: - assert f'there must be parameter of location if choose IfSingle' - - for i in range(index_): - start_h, start_w = int(painting_dict['location'][i][1]), int(painting_dict['location'][i][0]) - - length_h = min(start_h + painting_dict['dim_print_h'], img_fg.shape[0]) - length_w = min(start_w + painting_dict['dim_print_w'], img_fg.shape[1]) - - change_region = img_fg[start_h: length_h, start_w: length_w, :] - # problem in change_mask - change_mask = print_mask[start_h: length_h, start_w: length_w] - # get real part into change mask - _, change_mask = cv2.threshold(change_mask, 220, 255, cv2.THRESH_BINARY) - mask = cv2.bitwise_not(painting_dict['mask_inv_print']) - img_fg[start_h:start_h + painting_dict['dim_print_h'], start_w:start_w + painting_dict['dim_print_w'], :] = change_region + # if print_ and not painting_dict['Trigger']: + # index_ = None + # try: + # index_ = len(painting_dict['location']) + # except: + # assert f'there must be parameter of location if choose IfSingle' + # + # for i in range(index_): + # start_h, start_w = int(painting_dict['location'][i][1]), int(painting_dict['location'][i][0]) + # + # length_h = min(start_h + painting_dict['dim_print_h'], img_fg.shape[0]) + # length_w = min(start_w + painting_dict['dim_print_w'], img_fg.shape[1]) + # + # change_region = img_fg[start_h: length_h, start_w: length_w, :] + # # problem in change_mask + # change_mask = print_mask[start_h: length_h, start_w: length_w] + # # get real part into change mask + # _, change_mask = cv2.threshold(change_mask, 220, 255, cv2.THRESH_BINARY) + # cv2.bitwise_not(painting_dict['mask_inv_print']) + # img_fg[start_h:start_h + painting_dict['dim_print_h'], start_w:start_w + painting_dict['dim_print_w'], :] = change_region clothes_mask_print = cv2.bitwise_not(print_mask) @@ -277,8 +261,6 @@ class 
NoSegPrintPainting: print_w = print_shape[1] print_h = print_shape[0] - random.seed(self.random_seed) - # 1.拿到偏移量后和resize后的print宽高取余 得到真正偏移量 # 偏移量增加2分之print.w 使坐标位于图中间 如果要位于左上角删除+ print_w // 2 即可 x_offset = print_w - int(location[0][1] % print_w) + print_w // 2 @@ -420,3 +402,96 @@ class NoSegPrintPainting: cropped_img = resized_img[start_y:start_y + target_height, :] return cropped_img + + +def tile_image(pattern, dim, gap_x, gap_y, canvas_h, canvas_w, location, angle=0): + """ + 按照指定的 X/Y 间距平铺印花,并支持旋转 + :param angle: 旋转角度 (度数, 逆时针) + """ + # 1. 确保输入是 RGBA + if pattern.shape[2] == 3: + pattern = cv2.cvtColor(pattern, cv2.COLOR_BGR2BGRA) + + # 2. 缩放与旋转印花 + resized_p = cv2.resize(pattern, dim, interpolation=cv2.INTER_AREA) + rotated_p = rotate_image(resized_p, angle) + p_h, p_w = rotated_p.shape[:2] + + # 3. 创建透明单元格 + cell_h, cell_w = p_h + gap_y, p_w + gap_x + unit_cell = np.zeros((cell_h, cell_w, 4), dtype=np.uint8) + unit_cell[:p_h, :p_w, :] = rotated_p + + # 4. 执行平铺 + tiles_y = (canvas_h // cell_h) + 2 + tiles_x = (canvas_w // cell_w) + 2 + full_tiled = np.tile(unit_cell, (tiles_y, tiles_x, 1)) + + # 5. 裁剪平铺层 + offset_x = int(location[0][1] % cell_w) + offset_y = int(location[0][0] % cell_h) + tiled_layer = full_tiled[offset_y: offset_y + canvas_h, + offset_x: offset_x + canvas_w] + + # 6. 
创建纯白色背景并合成 + # 创建一个纯白色的 BGR 画布 + white_background = np.full((canvas_h, canvas_w, 3), 255, dtype=np.uint8) + + # 分离平铺层的颜色通道和 Alpha 通道 + tiled_bgr = tiled_layer[:, :, :3] + alpha_mask = tiled_layer[:, :, 3] / 255.0 # 归一化到 0-1 + alpha_mask = cv2.merge([alpha_mask, alpha_mask, alpha_mask]) # 扩展到 3 通道 + + # 执行 Alpha 混合:结果 = 平铺层 * alpha + 背景 * (1 - alpha) + result = (tiled_bgr * alpha_mask + white_background * (1 - alpha_mask)).astype(np.uint8) + + return result + + +def rotate_image(image, angle): + """ + 旋转图片并保持完整内容(自动扩大画布) + """ + if angle == 0: + return image + + (h, w) = image.shape[:2] + (cX, cY) = (w // 2, h // 2) + + # 获取旋转矩阵 + M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0) + + # 计算旋转后新边界的 sine 和 cosine + cos = np.abs(M[0, 0]) + sin = np.abs(M[0, 1]) + + # 计算新的画布尺寸 + nW = int((h * sin) + (w * cos)) + nH = int((h * cos) + (w * sin)) + + # 调整旋转矩阵以考虑平移 + M[0, 2] += (nW / 2) - cX + M[1, 2] += (nH / 2) - cY + + # 执行旋转 + return cv2.warpAffine(image, M, (nW, nH)) + + +def crop_image(image, image_size_h, image_size_w, location, print_shape): + print_w = print_shape[1] + print_h = print_shape[0] + + # 1.拿到偏移量后和resize后的print宽高取余 得到真正偏移量 + # 偏移量增加2分之print.w 使坐标位于图中间 如果要位于左上角删除+ print_w // 2 即可 + x_offset = print_w - int(location[0][1] % print_w) + print_w // 2 + y_offset = print_h - int(location[0][0] % print_h) + print_h // 2 + + # y_offset = int(location[0][0]) + # x_offset = int(location[0][1]) + + if len(image.shape) == 2: + image = image[x_offset: x_offset + image_size_h, y_offset: y_offset + image_size_w] + elif len(image.shape) == 3: + image = image[x_offset: x_offset + image_size_h, y_offset: y_offset + image_size_w, :] + return image diff --git a/app/service/design_fast/pipeline/print_painting.py b/app/service/design_fast/pipeline/print_painting.py index 2dff103..7b1b2ed 100644 --- a/app/service/design_fast/pipeline/print_painting.py +++ b/app/service/design_fast/pipeline/print_painting.py @@ -38,23 +38,14 @@ class PrintPainting: overall_print['location'][0] = 
[x * y for x, y in zip(overall_print['location'][0], result['resize_scale'])] painting_dict = {'dim_image_h': result['pattern_image'].shape[0], 'dim_image_w': result['pattern_image'].shape[1]} result['print_image'] = result['pattern_image'] - if "print_angle_list" in overall_print.keys() and overall_print['print_angle_list'][0] != 0: - painting_dict = self.painting_collection(painting_dict, overall_print, print_trigger=True) - painting_dict['tile_print'] = self.rotate_crop_image(img=painting_dict['tile_print'], angle=-overall_print['print_angle_list'][0], crop=True) - painting_dict['mask_inv_print'] = self.rotate_crop_image(img=painting_dict['mask_inv_print'], angle=-overall_print['print_angle_list'][0], crop=True) - - # resize 到sketch大小 - painting_dict['tile_print'] = self.resize_and_crop(img=painting_dict['tile_print'], target_width=painting_dict['dim_image_w'], target_height=painting_dict['dim_image_h']) - painting_dict['mask_inv_print'] = self.resize_and_crop(img=painting_dict['mask_inv_print'], target_width=painting_dict['dim_image_w'], target_height=painting_dict['dim_image_h']) - else: - painting_dict = self.painting_collection(painting_dict, overall_print, print_trigger=True, is_single=False) + # 获取平铺 + 旋转 的overall print + painting_dict = self.painting_collection(painting_dict, overall_print) result['print_image'] = self.printpaint(result, painting_dict, print_=True) result['single_image'] = result['final_image'] = result['pattern_image'] = result['print_image'] if single_print['print_path_list']: # 2025-9-19 印花调整 印花坐标按照sketch的缩放比调整 sketch_resize_scale = result['resize_scale'] - print_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8) mask_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8) for i in range(len(single_print['print_path_list'])): @@ -77,75 +68,6 @@ class PrintPainting: print_background = 
cv2.cvtColor(np.array(source_image_pil), cv2.COLOR_RGBA2BGR) mask_background = cv2.cvtColor(np.array(source_image_pil_mask), cv2.COLOR_RGBA2BGR) ret, mask_background = cv2.threshold(mask_background, 124, 255, cv2.THRESH_BINARY) - # else: - # mask = self.get_mask_inv(image) - # mask = np.expand_dims(mask, axis=2) - # mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) - # mask = cv2.bitwise_not(mask) - # - # mask = cv2.resize(mask, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) - # image = cv2.resize(image, (int(result['final_image'].shape[1] * single_print['print_scale_list'][i][0]), int(result['final_image'].shape[0] * single_print['print_scale_list'][i][1]))) - # # 旋转后的坐标需要重新算 - # rotate_mask, _ = self.img_rotate(mask, single_print['print_angle_list'][i]) - # rotate_image, rotated_new_size = self.img_rotate(image, single_print['print_angle_list'][i]) - # # x, y = int(result['print']['location'][i][0] - rotated_new_size[0] - (rotate_mask.shape[0] - image.shape[0]) / 2), int(result['print']['location'][i][1] - rotated_new_size[1] - (rotate_mask.shape[1] - image.shape[1]) / 2) - # x, y = int(single_print['location'][i][0] - rotated_new_size[0]), int(single_print['location'][i][1] - rotated_new_size[1]) - # - # image_x = print_background.shape[1] # 底图宽 - # image_y = print_background.shape[0] # 底图高 - # print_x = rotate_image.shape[1] #印花宽 - # print_y = rotate_image.shape[0] #印花高 - # - # # 有bug - # # if x + print_x > image_x: - # # rotate_image = rotate_image[:, :x + print_x - image_x] - # # rotate_mask = rotate_mask[:, :x + print_x - image_x] - # # # - # # if y + print_y > image_y: - # # rotate_image = rotate_image[:y + print_y - image_y] - # # rotate_mask = rotate_mask[:y + print_y - image_y] - # - # # 不能是并行 - # # 当前第一轮的if (108以及115)是判断有没有过下界和右界。第二轮的是判断左上有没有超出。 如果这个样子的话,先裁了右边,再左移,region就会有问题 - # # 先挪 再判断 最后裁剪 - # - # # 如果print旋转了 或者 print贴边了 则需要判断 判断左界和上界是否小于0 - # 
if x <= 0: # 如果X轴偏移量小于0,说明印花需要被裁剪至合适大小 或当X轴偏移量大于印花宽度时,裁剪后的印花宽度为0 - # rotate_image = rotate_image[:, abs(x):] - # rotate_mask = rotate_mask[:, abs(x):] - # start_x = x = 0 - # else: - # start_x = x - # - # if y <= 0: # 如果X轴偏移量大于0,说明印花需要被裁剪至合适大小 或当Y轴偏移量大于印花宽度时,裁剪后的印花宽度为0 - # rotate_image = rotate_image[abs(y):, :] - # rotate_mask = rotate_mask[abs(y):, :] - # start_y = y = 0 - # else: - # start_y = y - # - # # ------------------ - # # 如果print-size大于image-size 则需要裁剪print - # - # if x + print_x > image_x: - # rotate_image = rotate_image[:, :image_x - x] - # rotate_mask = rotate_mask[:, :image_x - x] - # - # if y + print_y > image_y: - # rotate_image = rotate_image[:image_y - y, :] - # rotate_mask = rotate_mask[:image_y - y, :] - # - # # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = cv2.bitwise_xor(mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]], rotate_mask) - # # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = cv2.add(print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]], rotate_image) - # - # # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = rotate_mask - # # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = rotate_image - # mask_background = self.stack_prin(mask_background, result['pattern_image'], rotate_mask, start_y, y, start_x, x) - # print_background = self.stack_prin(print_background, result['pattern_image'], rotate_image, start_y, y, start_x, x) - - # gray_image = cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY) - # print_background = cv2.bitwise_and(print_background, print_background, mask=gray_image) - print_mask = cv2.bitwise_and(result['mask'], cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY)) img_fg = cv2.bitwise_or(print_background, print_background, mask=print_mask) img_bg = cv2.bitwise_and(result['pattern_image'], 
result['pattern_image'], mask=cv2.bitwise_not(print_mask)) @@ -165,7 +87,6 @@ class PrintPainting: if element_print['element_path_list']: # 2025-9-19 印花调整 印花坐标按照sketch的缩放比调整 sketch_resize_scale = result['resize_scale'] - print_background = np.zeros((result['final_image'].shape[0], result['final_image'].shape[1], 3), dtype=np.uint8) mask_background = np.zeros((result['final_image'].shape[0], result['final_image'].shape[1], 3), dtype=np.uint8) for i in range(len(element_print['element_path_list'])): @@ -206,20 +127,6 @@ class PrintPainting: print_x = rotate_image.shape[1] print_y = rotate_image.shape[0] - # 有bug - # if x + print_x > image_x: - # rotate_image = rotate_image[:, :x + print_x - image_x] - # rotate_mask = rotate_mask[:, :x + print_x - image_x] - # # - # if y + print_y > image_y: - # rotate_image = rotate_image[:y + print_y - image_y] - # rotate_mask = rotate_mask[:y + print_y - image_y] - - # 不能是并行 - # 当前第一轮的if (108以及115)是判断有没有过下界和右界。第二轮的是判断左上有没有超出。 如果这个样子的话,先裁了右边,再左移,region就会有问题 - # 先挪 再判断 最后裁剪 - - # 如果print旋转了 或者 print贴边了 则需要判断 判断左界和上界是否小于0 if x <= 0: rotate_image = rotate_image[:, -x:] rotate_mask = rotate_mask[:, -x:] @@ -234,9 +141,6 @@ class PrintPainting: else: start_y = y - # ------------------ - # 如果print-size大于image-size 则需要裁剪print - if x + print_x > image_x: rotate_image = rotate_image[:, :image_x - x] rotate_mask = rotate_mask[:, :image_x - x] @@ -245,11 +149,6 @@ class PrintPainting: rotate_image = rotate_image[:image_y - y, :] rotate_mask = rotate_mask[:image_y - y, :] - # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]] = cv2.bitwise_xor(mask_background[start_y:y + rotate_mask.shape[0], start_x:x + rotate_mask.shape[1]], rotate_mask) - # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = cv2.add(print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]], rotate_image) - - # mask_background[start_y:y + rotate_mask.shape[0], start_x:x + 
rotate_mask.shape[1]] = rotate_mask - # print_background[start_y:y + rotate_image.shape[0], start_x:x + rotate_image.shape[1]] = rotate_image mask_background = self.stack_prin(mask_background, result['pattern_image'], rotate_mask, start_y, y, start_x, x) print_background = self.stack_prin(print_background, result['pattern_image'], rotate_image, start_y, y, start_x, x) @@ -297,12 +196,8 @@ class PrintPainting: ret, mask_background = cv2.threshold(mask_background, 124, 255, cv2.THRESH_BINARY) print_mask = cv2.bitwise_and(result['mask'], cv2.cvtColor(mask_background, cv2.COLOR_BGR2GRAY)) img_fg = cv2.bitwise_or(print_background, print_background, mask=print_mask) - # TODO element 丢失信息 three_channel_image = cv2.merge([cv2.bitwise_not(print_mask), cv2.bitwise_not(print_mask), cv2.bitwise_not(print_mask)]) img_bg = cv2.bitwise_and(result['final_image'], three_channel_image) - # mask_mo = np.expand_dims(print_mask, axis=2).repeat(3, axis=2) - # gray_mo = np.expand_dims(result['gray'], axis=2).repeat(3, axis=2) - # img_fg = (img_fg * (mask_mo / 255) * (gray_mo / 255)).astype(np.uint8) result['final_image'] = cv2.add(img_bg, img_fg) canvas = np.full_like(result['final_image'], 255) temp_bg = np.expand_dims(cv2.bitwise_not(result['mask']), axis=2).repeat(3, axis=2) @@ -324,27 +219,21 @@ class PrintPainting: print_background = img1_bg + img2_fg return print_background - def painting_collection(self, painting_dict, print_dict, print_trigger=False, is_single=False): - if print_trigger: - print_ = self.get_print(print_dict) - painting_dict['Trigger'] = not is_single - painting_dict['location'] = print_['location'] - single_mask_inv_print = self.get_mask_inv(print_['image']) - dim_max = max(painting_dict['dim_image_h'], painting_dict['dim_image_w']) - dim_pattern = (int(dim_max * print_['scale'] / 5), int(dim_max * print_['scale'] / 5)) - if not is_single: - self.random_seed = random.randint(0, 1000) - # 如果print 模式为overall 且 有角度的话 , 组合的print为正方形,方便裁剪 - if "print_angle_list" in 
print_dict.keys() and print_dict['print_angle_list'][0] != 0: - painting_dict['mask_inv_print'] = self.tile_image(single_mask_inv_print, dim_pattern, print_['scale'], dim_max, dim_max, painting_dict['location'], trigger=True) - painting_dict['tile_print'] = self.tile_image(print_['image'], dim_pattern, print_['scale'], dim_max, dim_max, painting_dict['location'], trigger=True) - else: - painting_dict['mask_inv_print'] = self.tile_image(single_mask_inv_print, dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location'], trigger=True) - painting_dict['tile_print'] = self.tile_image(print_['image'], dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location'], trigger=True) - else: - painting_dict['mask_inv_print'] = self.tile_image(single_mask_inv_print, dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location']) - painting_dict['tile_print'] = self.tile_image(print_['image'], dim_pattern, print_['scale'], painting_dict['dim_image_h'], painting_dict['dim_image_w'], painting_dict['location']) - painting_dict['dim_print_h'], painting_dict['dim_print_w'] = dim_pattern + def painting_collection(self, painting_dict, print_dict): + print_ = self.get_print(print_dict) + painting_dict['location'] = print_['location'] + dim_max = max(painting_dict['dim_image_h'], painting_dict['dim_image_w']) + dim_pattern = (int(dim_max * print_['scale'] / 5), int(dim_max * print_['scale'] / 5)) + gap = print_dict.get('gap', [[0, 0]])[0] + painting_dict['tile_print'] = tile_image(pattern=print_['image'], + dim=dim_pattern, + gap_x=gap[0], + gap_y=gap[1], + canvas_h=painting_dict['dim_image_h'], + canvas_w=painting_dict['dim_image_w'], + location=painting_dict['location'], + angle=45) + painting_dict['mask_inv_print'] = np.zeros(painting_dict['tile_print'].shape[:2], dtype=np.uint8) return painting_dict def tile_image(self, 
pattern, dim, scale, dim_image_h, dim_image_w, location, trigger=False): @@ -373,51 +262,37 @@ class PrintPainting: mask_inv = cv2.inRange(print_tile, lower, upper) return mask_inv else: - # bg_color = cv2.cvtColor(print_, cv2.COLOR_BGR2LAB)[0][0] - # print_tile = cv2.cvtColor(print_, cv2.COLOR_BGR2LAB) - # bg_l, bg_a, bg_b = bg_color[0], bg_color[1], bg_color[2] - # bg_L_high, bg_L_low = self.get_low_high_lab(bg_l, L=True) - # bg_a_high, bg_a_low = self.get_low_high_lab(bg_a) - # bg_b_high, bg_b_low = self.get_low_high_lab(bg_b) - # lower = np.array([bg_L_low, bg_a_low, bg_b_low]) - # upper = np.array([bg_L_high, bg_a_high, bg_b_high]) - - # print_tile = cv2.cvtColor(print_, cv2.COLOR_BGR2LAB) - # mask_inv = cv2.cvtColor(print_tile, cv2.COLOR_BGR2GRAY) - - # mask_inv = cv2.cvtColor(print_, cv2.COLOR_BGR2GRAY) mask_inv = np.zeros(print_.shape[:2], dtype=np.uint8) return mask_inv @staticmethod def printpaint(result, painting_dict, print_=False): - - if print_ and painting_dict['Trigger']: + if print_: print_mask = cv2.bitwise_and(result['mask'], cv2.bitwise_not(painting_dict['mask_inv_print'])) img_fg = cv2.bitwise_and(painting_dict['tile_print'], painting_dict['tile_print'], mask=print_mask) else: print_mask = result['mask'] img_fg = result['final_image'] - if print_ and not painting_dict['Trigger']: - index_ = None - try: - index_ = len(painting_dict['location']) - except: - assert f'there must be parameter of location if choose IfSingle' - - for i in range(index_): - start_h, start_w = int(painting_dict['location'][i][1]), int(painting_dict['location'][i][0]) - - length_h = min(start_h + painting_dict['dim_print_h'], img_fg.shape[0]) - length_w = min(start_w + painting_dict['dim_print_w'], img_fg.shape[1]) - - change_region = img_fg[start_h: length_h, start_w: length_w, :] - # problem in change_mask - change_mask = print_mask[start_h: length_h, start_w: length_w] - # get real part into change mask - _, change_mask = cv2.threshold(change_mask, 220, 255, 
cv2.THRESH_BINARY) - mask = cv2.bitwise_not(painting_dict['mask_inv_print']) - img_fg[start_h:start_h + painting_dict['dim_print_h'], start_w:start_w + painting_dict['dim_print_w'], :] = change_region + # if print_ and not painting_dict['Trigger']: + # index_ = None + # try: + # index_ = len(painting_dict['location']) + # except: + # assert f'there must be parameter of location if choose IfSingle' + # + # for i in range(index_): + # start_h, start_w = int(painting_dict['location'][i][1]), int(painting_dict['location'][i][0]) + # + # length_h = min(start_h + painting_dict['dim_print_h'], img_fg.shape[0]) + # length_w = min(start_w + painting_dict['dim_print_w'], img_fg.shape[1]) + # + # change_region = img_fg[start_h: length_h, start_w: length_w, :] + # # problem in change_mask + # change_mask = print_mask[start_h: length_h, start_w: length_w] + # # get real part into change mask + # _, change_mask = cv2.threshold(change_mask, 220, 255, cv2.THRESH_BINARY) + # cv2.bitwise_not(painting_dict['mask_inv_print']) + # img_fg[start_h:start_h + painting_dict['dim_print_h'], start_w:start_w + painting_dict['dim_print_w'], :] = change_region clothes_mask_print = cv2.bitwise_not(print_mask) @@ -449,11 +324,6 @@ class PrintPainting: print_w = print_shape[1] print_h = print_shape[0] - random.seed(self.random_seed) - # logging.info(f'overall print location : {location}') - # x_offset = random.randint(0, image.shape[0] - image_size_h) - # y_offset = random.randint(0, image.shape[1] - image_size_w) - # 1.拿到偏移量后和resize后的print宽高取余 得到真正偏移量 # 偏移量增加2分之print.w 使坐标位于图中间 如果要位于左上角删除+ print_w // 2 即可 x_offset = print_w - int(location[0][1] % print_w) + print_w // 2 @@ -595,3 +465,96 @@ class PrintPainting: cropped_img = resized_img[start_y:start_y + target_height, :] return cropped_img + + +def tile_image(pattern, dim, gap_x, gap_y, canvas_h, canvas_w, location, angle=0): + """ + 按照指定的 X/Y 间距平铺印花,并支持旋转 + :param angle: 旋转角度 (度数, 逆时针) + """ + # 1. 
确保输入是 RGBA + if pattern.shape[2] == 3: + pattern = cv2.cvtColor(pattern, cv2.COLOR_BGR2BGRA) + + # 2. 缩放与旋转印花 + resized_p = cv2.resize(pattern, dim, interpolation=cv2.INTER_AREA) + rotated_p = rotate_image(resized_p, angle) + p_h, p_w = rotated_p.shape[:2] + + # 3. 创建透明单元格 + cell_h, cell_w = p_h + gap_y, p_w + gap_x + unit_cell = np.zeros((cell_h, cell_w, 4), dtype=np.uint8) + unit_cell[:p_h, :p_w, :] = rotated_p + + # 4. 执行平铺 + tiles_y = (canvas_h // cell_h) + 2 + tiles_x = (canvas_w // cell_w) + 2 + full_tiled = np.tile(unit_cell, (tiles_y, tiles_x, 1)) + + # 5. 裁剪平铺层 + offset_x = int(location[0][1] % cell_w) + offset_y = int(location[0][0] % cell_h) + tiled_layer = full_tiled[offset_y: offset_y + canvas_h, + offset_x: offset_x + canvas_w] + + # 6. 创建纯白色背景并合成 + # 创建一个纯白色的 BGR 画布 + white_background = np.full((canvas_h, canvas_w, 3), 255, dtype=np.uint8) + + # 分离平铺层的颜色通道和 Alpha 通道 + tiled_bgr = tiled_layer[:, :, :3] + alpha_mask = tiled_layer[:, :, 3] / 255.0 # 归一化到 0-1 + alpha_mask = cv2.merge([alpha_mask, alpha_mask, alpha_mask]) # 扩展到 3 通道 + + # 执行 Alpha 混合:结果 = 平铺层 * alpha + 背景 * (1 - alpha) + result = (tiled_bgr * alpha_mask + white_background * (1 - alpha_mask)).astype(np.uint8) + + return result + + +def rotate_image(image, angle): + """ + 旋转图片并保持完整内容(自动扩大画布) + """ + if angle == 0: + return image + + (h, w) = image.shape[:2] + (cX, cY) = (w // 2, h // 2) + + # 获取旋转矩阵 + M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0) + + # 计算旋转后新边界的 sine 和 cosine + cos = np.abs(M[0, 0]) + sin = np.abs(M[0, 1]) + + # 计算新的画布尺寸 + nW = int((h * sin) + (w * cos)) + nH = int((h * cos) + (w * sin)) + + # 调整旋转矩阵以考虑平移 + M[0, 2] += (nW / 2) - cX + M[1, 2] += (nH / 2) - cY + + # 执行旋转 + return cv2.warpAffine(image, M, (nW, nH)) + + +def crop_image(image, image_size_h, image_size_w, location, print_shape): + print_w = print_shape[1] + print_h = print_shape[0] + + # 1.拿到偏移量后和resize后的print宽高取余 得到真正偏移量 + # 偏移量增加2分之print.w 使坐标位于图中间 如果要位于左上角删除+ print_w // 2 即可 + x_offset = print_w - 
int(location[0][1] % print_w) + print_w // 2 + y_offset = print_h - int(location[0][0] % print_h) + print_h // 2 + + # y_offset = int(location[0][0]) + # x_offset = int(location[0][1]) + + if len(image.shape) == 2: + image = image[x_offset: x_offset + image_size_h, y_offset: y_offset + image_size_w] + elif len(image.shape) == 3: + image = image[x_offset: x_offset + image_size_h, y_offset: y_offset + image_size_w, :] + return image diff --git a/app/service/design_fast/pipeline/segmentation.py b/app/service/design_fast/pipeline/segmentation.py index 0c9c51e..bdf75a0 100644 --- a/app/service/design_fast/pipeline/segmentation.py +++ b/app/service/design_fast/pipeline/segmentation.py @@ -4,7 +4,7 @@ import os import cv2 import numpy as np -from app.core.config import SEG_CACHE_PATH +from app.core.config import settings from app.service.design_fast.utils.design_ensemble import get_seg_result from app.service.utils.decorator import ClassCallRunTime from app.service.utils.new_oss_client import oss_get_image @@ -34,22 +34,22 @@ class Segmentation: result['mask'] = result['front_mask'] + result['back_mask'] else: # preview 过模型 不缓存 - if "preview_submit" in result.keys() and result['preview_submit'] == "preview": - # 推理获得seg 结果 - seg_result = get_seg_result(result["image_id"], result['image']) - # submit 过模型 缓存 - elif "preview_submit" in result.keys() and result['preview_submit'] == "submit": - # 推理获得seg 结果 - seg_result = get_seg_result(result["image_id"], result['image']) - self.save_seg_result(seg_result, result['image_id']) - # null 正常流程 加载本地缓存 无缓存则过模型 + if result.get("design_type", None) == "merge": + seg_result = get_seg_result(result['image']) + # 默认design 模式 - 过模型 缓存 + # elif result.get("design_type", None) == "submit": + # 推理获得seg 结果 + # seg_result = get_seg_result(result['image']) + # self.save_seg_result(seg_result, result['image_id']) + + # 默认模式- 加载模型,找不到则过模型推理,推理后保存到本地 else: # 本地查询seg 缓存是否存在 _, seg_result = self.load_seg_result(result["image_id"]) # 
判断缓存和实际图片size是否相同 if not _ or result["image"].shape[:2] != seg_result.shape: # 推理获得seg 结果 - seg_result = get_seg_result(result["image_id"], result['image']) + seg_result = get_seg_result(result['image']) self.save_seg_result(seg_result, result['image_id']) result['seg_result'] = seg_result @@ -63,7 +63,7 @@ class Segmentation: @staticmethod def save_seg_result(seg_result, image_id): - file_path = f"{SEG_CACHE_PATH}{image_id}.npy" + file_path = f"{settings.SEG_CACHE_PATH}{image_id}.npy" try: np.save(file_path, seg_result) logger.debug(f"保存成功 :{os.path.abspath(file_path)}") @@ -72,7 +72,7 @@ class Segmentation: @staticmethod def load_seg_result(image_id): - file_path = f"{SEG_CACHE_PATH}{image_id}.npy" + file_path = f"{settings.SEG_CACHE_PATH}{image_id}.npy" # logger.info(f"load seg file name is :{SEG_CACHE_PATH}{image_id}.npy") try: seg_result = np.load(file_path) diff --git a/app/service/design_fast/pipeline/split.py b/app/service/design_fast/pipeline/split.py index 906fc93..192529e 100644 --- a/app/service/design_fast/pipeline/split.py +++ b/app/service/design_fast/pipeline/split.py @@ -4,9 +4,8 @@ import logging import cv2 import numpy as np from PIL import Image -from cv2 import cvtColor, COLOR_BGR2RGBA +from celery.bin.result import result -from app.core.config import AIDA_CLOTHING from app.service.design_fast.utils.conversion_image import rgb_to_rgba from app.service.design_fast.utils.transparent import sketch_to_transparent from app.service.design_fast.utils.upload_image import upload_png_mask @@ -21,112 +20,131 @@ class Split(object): def __call__(self, result): try: if result['name'] in ('outwear', 'dress', 'blouse', 'skirt', 'trousers', 'tops', 'bottoms', 'others'): - ori_front_mask = result['front_mask'].copy() - ori_back_mask = result['back_mask'].copy() + if result.get('design_type', None) == 'merge': + ori_front_mask = result['front_mask'].copy() + ori_back_mask = result['back_mask'].copy() - if result['resize_scale'][0] == 1.0 and 
result['resize_scale'][1] == 1.0: - front_mask = result['front_mask'] - back_mask = result['back_mask'] - else: - height, width = result['front_mask'].shape[:2] - new_width = int(width * result['resize_scale'][0]) - new_height = int(height * result['resize_scale'][1]) - - front_mask = cv2.resize(result['front_mask'], (new_width, new_height), interpolation=cv2.INTER_AREA) - back_mask = cv2.resize(result['back_mask'], (new_width, new_height), interpolation=cv2.INTER_AREA) - - rgba_image = rgb_to_rgba(result['final_image'], front_mask + back_mask) - new_size = (int(rgba_image.shape[1] * result["scale"]), int(rgba_image.shape[0] * result["scale"])) - rgba_image = cv2.resize(rgba_image, new_size, interpolation=cv2.INTER_AREA) - result_front_image = np.zeros_like(rgba_image) - front_mask = cv2.resize(front_mask, new_size, interpolation=cv2.INTER_AREA) - result_front_image[front_mask != 0] = rgba_image[front_mask != 0] - result_front_image_pil = Image.fromarray(cvtColor(result_front_image, COLOR_BGR2RGBA)) - if 'transparent' in result.keys(): - # 用户自选区域transparent - transparent = result['transparent'] - if transparent['mask_url'] is not None and transparent['mask_url'] != "": - # 预处理用户自选区mask - seg_mask = oss_get_image(oss_client=self.minio_client, bucket=transparent['mask_url'].split('/')[0], object_name=transparent['mask_url'][transparent['mask_url'].find('/') + 1:], data_type="cv2") - seg_mask = cv2.resize(seg_mask, new_size, interpolation=cv2.INTER_AREA) - # 转换颜色空间为 RGB(OpenCV 默认是 BGR) - image_rgb = cv2.cvtColor(seg_mask, cv2.COLOR_BGR2RGB) - - r, g, b = cv2.split(image_rgb) - blue_mask = b > r - - # 创建红色和绿色掩码 - transparent_mask = np.array(blue_mask, dtype=np.uint8) * 255 - result_front_image_pil = sketch_to_transparent(result_front_image_pil, transparent_mask, transparent["scale"]) + if result['resize_scale'][0] == 1.0 and result['resize_scale'][1] == 1.0: + front_mask = result['front_mask'] + back_mask = result['back_mask'] else: - result_front_image_pil = 
sketch_to_transparent(result_front_image_pil, front_mask, transparent["scale"]) - result['front_image'], result["front_image_url"], _ = upload_png_mask(self.minio_client, result_front_image_pil, f'{generate_uuid()}', mask=None) + height, width = result['front_mask'].shape[:2] + new_width = int(width * result['resize_scale'][0]) + new_height = int(height * result['resize_scale'][1]) - # 前片部分 (红图部分) - # height, width = front_mask.shape - # mask_image = np.zeros((height, width, 3)) - # mask_image[front_mask != 0] = [0, 0, 255] + front_mask = cv2.resize(result['front_mask'], (new_width, new_height), interpolation=cv2.INTER_AREA) + back_mask = cv2.resize(result['back_mask'], (new_width, new_height), interpolation=cv2.INTER_AREA) + result['merge_image'] = cv2.resize(result['merge_image'], (new_width, new_height), interpolation=cv2.INTER_AREA) - # 切换为原始图片尺寸------------------------------- - height, width = ori_front_mask.shape - mask_image = np.zeros((height, width, 3)) - mask_image[ori_front_mask != 0] = [0, 0, 255] - # ----------------------------------------------- + rgba_image = rgb_to_rgba(result['merge_image'], front_mask + back_mask) + new_size = (int(rgba_image.shape[1] * result["scale"]), int(rgba_image.shape[0] * result["scale"])) + rgba_image = cv2.resize(rgba_image, new_size, interpolation=cv2.INTER_AREA) + result_front_image = np.zeros_like(rgba_image) + front_mask = cv2.resize(front_mask, new_size, interpolation=cv2.INTER_AREA) + result_front_image[front_mask != 0] = rgba_image[front_mask != 0] + result_front_image_pil = Image.fromarray(cv2.cvtColor(result_front_image, cv2.COLOR_BGR2RGBA)) + result['front_image'], result["front_image_url"], _ = upload_png_mask(self.minio_client, result_front_image_pil, f'{generate_uuid()}', mask=None) - # if result["name"] in ('blouse', 'dress', 'outwear', 'tops'): - # result_back_image = np.zeros_like(rgba_image) - # back_mask = cv2.resize(back_mask, new_size, interpolation=cv2.INTER_AREA) - # result_back_image[back_mask != 
0] = rgba_image[back_mask != 0] - # result_back_image_pil = Image.fromarray(cvtColor(result_back_image, COLOR_BGR2RGBA)) - # result['back_image'], result["back_image_url"], _ = upload_png_mask(self.minio_client, result_back_image_pil, f'{generate_uuid()}', mask=None) - # mask_image[back_mask != 0] = [0, 255, 0] - # - # rbga_mask = rgb_to_rgba(mask_image, front_mask + back_mask) - # mask_pil = Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) - # image_data = io.BytesIO() - # mask_pil.save(image_data, format='PNG') - # image_data.seek(0) - # image_bytes = image_data.read() - # req = oss_upload_image(oss_client=self.minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) - # result['mask_url'] = req.bucket_name + "/" + req.object_name - # else: - # rbga_mask = rgb_to_rgba(mask_image, front_mask) - # mask_pil = Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) - # image_data = io.BytesIO() - # mask_pil.save(image_data, format='PNG') - # image_data.seek(0) - # image_bytes = image_data.read() - # req = oss_upload_image(oss_client=self.minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) - # result['mask_url'] = req.bucket_name + "/" + req.object_name - # result['back_image'] = None - # result["back_image_url"] = None - # # result["back_mask_url"] = None - # # result['back_mask_image'] = None + height, width = ori_front_mask.shape + mask_image = np.zeros((height, width, 3)) + mask_image[ori_front_mask != 0] = [0, 0, 255] + mask_image[ori_back_mask != 0] = [0, 255, 0] + rbga_mask = rgb_to_rgba(mask_image, ori_front_mask + ori_back_mask) + mask_pil = Image.fromarray(cv2.cvtColor(rbga_mask.astype(np.uint8), cv2.COLOR_BGR2RGBA)) + image_data = io.BytesIO() + mask_pil.save(image_data, format='PNG') + image_data.seek(0) + image_bytes = image_data.read() + req = oss_upload_image(oss_client=self.minio_client, bucket="aida-clothing", 
object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) + result['mask_url'] = req.bucket_name + "/" + req.object_name - result_back_image = np.zeros_like(rgba_image) - back_mask = cv2.resize(back_mask, new_size, interpolation=cv2.INTER_AREA) - result_back_image[back_mask != 0] = rgba_image[back_mask != 0] - result_back_image_pil = Image.fromarray(cvtColor(result_back_image, COLOR_BGR2RGBA)) - result['back_image'], result["back_image_url"], _ = upload_png_mask(self.minio_client, result_back_image_pil, f'{generate_uuid()}', mask=None) - # mask_image[back_mask != 0] = [0, 255, 0] - mask_image[ori_back_mask != 0] = [0, 255, 0] + result_back_image = np.zeros_like(rgba_image) + back_mask = cv2.resize(back_mask, new_size, interpolation=cv2.INTER_AREA) + result_back_image[back_mask != 0] = rgba_image[back_mask != 0] + result_back_image_pil = Image.fromarray(cv2.cvtColor(result_back_image, cv2.COLOR_BGR2RGBA)) + result['back_image'], result["back_image_url"], _ = upload_png_mask(self.minio_client, result_back_image_pil, f'{generate_uuid()}', mask=None) + return result + else: + ori_front_mask = result['front_mask'].copy() + ori_back_mask = result['back_mask'].copy() - rbga_mask = rgb_to_rgba(mask_image, ori_front_mask + ori_back_mask) - mask_pil = Image.fromarray(cvtColor(rbga_mask.astype(np.uint8), COLOR_BGR2RGBA)) - image_data = io.BytesIO() - mask_pil.save(image_data, format='PNG') - image_data.seek(0) - image_bytes = image_data.read() - req = oss_upload_image(oss_client=self.minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) - result['mask_url'] = req.bucket_name + "/" + req.object_name + if result['resize_scale'][0] == 1.0 and result['resize_scale'][1] == 1.0: + front_mask = result['front_mask'] + back_mask = result['back_mask'] + else: + height, width = result['front_mask'].shape[:2] + new_width = int(width * result['resize_scale'][0]) + new_height = int(height * result['resize_scale'][1]) + 
front_mask = cv2.resize(result['front_mask'], (new_width, new_height), interpolation=cv2.INTER_AREA) + back_mask = cv2.resize(result['back_mask'], (new_width, new_height), interpolation=cv2.INTER_AREA) + + rgba_image = rgb_to_rgba(result['final_image'], front_mask + back_mask) + new_size = (int(rgba_image.shape[1] * result["scale"]), int(rgba_image.shape[0] * result["scale"])) + rgba_image = cv2.resize(rgba_image, new_size, interpolation=cv2.INTER_AREA) + result_front_image = np.zeros_like(rgba_image) + front_mask = cv2.resize(front_mask, new_size, interpolation=cv2.INTER_AREA) + result_front_image[front_mask != 0] = rgba_image[front_mask != 0] + result_front_image_pil = Image.fromarray(cv2.cvtColor(result_front_image, cv2.COLOR_BGR2RGBA)) + if 'transparent' in result.keys(): + # 用户自选区域transparent + transparent = result['transparent'] + if transparent['mask_url'] is not None and transparent['mask_url'] != "": + # 预处理用户自选区mask + seg_mask = oss_get_image(oss_client=self.minio_client, bucket=transparent['mask_url'].split('/')[0], object_name=transparent['mask_url'][transparent['mask_url'].find('/') + 1:], data_type="cv2") + seg_mask = cv2.resize(seg_mask, new_size, interpolation=cv2.INTER_AREA) + # 转换颜色空间为 RGB(OpenCV 默认是 BGR) + image_rgb = cv2.cvtColor(seg_mask, cv2.COLOR_BGR2RGB) + + r, g, b = cv2.split(image_rgb) + blue_mask = b > r + + # 创建红色和绿色掩码 + transparent_mask = np.array(blue_mask, dtype=np.uint8) * 255 + result_front_image_pil = sketch_to_transparent(result_front_image_pil, transparent_mask, transparent["scale"]) + else: + result_front_image_pil = sketch_to_transparent(result_front_image_pil, front_mask, transparent["scale"]) + result['front_image'], result["front_image_url"], _ = upload_png_mask(self.minio_client, result_front_image_pil, f'{generate_uuid()}', mask=None) + + height, width = ori_front_mask.shape + mask_image = np.zeros((height, width, 3)) + mask_image[ori_front_mask != 0] = [0, 0, 255] + + result_back_image = np.zeros_like(rgba_image) + 
back_mask = cv2.resize(back_mask, new_size, interpolation=cv2.INTER_AREA) + result_back_image[back_mask != 0] = rgba_image[back_mask != 0] + result_back_image_pil = Image.fromarray(cv2.cvtColor(result_back_image, cv2.COLOR_BGR2RGBA)) + result['back_image'], result["back_image_url"], _ = upload_png_mask(self.minio_client, result_back_image_pil, f'{generate_uuid()}', mask=None) + + # mask_image[back_mask != 0] = [0, 255, 0] + mask_image[ori_back_mask != 0] = [0, 255, 0] + + rbga_mask = rgb_to_rgba(mask_image, ori_front_mask + ori_back_mask) + mask_pil = Image.fromarray(cv2.cvtColor(rbga_mask.astype(np.uint8), cv2.COLOR_BGR2RGBA)) + image_data = io.BytesIO() + mask_pil.save(image_data, format='PNG') + image_data.seek(0) + image_bytes = image_data.read() + req = oss_upload_image(oss_client=self.minio_client, bucket="aida-clothing", object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes) + result['mask_url'] = req.bucket_name + "/" + req.object_name + + # 创建中间图层(未分割图层) 1.color + overall_print 2.color + overall_print + print + result_pattern_overall_image_pil = Image.fromarray(cv2.cvtColor(rgb_to_rgba(result['no_seg_sketch_overall'], ori_front_mask + ori_back_mask), cv2.COLOR_BGR2RGBA)) + result['pattern_overall_image'], result['pattern_overall_image_url'], _ = upload_png_mask(self.minio_client, result_pattern_overall_image_pil, f'{generate_uuid()}') + + result_pattern_print_image_pil = Image.fromarray(cv2.cvtColor(rgb_to_rgba(result['no_seg_sketch_print'], ori_front_mask + ori_back_mask), cv2.COLOR_BGR2RGBA)) + result['pattern_print_image'], result['pattern_print_image_url'], _ = upload_png_mask(self.minio_client, result_pattern_print_image_pil, f'{generate_uuid()}') + return result + else: + ori_front_mask, ori_back_mask = None, None # 创建中间图层(未分割图层) 1.color + overall_print 2.color + overall_print + print - result_pattern_overall_image_pil = Image.fromarray(cvtColor(rgb_to_rgba(result['no_seg_sketch_overall'], ori_front_mask + ori_back_mask), 
COLOR_BGR2RGBA)) + result_pattern_overall_image_pil = Image.fromarray(cv2.cvtColor(rgb_to_rgba(result['no_seg_sketch_overall'], ori_front_mask + ori_back_mask), cv2.COLOR_BGR2RGBA)) result['pattern_overall_image'], result['pattern_overall_image_url'], _ = upload_png_mask(self.minio_client, result_pattern_overall_image_pil, f'{generate_uuid()}') - result_pattern_print_image_pil = Image.fromarray(cvtColor(rgb_to_rgba(result['no_seg_sketch_print'], ori_front_mask + ori_back_mask), COLOR_BGR2RGBA)) + result_pattern_print_image_pil = Image.fromarray(cv2.cvtColor(rgb_to_rgba(result['no_seg_sketch_print'], ori_front_mask + ori_back_mask), cv2.COLOR_BGR2RGBA)) result['pattern_print_image'], result['pattern_print_image_url'], _ = upload_png_mask(self.minio_client, result_pattern_print_image_pil, f'{generate_uuid()}') return result + except Exception as e: logging.warning(f"split runtime exception : {e} image_id : {result['image_id']}") diff --git a/app/service/design_fast/utils/design_ensemble.py b/app/service/design_fast/utils/design_ensemble.py index 8eef4f2..9aa674c 100644 --- a/app/service/design_fast/utils/design_ensemble.py +++ b/app/service/design_fast/utils/design_ensemble.py @@ -15,7 +15,7 @@ import numpy as np import torch import tritonclient.http as httpclient -from app.core.config import * +from app.core.config import DESIGN_MODEL_URL, DESIGN_MODEL_NAME """ keypoint @@ -98,29 +98,29 @@ def seg_preprocess(img_path): # @ RunTime -def get_seg_result(image_id, image): +def get_seg_result(image): image, ori_shape = seg_preprocess(image) - client = httpclient.InferenceServerClient(url=f"{DESIGN_MODEL_URL}") + client = httpclient.InferenceServerClient(url=DESIGN_MODEL_URL) transformed_img = image.astype(np.float32) # 输入集 inputs = [ - httpclient.InferInput(SEGMENTATION['input'], transformed_img.shape, datatype="FP32") + httpclient.InferInput("seg_input__0", transformed_img.shape, datatype="FP32") ] inputs[0].set_data_from_numpy(transformed_img, binary_data=True) # 输出集 
outputs = [ - httpclient.InferRequestedOutput(SEGMENTATION['output'], binary_data=True), + httpclient.InferRequestedOutput("seg_output__0", binary_data=True), ] - results = client.infer(model_name=SEGMENTATION['new_model_name'], inputs=inputs, outputs=outputs) + results = client.infer(model_name=DESIGN_MODEL_NAME, inputs=inputs, outputs=outputs) # 推理 # 取结果 - inference_output1 = results.as_numpy(SEGMENTATION['output']) - seg_result = seg_postprocess(int(image_id), inference_output1, ori_shape) + inference_output1 = results.as_numpy("seg_output__0") + seg_result = seg_postprocess(inference_output1, ori_shape) return seg_result # no cache -def seg_postprocess(image_id, output, ori_shape): +def seg_postprocess(output, ori_shape): seg_logit = cv2.resize(output[0][0].astype(np.uint8), (ori_shape[1] + 50, ori_shape[0] + 50)) seg_logit = seg_logit[25: - 25, 25: - 25] return seg_logit diff --git a/app/service/design_fast/utils/organize.py b/app/service/design_fast/utils/organize.py index ce4a961..82e8026 100644 --- a/app/service/design_fast/utils/organize.py +++ b/app/service/design_fast/utils/organize.py @@ -23,20 +23,23 @@ def organize_clothing(layer): front_layer = dict(priority=layer['priority'] if layer.get("layer_order", False) else PRIORITY_DICT.get(f'{layer["name"].lower()}_front', None), name=f'{layer["name"].lower()}_front', image=layer["front_image"], + merge_image=layer["front_image"], # mask_image=layer['front_mask_image'], image_url=layer['front_image_url'], - mask_url=layer['mask_url'], + mask_url=layer.get("mask_url", None), sacle=layer['scale'], clothes_keypoint=layer['clothes_keypoint'], position=start_point, resize_scale=layer["resize_scale"], mask=cv2.resize(layer['mask'], layer["front_image"].size), gradient_string=layer['gradient_string'] if 'gradient_string' in layer.keys() else "", - pattern_overall_image_url=layer['pattern_overall_image_url'], - pattern_print_image_url=layer['pattern_print_image_url'], + 
pattern_overall_image_url=layer.get('pattern_overall_image_url', None), + pattern_print_image_url=layer.get('pattern_print_image_url', None), - pattern_image=layer['pattern_image'], + pattern_image=layer.get('pattern_image', None), # back_perspective_url=layer['back_perspective_url'] if 'back_perspective_url' in layer.keys() else "" + transpose=layer.get("transpose", [1, 1]), # 默认为1, 1代表不镜像 + rotate=layer.get('rotate', 0), ) # 后片数据 back_layer = dict(priority=-layer.get("priority", 0) if layer.get("layer_order", False) else PRIORITY_DICT.get(f'{layer["name"].lower()}_back', None), @@ -44,16 +47,18 @@ def organize_clothing(layer): image=layer["back_image"], # mask_image=layer['back_mask_image'], image_url=layer['back_image_url'], - mask_url=layer['mask_url'], + mask_url=layer.get('mask_url', None), sacle=layer['scale'], clothes_keypoint=layer['clothes_keypoint'], position=start_point, resize_scale=layer["resize_scale"], mask=cv2.resize(layer['mask'], layer["front_image"].size), gradient_string=layer['gradient_string'] if 'gradient_string' in layer.keys() else "", - pattern_overall_image_url=layer['pattern_overall_image_url'], - pattern_print_image_url=layer['pattern_print_image_url'], + pattern_overall_image_url=layer.get('pattern_overall_image_url', None), + pattern_print_image_url=layer.get('pattern_print_image_url', None), # back_perspective_url=layer['back_perspective_url'] if 'back_perspective_url' in layer.keys() else "" + transpose=layer.get("transpose", [1, 1]), # 默认为1, 1代表不镜像 + rotate=layer.get('rotate', 0), ) return front_layer, back_layer @@ -74,35 +79,35 @@ def organize_others(layer): front_layer = dict(priority=layer['priority'] if layer.get("layer_order", False) else PRIORITY_DICT.get(f'{layer["name"].lower()}_front', None), name=f'{layer["name"].lower()}_front', image=layer["front_image"], - # mask_image=layer['front_mask_image'], + mask_image=layer['front_mask_image'], image_url=layer['front_image_url'], - mask_url=layer['mask_url'], + 
mask_url=layer.get('mask_url', None), sacle=layer['scale'], clothes_keypoint=(0, 0), position=start_point, resize_scale=layer["resize_scale"], mask=cv2.resize(layer['mask'], layer["front_image"].size), gradient_string=layer['gradient_string'] if 'gradient_string' in layer.keys() else "", - pattern_overall_image_url=layer['pattern_overall_image_url'], - pattern_print_image_url=layer['pattern_print_image_url'], - pattern_image=layer['pattern_image'], + pattern_overall_image_url=layer.get('pattern_overall_image_url', None), + pattern_print_image_url=layer.get('pattern_print_image_url', None), + pattern_image=layer.get('pattern_image', None), # back_perspective_url=layer['back_perspective_url'] if 'back_perspective_url' in layer.keys() else "" ) # 后片数据 back_layer = dict(priority=-layer.get("priority", 0) if layer.get("layer_order", False) else PRIORITY_DICT.get(f'{layer["name"].lower()}_back', None), name=f'{layer["name"].lower()}_back', image=layer["back_image"], - # mask_image=layer['back_mask_image'], + mask_image=layer['back_mask_image'], image_url=layer['back_image_url'], - mask_url=layer['mask_url'], + mask_url=layer.get('mask_url', None), sacle=layer['scale'], clothes_keypoint=(0, 0), position=start_point, resize_scale=layer["resize_scale"], mask=cv2.resize(layer['mask'], layer["front_image"].size), gradient_string=layer['gradient_string'] if 'gradient_string' in layer.keys() else "", - pattern_overall_image_url=layer['pattern_overall_image_url'], - pattern_print_image_url=layer['pattern_print_image_url'], + pattern_overall_image_url=layer.get('pattern_overall_image_url', None), + pattern_print_image_url=layer.get('pattern_print_image_url', None), # back_perspective_url=layer['back_perspective_url'] if 'back_perspective_url' in layer.keys() else "" ) return front_layer, back_layer @@ -112,6 +117,8 @@ def calculate_start_point(keypoint_type, scale, clothes_point, body_point, offse """ Align left Args: + offset: + resize_scale: keypoint_type: string, "waistband" | 
"shoulder" | "ear_point" scale: float clothes_point: dict{'left': [x1, y1, z1], 'right': [x2, y2, z2]} diff --git a/app/service/design_fast/utils/progress.py b/app/service/design_fast/utils/progress.py index 0f2c9cf..e970639 100644 --- a/app/service/design_fast/utils/progress.py +++ b/app/service/design_fast/utils/progress.py @@ -1,6 +1,6 @@ import logging -from app.service.design_fast.utils.redis_utils import Redis +from app.service.utils.redis_utils import Redis logger = logging.getLogger(__name__) diff --git a/app/service/design_fast/utils/redis_utils.py b/app/service/design_fast/utils/redis_utils.py deleted file mode 100644 index 012fbe0..0000000 --- a/app/service/design_fast/utils/redis_utils.py +++ /dev/null @@ -1,99 +0,0 @@ -import redis - -from app.core.config import REDIS_HOST, REDIS_PORT - - -class Redis(object): - """ - redis数据库操作 - """ - - @staticmethod - def _get_r(): - host = REDIS_HOST - port = REDIS_PORT - db = 0 - r = redis.StrictRedis(host, port, db) - return r - - @classmethod - def write(cls, key, value, expire=None): - """ - 写入键值对 - """ - # 判断是否有过期时间,没有就设置默认值 - if expire: - expire_in_seconds = expire - else: - expire_in_seconds = 100 - r = cls._get_r() - r.set(key, value, ex=expire_in_seconds) - - @classmethod - def read(cls, key): - """ - 读取键值对内容 - """ - r = cls._get_r() - value = r.get(key) - return value.decode('utf-8') if value else value - - @classmethod - def hset(cls, name, key, value): - """ - 写入hash表 - """ - r = cls._get_r() - r.hset(name, key, value) - - @classmethod - def hget(cls, name, key): - """ - 读取指定hash表的键值 - """ - r = cls._get_r() - value = r.hget(name, key) - return value.decode('utf-8') if value else value - - @classmethod - def hgetall(cls, name): - """ - 获取指定hash表所有的值 - """ - r = cls._get_r() - return r.hgetall(name) - - @classmethod - def delete(cls, *names): - """ - 删除一个或者多个 - """ - r = cls._get_r() - r.delete(*names) - - @classmethod - def hdel(cls, name, key): - """ - 删除指定hash表的键值 - """ - r = cls._get_r() - 
r.hdel(name, key) - - @classmethod - def expire(cls, name, expire=None): - """ - 设置过期时间 - """ - if expire: - expire_in_seconds = expire - else: - expire_in_seconds = 100 - r = cls._get_r() - r.expire(name, expire_in_seconds) - - -if __name__ == '__main__': - redis_client = Redis() - # print(redis_client.write(key="1230", value=0)) - redis_client.write(key="1230", value=10) - # print(redis_client.read(key="1230")) diff --git a/app/service/design_fast/utils/synthesis_item.py b/app/service/design_fast/utils/synthesis_item.py index 606b1b5..ff44157 100644 --- a/app/service/design_fast/utils/synthesis_item.py +++ b/app/service/design_fast/utils/synthesis_item.py @@ -13,9 +13,12 @@ import logging import cv2 import numpy as np from PIL import Image - +from minio import Minio +from app.core.config import settings from app.service.utils.generate_uuid import generate_uuid -from app.service.utils.oss_client import oss_upload_image +from app.service.utils.new_oss_client import oss_upload_image + +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) def positioning(all_mask_shape, mask_shape, offset): @@ -148,9 +151,11 @@ def synthesis(data, size, basic_info): if layer['image'] is not None: if layer['name'] != "body": test_image = Image.new('RGBA', size, (0, 0, 0, 0)) - test_image.paste(layer['image'], (layer['adaptive_position'][1], layer['adaptive_position'][0]), layer['image']) + paste_img, position = transpose_rotate(layer, layer['image']) + test_image.paste(paste_img, position, paste_img) mask_data = np.where(all_mask > 0, 255, 0).astype(np.uint8) mask_alpha = Image.fromarray(mask_data) + mask_alpha.paste(paste_img.getchannel('A'), position, paste_img.getchannel('A')) cropped_image = Image.composite(test_image, Image.new("RGBA", test_image.size, (255, 255, 255, 0)), mask_alpha) base_image.paste(test_image, (0, 0), cropped_image) # test_image 已经按照坐标贴到最大宽值的图片上 坐着这里坐标为00 else: @@ -166,7 
+171,7 @@ def synthesis(data, size, basic_info): image_bytes = image_data.read() bucket_name = "aida-results" object_name = f'result_{generate_uuid()}.png' - req = oss_upload_image(bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) + oss_upload_image(oss_client=minio_client, bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) return f"{bucket_name}/{object_name}" # return f"aida-results/{minio_client.put_object('aida-results', f'result_{generate_uuid()}.png', io.BytesIO(image_bytes), len(image_bytes), content_type='image/png').object_name}" @@ -182,6 +187,111 @@ def synthesis(data, size, basic_info): logging.warning(f"synthesis runtime exception : {e}") +def merge(data, size, basic_info): + # out_of_bounds_control: 是否允许服装越界 True 允许 False 不允许 默认情况允许 + out_of_bounds_control = basic_info.get('out_of_bounds_control', True) + # 创建底图 + base_image = Image.new('RGBA', size, (0, 0, 0, 0)) + try: + all_mask_shape = (size[1], size[0]) + body_mask = None + for d in data: + if d['name'] == 'body' or d['name'] == 'mannequin': + # 创建一个新的宽高透明图像, 把模特贴上去获取mask + transparent_image = Image.new("RGBA", size, (0, 0, 0, 0)) + transparent_image.paste(d['image'], (d['adaptive_position'][1], d['adaptive_position'][0]), d['image']) # 此处可变数组会被paste篡改值,所以使用下标获取position + body_mask = np.array(transparent_image.split()[3]) + + # 根据新的坐标获取新的肩点 + left_shoulder = [x + y for x, y in zip(basic_info['body_point_test']['shoulder_left'], [d['adaptive_position'][1], d['adaptive_position'][0]])] + right_shoulder = [x + y for x, y in zip(basic_info['body_point_test']['shoulder_right'], [d['adaptive_position'][1], d['adaptive_position'][0]])] + body_mask[:min(left_shoulder[1], right_shoulder[1]), left_shoulder[0]:right_shoulder[0]] = 255 + _, binary_body_mask = cv2.threshold(body_mask, 127, 255, cv2.THRESH_BINARY) + top_outer_mask = np.array(binary_body_mask) + bottom_outer_mask = np.array(binary_body_mask) + others_outer_mask = np.array(binary_body_mask) + + top = True + 
bottom = True + others = True + i = len(data) + while i: + i -= 1 + if top and data[i]['name'] in ["blouse_front", "outwear_front", "dress_front", "tops_front"]: + if out_of_bounds_control: + top = True + else: + top = False + mask_shape = data[i]['mask'].shape + y_offset, x_offset = data[i]['adaptive_position'] + # 初始化叠加区域的起始和结束位置 + all_y_start, all_y_end, mask_y_start, mask_y_end = positioning(all_mask_shape=all_mask_shape[0], mask_shape=mask_shape[0], offset=y_offset) + all_x_start, all_x_end, mask_x_start, mask_x_end = positioning(all_mask_shape=all_mask_shape[1], mask_shape=mask_shape[1], offset=x_offset) + # 将叠加区域赋值为相应的像素值 + _, sketch_mask = cv2.threshold(data[i]['mask'], 127, 255, cv2.THRESH_BINARY) + background = np.zeros_like(top_outer_mask) + background[all_y_start:all_y_end, all_x_start:all_x_end] = sketch_mask[mask_y_start:mask_y_end, mask_x_start:mask_x_end] + top_outer_mask = background + top_outer_mask + elif bottom and data[i]['name'] in ["trousers_front", "skirt_front", "bottoms_front", "dress_front"]: + # bottom = False + mask_shape = data[i]['mask'].shape + y_offset, x_offset = data[i]['adaptive_position'] + # 初始化叠加区域的起始和结束位置 + all_y_start, all_y_end, mask_y_start, mask_y_end = positioning(all_mask_shape=all_mask_shape[0], mask_shape=mask_shape[0], offset=y_offset) + all_x_start, all_x_end, mask_x_start, mask_x_end = positioning(all_mask_shape=all_mask_shape[1], mask_shape=mask_shape[1], offset=x_offset) + # 将叠加区域赋值为相应的像素值 + _, sketch_mask = cv2.threshold(data[i]['mask'], 127, 255, cv2.THRESH_BINARY) + background = np.zeros_like(top_outer_mask) + background[all_y_start:all_y_end, all_x_start:all_x_end] = sketch_mask[mask_y_start:mask_y_end, mask_x_start:mask_x_end] + bottom_outer_mask = background + bottom_outer_mask + elif others and data[i]['name'] in ['others_front']: + mask_shape = data[i]['mask'].shape + y_offset, x_offset = data[i]['adaptive_position'] + # 初始化叠加区域的起始和结束位置 + all_y_start, all_y_end, mask_y_start, mask_y_end = 
positioning(all_mask_shape=all_mask_shape[0], mask_shape=mask_shape[0], offset=y_offset) + all_x_start, all_x_end, mask_x_start, mask_x_end = positioning(all_mask_shape=all_mask_shape[1], mask_shape=mask_shape[1], offset=x_offset) + # 将叠加区域赋值为相应的像素值 + _, sketch_mask = cv2.threshold(data[i]['mask'], 127, 255, cv2.THRESH_BINARY) + background = np.zeros_like(top_outer_mask) + background[all_y_start:all_y_end, all_x_start:all_x_end] = sketch_mask[mask_y_start:mask_y_end, mask_x_start:mask_x_end] + others_outer_mask = background + others_outer_mask + pass + elif bottom is False and top is False: + break + + all_mask = cv2.bitwise_or(top_outer_mask, bottom_outer_mask) + all_mask = cv2.bitwise_or(all_mask, others_outer_mask) + + for layer in data: + if layer['image'] is not None: + if layer['name'] != "body": + test_image = Image.new('RGBA', size, (0, 0, 0, 0)) + paste_img, position = transpose_rotate(layer, layer['image']) + test_image.paste(paste_img, position, paste_img) + mask_data = np.where(all_mask > 0, 255, 0).astype(np.uint8) + mask_alpha = Image.fromarray(mask_data) + mask_alpha.paste(paste_img.getchannel('A'), position, paste_img.getchannel('A')) + cropped_image = Image.composite(test_image, Image.new("RGBA", test_image.size, (255, 255, 255, 0)), mask_alpha) + base_image.paste(test_image, (0, 0), cropped_image) # test_image 已经按照坐标贴到最大宽值的图片上 坐着这里坐标为00 + else: + base_image.paste(layer['merge_image'], (layer['adaptive_position'][1], layer['adaptive_position'][0]), layer['merge_image']) + + result_image = base_image + + image_data = io.BytesIO() + result_image.save(image_data, format='PNG') + image_data.seek(0) + + # oss upload + image_bytes = image_data.read() + bucket_name = "aida-results" + object_name = f'result_{generate_uuid()}.png' + oss_upload_image(oss_client=minio_client, bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) + return f"{bucket_name}/{object_name}" + except Exception as e: + logging.warning(f"synthesis runtime exception : 
{e}") + + def synthesis_single(front_image, back_image): result_image = None if front_image: @@ -207,11 +317,11 @@ def synthesis_single(front_image, back_image): # oss upload bucket_name = 'aida-results' object_name = f'result_{generate_uuid()}.png' - req = oss_upload_image(bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) + oss_upload_image(oss_client=minio_client, bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) return f"{bucket_name}/{object_name}" -def update_base_size_priority(layers, size): +def update_base_size_priority(layers): # 计算透明背景图片的宽度 min_x = min(info['position'][1] for info in layers) x_list = [] @@ -229,3 +339,35 @@ def update_base_size_priority(layers, size): for info in layers: info['adaptive_position'] = (info['position'][0], info['position'][1] - min_x) return layers, (new_width, new_height) + + +def transpose_rotate(layer, image): + # transpose[0]是左右 transpose[1]是上下 + transpose = layer.get('transpose', [1, 1]) # 默认为1, 1代表不镜像 + + rotate = layer.get('rotate', 0) + paste_x, paste_y = layer['adaptive_position'][1], layer['adaptive_position'][0] + + # transpose左右是1 上下是-1 + if transpose[0] != 1: + # 左右 + image = image.transpose(0) + + if transpose[1] != 1: + # 上下 + image = image.transpose(1) + + if rotate: + image = image.rotate(-rotate, expand=True) + # 4. 
计算粘贴位置以保持视觉中心一致 + # 原本 (15, 36) 是 288*288 的左上角,我们计算其中心点 + target_center_x = 15 + 288 // 2 + target_center_y = 36 + 288 // 2 + + # 获取旋转后图像的新尺寸 + new_w, new_h = image.size + + # 计算新的左上角坐标,使得旋转后的图像中心依然在原定的中心位置 + paste_x = target_center_x - new_w // 2 + paste_y = target_center_y - new_h // 2 + return image, (paste_x, paste_y) diff --git a/app/service/design_fast/utils/upload_image.py b/app/service/design_fast/utils/upload_image.py index 2c79f9f..3e3dd2c 100644 --- a/app/service/design_fast/utils/upload_image.py +++ b/app/service/design_fast/utils/upload_image.py @@ -12,7 +12,6 @@ import logging import cv2 -from app.core.config import * from app.service.utils.new_oss_client import oss_upload_image @@ -25,15 +24,15 @@ def upload_png_mask(minio_client, front_image, object_name, mask=None): # 将掩模的3通道转换为4通道,白色部分不透明,黑色部分透明 rgba_image = cv2.cvtColor(mask_inverted, cv2.COLOR_BGR2BGRA) rgba_image[rgba_image[:, :, 0] == 0] = [0, 0, 0, 0] - req = oss_upload_image(oss_client=minio_client, bucket=AIDA_CLOTHING, object_name=f"mask/mask_{object_name}.png", image_bytes=cv2.imencode('.png', rgba_image)[1]) - mask_url = f"{AIDA_CLOTHING}/mask/mask_{object_name}.png" + req = oss_upload_image(oss_client=minio_client, bucket="aida-clothing", object_name=f"mask/mask_{object_name}.png", image_bytes=cv2.imencode('.png', rgba_image)[1]) + mask_url = f"aida-clothing/mask/mask_{object_name}.png" image_data = io.BytesIO() front_image.save(image_data, format='PNG') image_data.seek(0) image_bytes = image_data.read() - req = oss_upload_image(oss_client=minio_client, bucket=AIDA_CLOTHING, object_name=f"image/image_{object_name}.png", image_bytes=image_bytes) - image_url = f"{AIDA_CLOTHING}/image/image_{object_name}.png" + req = oss_upload_image(oss_client=minio_client, bucket="aida-clothing", object_name=f"image/image_{object_name}.png", image_bytes=image_bytes) + image_url = f"aida-clothing/image/image_{object_name}.png" return front_image, image_url, mask_url except Exception as e: 
logging.warning(f"upload_png_mask runtime exception : {e}") diff --git a/app/service/design_pre_processing/service.py b/app/service/design_pre_processing/service.py index 636360c..9c9dd66 100644 --- a/app/service/design_pre_processing/service.py +++ b/app/service/design_pre_processing/service.py @@ -1,19 +1,22 @@ import logging +import os import time import cv2 import numpy as np import torch import tritonclient.grpc as grpcclient -from pymilvus import MilvusClient +from minio import Minio +# from pymilvus import MilvusClient from urllib3.exceptions import ResponseError -from app.core.config import * +from app.core.config import settings, SR_MODEL_NAME, SR_TRITON_URL, MILVUS_TABLE_KEYPOINT, KEYPOINT_RESULT_TABLE_FIELD_SET from app.schemas.pre_processing import DesignPreProcessingModel from app.service.design_fast.utils.design_ensemble import get_seg_result, get_keypoint_result -from app.service.utils.oss_client import oss_get_image, oss_upload_image +from app.service.utils.new_oss_client import oss_get_image, oss_upload_image logger = logging.getLogger() +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) class DesignPreprocessing: @@ -46,20 +49,36 @@ class DesignPreprocessing: del d['keypoint_result'] return result - def read_image(self, image_list): + @staticmethod + def read_image(image_list): for obj in image_list: # file = self.minio_client.get_object(obj['image_url'].split("/", 1)[0], obj['image_url'].split("/", 1)[1]).data # image = cv2.imdecode(np.frombuffer(file, np.uint8), 1) - image = oss_get_image(bucket=obj['image_url'].split("/", 1)[0], object_name=obj['image_url'].split("/", 1)[1], data_type="cv2") + image = oss_get_image(oss_client=minio_client, bucket=obj['image_url'].split("/", 1)[0], object_name=obj['image_url'].split("/", 1)[1], data_type="cv2") if len(image.shape) == 2: image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) elif image.shape[2] == 4: # 如果是四通道 mask - 
image = image[:, :, :3] + # 分离RGB和Alpha通道 + bgr = image[:, :, :3] + alpha = image[:, :, 3] + + # 创建白色背景(也可改为其他颜色,如(255,255,255)就是白色) + background_color = (255, 255, 255) + background = np.full_like(bgr, background_color) + + # 将Alpha通道转换为掩码(0=透明,255=不透明) + alpha_mask = alpha / 255.0 # 归一化到0-1 + alpha_mask = np.expand_dims(alpha_mask, axis=-1) # 扩展维度,方便广播计算 + + # 混合背景和原图:透明区域显示背景色,不透明区域显示原图 + image = (bgr * alpha_mask + background * (1 - alpha_mask)).astype(np.uint8) + # 此时image已经是3通道RGB,无需再执行image = image[:, :, :3] obj["image_obj"] = image return image_list # @ RunTime - def bounding_box(self, image_list): + @staticmethod + def bounding_box(image_list): for item in image_list: image = item['image_obj'] height, width = image.shape[:2] @@ -77,11 +96,6 @@ class DesignPreprocessing: x_max = max(x_max, x + w) y_max = max(y_max, y + h) - if IF_DEBUG_SHOW: - image_with_big_rect = cv2.rectangle(image.copy(), (x_min, y_min), (x_max, y_max), (0, 255, 0), 2) - cv2.imshow("bounding_box image", image_with_big_rect) - cv2.waitKey(0) - # 根据大矩形的坐标来裁剪原始图像 if len(contours) > 0: cropped_image = image[y_min:y_max, x_min:x_max] @@ -107,7 +121,8 @@ class DesignPreprocessing: item['obj'] = padded_image return image_list - def super_resolution(self, image_list): + @staticmethod + def super_resolution(image_list): for item in image_list: # 判断 两边是否同时都小于512 因为此处做四倍超分 if item['obj'].shape[0] <= 512 and item['obj'].shape[1] <= 512: @@ -136,7 +151,7 @@ class DesignPreprocessing: # self.minio_client.put_object(item['image_url'].split("/", 1)[0], item['image_url'].split("/", 1)[1], io.BytesIO(image_bytes), len(image_bytes), content_type="image/jpeg", ) bucket_name = item['image_url'].split("/", 1)[0] object_name = item['image_url'].split("/", 1)[1] - oss_upload_image(bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) + oss_upload_image(oss_client=minio_client, bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) logging.info(f"Object 
'{item['image_url'].split('/', 1)[1]}' overwritten successfully.") except ResponseError as err: logging.warning(f"Error: {err}") @@ -144,7 +159,6 @@ class DesignPreprocessing: # @ RunTime def infer_image(self, image_list): - seg_result = None for sketch in image_list: # 小写 image_category = sketch['image_category'].lower() @@ -156,36 +170,17 @@ class DesignPreprocessing: _, seg_cache = self.load_seg_result(sketch['image_id']) if not _: # 推理获得seg 结果 - seg_result = get_seg_result(sketch["image_id"], sketch['obj'])[0] + seg_result = get_seg_result(sketch['obj'])[0] self.save_seg_result(seg_result, sketch['image_id']) logger.info(f"{sketch['image_id']} image size is :{sketch['obj'].shape} , seg cache size is :{seg_result.shape}") else: logger.info(f"{sketch['image_id']} image size is :{sketch['obj'].shape} , seg cache size is :{seg_cache.shape}") - if IF_DEBUG_SHOW: - debug_show_image = sketch['obj'].copy() - points_list = [] - point_size = 1 - point_color = (0, 0, 255) # BGR - thickness = 4 # 可以为 0 、4、8 - for i in sketch['keypoint_result'].values(): - points_list.append((int(i[1]), int(i[0]))) - for point in points_list: - cv2.circle(debug_show_image, point, point_size, point_color, thickness) - cv2.imshow("seg_result", seg_result) - cv2.imshow("", debug_show_image) - cv2.waitKey(0) - # # 关键点在上部则推理seg - # if sketch["site"] == "up": - # # 判断seg缓存是否存在,是否与当前图片shape一致 - # seg_result = self.search_seg_result(sketch["image_id"], sketch["obj"].shape) - # if seg_result is False: - # # 推理seg + 保存 - # seg_result = get_seg_result(sketch['image_id'], sketch['obj']) return image_list # @ RunTime - def composing_image(self, image_list): + @staticmethod + def composing_image(image_list): for image in image_list: ''' 比例相同 整合上下装代码''' image_width = image['obj'].shape[1] @@ -193,22 +188,20 @@ class DesignPreprocessing: scale = 0.4 if waist_width / scale >= image_width: add_width = int((waist_width / scale - image_width) / 2) - ret = cv2.copyMakeBorder(image['obj'], 0, 0, add_width, 
add_width, cv2.BORDER_CONSTANT, value=(256, 256, 256)) - if IF_DEBUG_SHOW: - cv2.imshow("composing_image", ret) - cv2.waitKey(0) - image_bytes = cv2.imencode(".jpg", ret)[1].tobytes() + ret = cv2.copyMakeBorder(image['obj'], 0, 0, add_width, add_width, cv2.BORDER_CONSTANT, value=(255, 255, 255)) + img_rgba = cv2.cvtColor(ret, cv2.COLOR_RGB2RGBA) + image_bytes = cv2.imencode(".png", img_rgba)[1].tobytes() # image['show_image_url'] = f"{image['image_url'].split('/', 1)[0]}/{self.minio_client.put_object(image['image_url'].split('/', 1)[0], image['image_url'].split('/', 1)[1].replace('.', '-show.'), io.BytesIO(image_bytes), len(image_bytes), content_type='image/jpeg').object_name}" bucket_name = image['image_url'].split('/', 1)[0] object_name = image['image_url'].split('/', 1)[1].replace('.', '-show.') - oss_upload_image(bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) + oss_upload_image(oss_client=minio_client, bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) image['show_image_url'] = f"{bucket_name}/{object_name}" else: image_bytes = cv2.imencode(".jpg", image['obj'])[1].tobytes() # image['show_image_url'] = f"{image['image_url'].split('/', 1)[0]}/{self.minio_client.put_object(image['image_url'].split('/', 1)[0], image['image_url'].split('/', 1)[1].replace('.', '-show.'), io.BytesIO(image_bytes), len(image_bytes), content_type='image/jpeg').object_name}" bucket_name = image['image_url'].split('/', 1)[0] object_name = image['image_url'].split('/', 1)[1].replace('.', '-show.') - oss_upload_image(bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) + oss_upload_image(oss_client=minio_client, bucket=bucket_name, object_name=object_name, image_bytes=image_bytes) image['show_image_url'] = f"{bucket_name}/{object_name}" # if image['site'] == 'down': @@ -261,7 +254,7 @@ class DesignPreprocessing: @staticmethod def load_seg_result(image_id): - file_path = f"{SEG_CACHE_PATH}{image_id}.npy" + file_path = 
f"{settings.SEG_CACHE_PATH}{image_id}.npy" try: seg_result = np.load(file_path) return True, seg_result @@ -274,7 +267,7 @@ class DesignPreprocessing: @staticmethod def save_seg_result(seg_result, image_id): - file_path = f"{SEG_CACHE_PATH}{image_id}.npy" + file_path = f"{settings.SEG_CACHE_PATH}{image_id}.npy" try: np.save(file_path, seg_result) logging.debug(f"保存成功,{os.path.abspath(file_path)}") @@ -283,14 +276,15 @@ class DesignPreprocessing: def keypoint_cache(self, sketch): try: - client = MilvusClient(uri=MILVUS_URL, token=MILVUS_TOKEN, db_name=MILVUS_ALIAS) + # client = MilvusClient(uri=settings.MILVUS_URL, token=settings.MILVUS_TOKEN, db_name=settings.MILVUS_ALIAS) keypoint_id = sketch['image_id'] - res = client.query( - collection_name=MILVUS_TABLE_KEYPOINT, - # ids=[keypoint_id], - filter=f"keypoint_id == {keypoint_id}", - output_fields=['keypoint_vector', 'keypoint_site'] - ) + # res = client.query( + # collection_name=MILVUS_TABLE_KEYPOINT, + # # ids=[keypoint_id], + # filter=f"keypoint_id == {keypoint_id}", + # output_fields=['keypoint_vector', 'keypoint_site'] + # ) + res = [] if len(res) == 0: # 没有结果 直接推理拿结果 并保存 keypoint_infer_result = self.infer_keypoint_result(sketch) @@ -307,7 +301,8 @@ class DesignPreprocessing: return False # @ RunTime - def infer_keypoint_result(self, sketch): + @staticmethod + def infer_keypoint_result(sketch): keypoint_infer_result = get_keypoint_result(sketch["obj"], sketch['site']) # 推理结果 return keypoint_infer_result @@ -320,14 +315,14 @@ class DesignPreprocessing: else: zeros = np.zeros(4, dtype=int) result = np.concatenate([keypoint_infer_result.flatten(), zeros]) - data = [ - [int(sketch['image_id'])], - [sketch['site']], - [result.tolist()] - ] + # [ + # [int(sketch['image_id'])], + # [sketch['site']], + # [result.tolist()] + # ] try: # connections.connect(alias=MILVUS_ALIAS, host=MILVUS_DB_HOST, port=MILVUS_PORT) - start_time = time.time() + time.time() # collection = Collection(MILVUS_TABLE_KEYPOINT) # Get an existing 
collection. # mr = collection.insert(data) # logging.info(f"save keypoint time : {time.time() - start_time}") @@ -344,11 +339,11 @@ class DesignPreprocessing: else: # 需要的是down 即推理出来的是down 那么查询的就是up result = np.concatenate([search_result[:20], infer_result.flatten()]) - data = [ - [int(sketch['image_id'])], - ["all"], - [result.tolist()] - ] + # [ + # [int(sketch['image_id'])], + # ["all"], + # [result.tolist()] + # ] try: # connections.connect(alias=MILVUS_ALIAS, host=MILVUS_DB_HOST, port=MILVUS_PORT) # start_time = time.time() diff --git a/app/service/generate_batch_image/service_batch_generate_product_image.py b/app/service/generate_batch_image/service_batch_generate_product_image.py index 570354a..4707cbf 100644 --- a/app/service/generate_batch_image/service_batch_generate_product_image.py +++ b/app/service/generate_batch_image/service_batch_generate_product_image.py @@ -13,17 +13,19 @@ import logging import cv2 import numpy as np +import pika import tritonclient.grpc as grpcclient from PIL import Image from celery import Celery from tritonclient.utils import np_to_triton_dtype -from app.core.config import * +from app.core.config import settings, GPI_MODEL_URL, GPI_MODEL_NAME_SINGLE, GPI_MODEL_NAME_OVERALL, BATCH_GPI_RABBITMQ_QUEUES +from app.core.rabbit_mq_config import RABBITMQ_PARAMS from app.schemas.generate_image import BatchGenerateProductImageModel, ProductItemModel from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image -from app.service.utils.oss_client import oss_get_image +from app.service.utils.new_oss_client import oss_get_image -celery_app = Celery('product_tasks', broker=f'amqp://rabbit:123456@18.167.251.121:5672//', backend='rpc://', BROKER_CONNECTION_RETRY_ON_STARTUP=True) +celery_app = Celery('product_tasks', broker=f'amqp://rabbit:123456@18.167.251.121:5672//', backend='rpc://') celery_app.conf.task_default_queue = 'queue_product' celery_app.conf.worker_log_format = '%(asctime)s %(filename)s [line:%(lineno)d] 
%(levelname)s %(message)s' celery_app.conf.worker_hijack_root_logger = False @@ -104,7 +106,7 @@ def batch_generate_product(batch_request_data): result_data_list.append(data) # 发送每条结果 - if DEBUG: + if settings.DEBUG: logger.info(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") print(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") else: @@ -112,7 +114,7 @@ def batch_generate_product(batch_request_data): logger.info(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") # 任务完成,发送所有数据结果 - if DEBUG: + if settings.DEBUG: print(result_data_list) logger.info(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | batch_tasks_id:{batch_tasks_id} | progress:OK | result_data_list:{result_data_list}") print(f" [x]Queue : {BATCH_GPI_RABBITMQ_QUEUES} | batch_tasks_id:{batch_tasks_id} | progress:OK | result_data_list:{result_data_list}") diff --git a/app/service/generate_batch_image/service_batch_generate_relight_image.py b/app/service/generate_batch_image/service_batch_generate_relight_image.py index 0a039d5..9b4e5e1 100644 --- a/app/service/generate_batch_image/service_batch_generate_relight_image.py +++ b/app/service/generate_batch_image/service_batch_generate_relight_image.py @@ -12,18 +12,20 @@ import logging import cv2 import numpy as np +import pika import tritonclient.grpc as grpcclient from PIL import Image from celery import Celery from tritonclient.utils import np_to_triton_dtype -from app.core.config import * +from app.core.config import settings, GRI_MODEL_URL, BATCH_GRI_RABBITMQ_QUEUES, GRI_MODEL_NAME_SINGLE, GRI_MODEL_NAME_OVERALL +from app.core.rabbit_mq_config import RABBITMQ_PARAMS from app.schemas.generate_image import BatchGenerateRelightImageModel, RelightItemModel from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image -from 
app.service.utils.oss_client import oss_get_image +from app.service.utils.new_oss_client import oss_get_image logger = logging.getLogger() -celery_app = Celery('relight_tasks', broker=f'amqp://rabbit:123456@18.167.251.121:5672//', backend='rpc://', BROKER_CONNECTION_RETRY_ON_STARTUP=True) +celery_app = Celery('relight_tasks', broker=f'amqp://rabbit:123456@18.167.251.121:5672//', backend='rpc://') celery_app.conf.task_default_queue = 'queue_relight' celery_app.conf.worker_log_format = '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s' celery_app.conf.worker_hijack_root_logger = False @@ -133,14 +135,14 @@ def batch_generate_relight(batch_request_data): result_data_list.append(data) # 发送每条结果 - if DEBUG: + if settings.DEBUG: logger.info(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") print(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") else: publish_status(tasks_id, f"{i + 1}/{batch_size}", data) logger.info(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | result_data:{data}") # 任务完成,发送所有数据结果 - if DEBUG: + if settings.DEBUG: print(result_data_list) logger.info(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | batch_tasks_id:{batch_tasks_id} | progress:OK | result_data_list:{result_data_list}") print(f" [x]Queue : {BATCH_GRI_RABBITMQ_QUEUES} | batch_tasks_id:{batch_tasks_id} | progress:OK | result_data_list:{result_data_list}") diff --git a/app/service/generate_batch_image/service_batch_pose_transform.py b/app/service/generate_batch_image/service_batch_pose_transform.py index 0114ee5..e2215ff 100644 --- a/app/service/generate_batch_image/service_batch_pose_transform.py +++ b/app/service/generate_batch_image/service_batch_pose_transform.py @@ -14,22 +14,24 @@ from io import BytesIO import imageio import numpy as np +import pika import tritonclient.grpc as grpcclient 
from PIL import Image from celery import Celery from minio import Minio from tritonclient.utils import np_to_triton_dtype -from app.core.config import * +from app.core.config import settings, BATCH_PS_RABBITMQ_QUEUES, PT_MODEL_URL +from app.core.rabbit_mq_config import RABBITMQ_PARAMS from app.schemas.pose_transform import BatchPoseTransformModel from app.service.generate_image.utils.pose_transform_upload import upload_gif, upload_video from app.service.utils.new_oss_client import oss_upload_image -from app.service.utils.oss_client import oss_get_image +from app.service.utils.new_oss_client import oss_get_image -minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) logger = logging.getLogger() -celery_app = Celery('post_transform_tasks', broker=f'amqp://rabbit:123456@18.167.251.121:5672//', backend='rpc://', BROKER_CONNECTION_RETRY_ON_STARTUP=True) +celery_app = Celery('post_transform_tasks', broker=f'amqp://rabbit:123456@18.167.251.121:5672//', backend='rpc://') celery_app.conf.task_default_queue = 'queue_post_transform' celery_app.conf.worker_log_format = '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s' celery_app.conf.worker_hijack_root_logger = False @@ -45,7 +47,7 @@ def upload_first_image(image, user_id, category, file_name): image_data.seek(0) image_bytes = image_data.read() object_name = f'{user_id}/{category}/{file_name}' - req = oss_upload_image(oss_client=minio_client, bucket=GI_MINIO_BUCKET, object_name=object_name, image_bytes=image_bytes) + req = oss_upload_image(oss_client=minio_client, bucket="aida-users", object_name=object_name, image_bytes=image_bytes) image_url = f"aida-users/{object_name}" return image_url except Exception as e: @@ -141,7 +143,7 @@ def batch_generate_pose_transform(batch_request_data): print(e) data = {} 
result_url_list.append(data) - if DEBUG is False: + if settings.DEBUG is False: if i + 1 < batch_size: publish_status(tasks_id, f"{i + 1}/{batch_size}", data) logger.info(f" [x]Queue : {BATCH_PS_RABBITMQ_QUEUES} | tasks_id:{tasks_id} | progress:{i + 1}/{batch_size} | image_url:{image_url}") diff --git a/app/service/generate_batch_image/test.py b/app/service/generate_batch_image/test.py index ece4b39..cea4190 100644 --- a/app/service/generate_batch_image/test.py +++ b/app/service/generate_batch_image/test.py @@ -1,16 +1,11 @@ -from app.schemas.generate_image import BatchGenerateRelightImageModel, BatchGenerateProductImageModel +from app.schemas.generate_image import BatchGenerateProductImageModel from app.service.generate_batch_image.service_batch_generate_product_image import batch_generate_product -from app.service.generate_batch_image.service_batch_generate_relight_image import batch_generate_relight - if __name__ == '__main__': rd = BatchGenerateProductImageModel( - tasks_id="test1-89", - image_strength=0.7, - prompt=" The best quality, masterpiece, real image.Outwear,high quality clothing details,8K realistic,HDR", - image_url="aida-results/result_40b1a2fe-e220-11ef-9bfa-0242ac150003.png", - product_type="single", - batch_size=2 + batch_tasks_id="", + batch_data_list="", + user_id="" ) x = batch_generate_product.delay(rd.dict()) print(x) diff --git a/app/service/generate_image/service_agent_tool_generate_image.py b/app/service/generate_image/service_agent_tool_generate_image.py index a5c295c..76f5de8 100644 --- a/app/service/generate_image/service_agent_tool_generate_image.py +++ b/app/service/generate_image/service_agent_tool_generate_image.py @@ -8,25 +8,24 @@ @detail : """ import logging -import time import uuid + import cv2 import mmcv import numpy as np import pandas as pd import torch -import tritonclient.http as httpclient -import cv2 -import numpy as np import tritonclient.grpc as grpcclient +import tritonclient.http as httpclient from minio import 
Minio from tritonclient.utils import np_to_triton_dtype -from app.core.config import * + +from app.core.config import settings, FAST_GI_MODEL_URL, GI_MODEL_URL, DESIGN_MODEL_URL, FAST_GI_MODEL_NAME, GI_MODEL_NAME from app.service.utils.new_oss_client import oss_upload_image logger = logging.getLogger() -minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) class AgentToolGenerateImage: @@ -85,7 +84,8 @@ class AgentToolGenerateImage: self.grpc_client.close() self.triton_client.close() - def preprocess(self, img): + @staticmethod + def preprocess(img): img = mmcv.imread(img) img_scale = (224, 224) img = cv2.resize(img, img_scale) @@ -126,7 +126,7 @@ class AgentToolGenerateImage: return category_list -attr_type = pd.read_csv(CATEGORY_PATH) +attr_type = pd.read_csv(settings.CATEGORY_PATH) if __name__ == '__main__': request_data = { diff --git a/app/service/generate_image/service_generate_image.py b/app/service/generate_image/service_generate_image.py index 7d00b87..d9772b5 100644 --- a/app/service/generate_image/service_generate_image.py +++ b/app/service/generate_image/service_generate_image.py @@ -16,16 +16,18 @@ import minio import numpy as np import redis import tritonclient.grpc as grpcclient +from minio import Minio from tritonclient.utils import np_to_triton_dtype -from app.core.config import * +from app.core.config import settings, FAST_GI_MODEL_URL, GI_MODEL_URL, FAST_GI_MODEL_NAME, GI_MODEL_NAME, GI_RABBITMQ_QUEUES from app.schemas.generate_image import GenerateImageModel from app.service.generate_image.utils.image_processing import remove_background, stain_detection, generate_category_recognition, autoLevels, luminance_adjust from app.service.generate_image.utils.mq import publish_status from app.service.generate_image.utils.upload_sd_image import upload_png_sd -from 
app.service.utils.oss_client import oss_get_image +from app.service.utils.new_oss_client import oss_get_image logger = logging.getLogger() +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) class GenerateImage: @@ -36,7 +38,7 @@ class GenerateImage: else: self.grpc_client = grpcclient.InferenceServerClient(url=GI_MODEL_URL) - self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) if request_data.mode == "img2img": # cv2 读图片是BGR PIL读图片是RGB self.image = self.get_image(request_data.image_url) @@ -67,8 +69,7 @@ class GenerateImage: # image_array = np.asarray(bytearray(image_file.read()), dtype=np.uint8) # image_cv2 = cv2.imdecode(image_array, cv2.IMREAD_COLOR) # image_rbg = cv2.cvtColor(image_cv2, cv2.COLOR_BGR2RGB) - - image_cv2 = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="cv2") + image_cv2 = oss_get_image(oss_client=minio_client, bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="cv2") image_rbg = cv2.cvtColor(image_cv2, cv2.COLOR_BGR2RGB) image = cv2.resize(image_rbg, (1024, 1024)) except minio.error.S3Error: @@ -120,7 +121,7 @@ class GenerateImage: else: # 有污点 保存图片到本地 测试用 self.generate_data['status'] = "SUCCESS" self.generate_data['message'] = "success" - self.generate_data['image_url'] = str(GI_SYS_IMAGE_URL) + self.generate_data['image_url'] = "aida-sys-image/generate_image/white_image.jpg" self.redis_client.set(self.tasks_id, json.dumps(self.generate_data)) # logger.info(f"stain_detection result : {self.generate_data}") @@ -171,12 +172,12 @@ class GenerateImage: raise Exception(str(e)) finally: dict_generate_data, str_generate_data = self.read_tasks_status() - if not DEBUG: + if not 
settings.DEBUG: publish_status(str_generate_data, GI_RABBITMQ_QUEUES) def infer_cancel(tasks_id): - redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'} generate_data = json.dumps(data) redis_client.set(tasks_id, generate_data) @@ -186,12 +187,12 @@ def infer_cancel(tasks_id): if __name__ == '__main__': rd = GenerateImageModel( tasks_id="123-89", - prompt="Women's clothing ,dress,technical drawing style, clean line art, no shading, no texture, flat sketch, no human body, no face, centered composition, pure white background, single garmentsingle garment only, front flat view", - image_url="aida-collection-element/87/Printboard/842c09cf-7297-42d9-9e6e-9c17d4a13cb5.jpg", - mode='txt2img', - category="test", - gender="male", - version="high" + prompt="a single item of sketch of dress, 4k, white background", + image_url="aida-collection-element/89/Sketchboard/95f20cdc-e059-435c-b8b1-d04cc9e80c3d.png", + mode='img2img', + category="sketch", + gender="Female", + version="fast" ) server = GenerateImage(rd) print(server.get_result()) diff --git a/app/service/generate_image/service_generate_multi_view.py b/app/service/generate_image/service_generate_multi_view.py index 5ac7819..6f12da5 100644 --- a/app/service/generate_image/service_generate_multi_view.py +++ b/app/service/generate_image/service_generate_multi_view.py @@ -15,11 +15,11 @@ import numpy as np import redis import tritonclient.grpc as grpcclient -from app.core.config import * +from app.core.config import settings, GMV_MODEL_URL, GMV_MODEL_NAME, GMV_RABBITMQ_QUEUES from app.schemas.generate_image import GenerateMultiViewModel from app.service.generate_image.utils.mq import publish_status from app.service.generate_image.utils.upload_sd_image 
import upload_png_sd -from app.service.utils.oss_client import oss_get_image +from app.service.utils.new_oss_client import oss_get_image logger = logging.getLogger() @@ -27,7 +27,7 @@ logger = logging.getLogger() class GenerateMultiView: def __init__(self, request_data): self.grpc_client = grpcclient.InferenceServerClient(url=GMV_MODEL_URL) - self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) self.image = self.get_image(request_data.image_url) self.tasks_id = request_data.tasks_id self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:] @@ -35,7 +35,8 @@ class GenerateMultiView: self.redis_client.set(self.tasks_id, json.dumps(self.generate_data)) self.redis_client.expire(self.tasks_id, 600) - def get_image(self, image_url): + @staticmethod + def get_image(image_url): try: image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL") return image @@ -92,12 +93,12 @@ class GenerateMultiView: raise Exception(str(e)) finally: dict_generate_data, str_generate_data = self.read_tasks_status() - if not DEBUG: + if not settings.DEBUG: publish_status(str_generate_data, GMV_RABBITMQ_QUEUES) def infer_cancel(tasks_id): - redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'} generate_data = json.dumps(data) redis_client.set(tasks_id, generate_data) diff --git a/app/service/generate_image/service_generate_product_image.py b/app/service/generate_image/service_generate_product_image.py index 1191352..828da67 100644 --- 
a/app/service/generate_image/service_generate_product_image.py +++ b/app/service/generate_image/service_generate_product_image.py @@ -35,7 +35,7 @@ # # self.channel = self.connection.channel() # # self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) # self.grpc_client = grpcclient.InferenceServerClient(url=GPI_MODEL_URL) -# self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) +# self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) # self.category = "product_image" # self.image_strength = request_data.image_strength # self.batch_size = 1 @@ -126,7 +126,7 @@ # # # def infer_cancel(tasks_id): -# redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) +# redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) # data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'} # gen_product_data = json.dumps(data) # redis_client.set(tasks_id, gen_product_data) @@ -208,21 +208,23 @@ import numpy as np import redis import tritonclient.grpc as grpcclient from PIL import Image +from minio import Minio from tritonclient.utils import np_to_triton_dtype -from app.core.config import * +from app.core.config import settings, GPI_MODEL_URL, GPI_MODEL_NAME_SINGLE, GPI_MODEL_NAME_OVERALL, GPI_RABBITMQ_QUEUES from app.schemas.generate_image import GenerateProductImageModel from app.service.generate_image.utils.mq import publish_status from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image -from app.service.utils.oss_client import oss_get_image +from app.service.utils.new_oss_client import oss_get_image logger = logging.getLogger() +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, 
secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) class GenerateProductImage: def __init__(self, request_data): self.grpc_client = grpcclient.InferenceServerClient(url=GPI_MODEL_URL) - self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) self.category = "product_image" self.image_strength = request_data.image_strength self.batch_size = 1 @@ -313,12 +315,12 @@ class GenerateProductImage: raise Exception(str(e)) finally: dict_gen_product_data, str_gen_product_data = self.read_tasks_status() - if not DEBUG: + if not settings.DEBUG: publish_status(str_gen_product_data, GPI_RABBITMQ_QUEUES) def infer_cancel(tasks_id): - redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'} gen_product_data = json.dumps(data) redis_client.set(tasks_id, gen_product_data) @@ -326,7 +328,7 @@ def infer_cancel(tasks_id): def pre_processing_image(image_url): - image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL") + image = oss_get_image(oss_client=minio_client, bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL") # 目标图片的尺寸 target_width = 512 target_height = 768 diff --git a/app/service/generate_image/service_generate_relight_image.py b/app/service/generate_image/service_generate_relight_image.py index 7db12c2..c67ca3a 100644 --- a/app/service/generate_image/service_generate_relight_image.py +++ b/app/service/generate_image/service_generate_relight_image.py @@ -18,11 +18,11 @@ import tritonclient.grpc as grpcclient 
from PIL import Image from tritonclient.utils import np_to_triton_dtype -from app.core.config import * +from app.core.config import settings, GRI_MODEL_URL, GRI_MODEL_NAME_SINGLE, GRI_MODEL_NAME_OVERALL, GRI_RABBITMQ_QUEUES from app.schemas.generate_image import GenerateRelightImageModel from app.service.generate_image.utils.mq import publish_status from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image -from app.service.utils.oss_client import oss_get_image +from app.service.utils.new_oss_client import oss_get_image logger = logging.getLogger() @@ -30,7 +30,7 @@ logger = logging.getLogger() class GenerateRelightImage: def __init__(self, request_data): self.grpc_client = grpcclient.InferenceServerClient(url=GRI_MODEL_URL) - self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) self.category = "relight_image" self.batch_size = 1 self.prompt = request_data.prompt @@ -134,9 +134,10 @@ class GenerateRelightImage: raise Exception(str(e)) finally: dict_gen_product_data, str_gen_product_data = self.read_tasks_status() - if not DEBUG: + if not settings.DEBUG: publish_status(str_gen_product_data, GRI_RABBITMQ_QUEUES) + def pre_processing_image(image_url): image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL") # 目标图片的尺寸 @@ -178,8 +179,9 @@ def pre_processing_image(image_url): # image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA) return image + def infer_cancel(tasks_id): - redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'} 
gen_product_data = json.dumps(data) redis_client.set(tasks_id, gen_product_data) diff --git a/app/service/generate_image/service_generate_single_logo.py b/app/service/generate_image/service_generate_single_logo.py index 1e6b0d2..3fe1ee3 100644 --- a/app/service/generate_image/service_generate_single_logo.py +++ b/app/service/generate_image/service_generate_single_logo.py @@ -11,18 +11,16 @@ import json import logging import time -import cv2 import numpy as np import redis +import tritonclient.grpc as grpcclient from PIL import Image -from minio import Minio from tritonclient.utils import np_to_triton_dtype -from app.core.config import * -import tritonclient.grpc as grpcclient +from app.core.config import settings, GI_RABBITMQ_QUEUES, GSL_MODEL_NAME, GSL_MODEL_URL from app.schemas.generate_image import GenerateSingleLogoImageModel from app.service.generate_image.utils.mq import publish_status -from app.service.generate_image.utils.upload_sd_image import upload_png_sd, upload_SDXL_image +from app.service.generate_image.utils.upload_sd_image import upload_SDXL_image logger = logging.getLogger() @@ -30,7 +28,7 @@ logger = logging.getLogger() class GenerateSingleLogoImage: def __init__(self, request_data): self.grpc_client = grpcclient.InferenceServerClient(url=GSL_MODEL_URL) - self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) self.batch_size = 1 self.category = "single_logo" self.negative_prompts = "bad, ugly" @@ -93,12 +91,12 @@ class GenerateSingleLogoImage: raise Exception(str(e)) finally: dict_generate_data, str_generate_data = self.read_tasks_status() - if not DEBUG: + if not settings.DEBUG: publish_status(str_generate_data, GI_RABBITMQ_QUEUES) def infer_cancel(tasks_id): - redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, 
decode_responses=True) + redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'} generate_data = json.dumps(data) redis_client.set(tasks_id, generate_data) diff --git a/app/service/generate_image/service_pose_transform.py b/app/service/generate_image/service_pose_transform.py index 1616d76..d43952b 100644 --- a/app/service/generate_image/service_pose_transform.py +++ b/app/service/generate_image/service_pose_transform.py @@ -17,21 +17,23 @@ import numpy as np import redis import tritonclient.grpc as grpcclient from PIL import Image +from minio import Minio from tritonclient.utils import np_to_triton_dtype -from app.core.config import * +from app.core.config import settings, PS_RABBITMQ_QUEUES, PT_MODEL_URL from app.schemas.pose_transform import PoseTransformModel from app.service.generate_image.utils.mq import publish_status from app.service.generate_image.utils.pose_transform_upload import upload_gif, upload_video, upload_first_image -from app.service.utils.oss_client import oss_get_image +from app.service.utils.new_oss_client import oss_get_image logger = logging.getLogger() +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) class PoseTransformService: def __init__(self, request_data): self.grpc_client = grpcclient.InferenceServerClient(url=PT_MODEL_URL) - self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) self.category = "pose_transform" self.image_url = request_data.image_url self.pose_num = request_data.pose_id @@ -115,16 +117,14 @@ class PoseTransformService: raise Exception(str(e)) finally: dict_pose_transform_data, 
str_pose_transform_data = self.read_tasks_status() - if not DEBUG: + if not settings.DEBUG: publish_status(json.dumps(str_pose_transform_data), PS_RABBITMQ_QUEUES) logger.info( f" [x] Sent to: {PS_RABBITMQ_QUEUES} data:@@@@ {json.dumps(dict_pose_transform_data, indent=4)}") - - def infer_cancel(tasks_id): - redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'} pose_transform_data = json.dumps(data) redis_client.set(tasks_id, pose_transform_data) @@ -132,8 +132,7 @@ def infer_cancel(tasks_id): def pre_processing_image(image_url): - image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], - data_type="PIL") + image = oss_get_image(oss_client=minio_client, bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL") # 目标图片的尺寸 target_width = 512 target_height = 768 diff --git a/app/service/generate_image/test.py b/app/service/generate_image/test.py deleted file mode 100644 index 2c7277c..0000000 --- a/app/service/generate_image/test.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/env python -# -*- coding: UTF-8 -*- -""" -@Project :trinity_client -@File :service_att_recognition.py -@Author :周成融 -@Date :2023/7/26 12:01:05 -@detail : -""" -import json -import logging -import time -from io import BytesIO - -import cv2 -import minio -import redis -import tritonclient.grpc as grpcclient -import numpy as np -from minio import Minio -from tritonclient.utils import np_to_triton_dtype - -from app.core.config import * -from app.schemas.generate_image import GenerateImageModel -from app.service.generate_image.utils.adjust_contrast import adjust_contrast -from app.service.generate_image.utils.image_processing import 
remove_background, stain_detection -from app.service.generate_image.utils.upload_sd_image import upload_png_sd - -logger = logging.getLogger() - - -class GenerateImage: - def __init__(self, request_data): - if DEBUG is False: - self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) - self.channel = self.connection.channel() - # self.connection = pika.BlockingConnection(pika.ConnectionParameters(**RABBITMQ_PARAMS)) - # self.channel = self.connection.channel() - self.minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) - self.grpc_client = grpcclient.InferenceServerClient(url=GI_MODEL_URL) - self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) - if request_data.mode == "img2img": - self.image = self.get_image(request_data.image_url) - self.prompt = request_data.prompt - else: - self.image = np.random.randint(0, 256, (1024, 1024, 3), dtype=np.uint8) - self.prompt = request_data.prompt - - self.tasks_id = request_data.tasks_id - self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:] - self.mode = request_data.mode - self.batch_size = 1 - self.category = request_data.category - self.index = 0 - self.generate_data = {'tasks_id': self.tasks_id, 'status': 'PENDING', 'message': "pending", 'data': ''} - self.redis_client.set(self.tasks_id, json.dumps(self.generate_data)) - self.redis_client.expire(self.tasks_id, 600) - - def get_image(self, image_url): - # Get data of an object. - # Read data from response. 
- try: - response = self.minio_client.get_object(image_url.split('/')[0], image_url[image_url.find('/') + 1:]) - image_file = BytesIO(response.data) - image_array = np.asarray(bytearray(image_file.read()), dtype=np.uint8) - image_cv2 = cv2.imdecode(image_array, cv2.IMREAD_COLOR) - image = cv2.resize(image_cv2, (1024, 1024)) - except minio.error.S3Error: - image = np.random.randint(0, 256, (1024, 1024, 3), dtype=np.uint8) - return image - - def callback(self, result, error): - if error: - self.generate_data['status'] = "FAILURE" - self.generate_data['message'] = str(error) - self.generate_data['data'] = str(error) - self.redis_client.set(self.tasks_id, json.dumps(self.generate_data)) - else: - image_result = result.as_numpy("generated_image")[0] - is_smudge = True - if self.category == "sketch": - # 去背景 - remove_bg_image = remove_background(np.asarray(image_result)) - # 污点检测 - is_smudge, not_smudge_image = stain_detection(remove_bg_image) - image_result = not_smudge_image - if is_smudge: # 无污点 - image_result = adjust_contrast(image_result) - image_url = upload_png_sd(image_result, user_id=self.user_id, category=f"{self.category}", object_name=f"{self.tasks_id}.png") - # logger.info(f"upload image SUCCESS : {image_url}") - self.generate_data['status'] = "SUCCESS" - self.generate_data['message'] = "success" - self.generate_data['data'] = str(image_url) - self.redis_client.set(self.tasks_id, json.dumps(self.generate_data)) - else: # 有污点 - self.generate_data['status'] = "SUCCESS" - self.generate_data['message'] = "success" - self.generate_data['data'] = str(GI_SYS_IMAGE_URL) - self.redis_client.set(self.tasks_id, json.dumps(self.generate_data)) - # logger.info(f"stain_detection result : {self.generate_data}") - - def read_tasks_status(self): - status_data = self.redis_client.get(self.tasks_id) - return json.loads(status_data), status_data - - def infer(self, inputs): - return self.grpc_client.infer( - model_name=GI_MODEL_NAME, - inputs=inputs, - # callback=self.callback 
- ) - - def get_result(self): - try: - prompts = [self.prompt] * self.batch_size - modes = [self.mode] * self.batch_size - images = [self.image.astype(np.float16)] * self.batch_size - - text_obj = np.array(prompts, dtype="object").reshape((-1, 1)) - mode_obj = np.array(modes, dtype="object").reshape((-1, 1)) - image_obj = np.array(images, dtype=np.float16).reshape((-1, 1024, 1024, 3)) - - input_text = grpcclient.InferInput("prompt", text_obj.shape, np_to_triton_dtype(text_obj.dtype)) - input_image = grpcclient.InferInput("input_image", image_obj.shape, "FP16") - input_mode = grpcclient.InferInput("mode", mode_obj.shape, np_to_triton_dtype(text_obj.dtype)) - - input_text.set_data_from_numpy(text_obj) - input_image.set_data_from_numpy(image_obj) - input_mode.set_data_from_numpy(mode_obj) - - inputs = [input_text, input_image, input_mode] - ctx = self.infer(inputs) - time_out = 600 - generate_data = None - while time_out > 0: - generate_data, _ = self.read_tasks_status() - # logger.info(generate_data) - if generate_data['status'] in ["REVOKED", "FAILURE"]: - ctx.cancel() - break - elif generate_data['status'] == "SUCCESS": - break - time_out -= 1 - time.sleep(0.1) - # logger.info(time_out, generate_data) - return generate_data - except Exception as e: - # self.generate_data['status'] = "FAILURE" - # self.generate_data['message'] = "failure" - # self.generate_data['data'] = str(e) - # self.redis_client.set(self.tasks_id, json.dumps(self.generate_data)) - raise Exception(str(e)) - # finally: - # dict_generate_data, str_generate_data = self.read_tasks_status() - # if DEBUG is False: - # self.channel.basic_publish(exchange='', routing_key=GI_RABBITMQ_QUEUES, body=str_generate_data) - # logger.info(f" [x] Sent {json.dumps(dict_generate_data, indent=4)}") - - -def infer_cancel(tasks_id): - redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) - data = {'tasks_id': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 
'revoked'} - generate_data = json.dumps(data) - redis_client.set(tasks_id, generate_data) - return data - - -if __name__ == '__main__': - rd = GenerateImageModel( - tasks_id="123-89", - prompt='skeleton sitting by the side of a river looking soulful, concert poster, 4k, artistic', - image_url="", - mode='txt2img', - category="test" - ) - server = GenerateImage(rd) - print(server.get_result()) diff --git a/app/service/generate_image/utils/image_processing.py b/app/service/generate_image/utils/image_processing.py index 02d8bee..692ffc9 100644 --- a/app/service/generate_image/utils/image_processing.py +++ b/app/service/generate_image/utils/image_processing.py @@ -7,7 +7,7 @@ import numpy as np import torch import tritonclient.http as httpclient -from app.core.config import * +from app.core.config import settings, DESIGN_MODEL_URL, DESIGN_MODEL_NAME from app.service.generate_image.utils.upload_sd_image import upload_stain_png_sd, upload_face_png_sd logger = logging.getLogger() @@ -65,40 +65,40 @@ def get_contours(image): # transformed_img = image.astype(np.float32) # # 输入集 # inputs = [ -# httpclient.InferInput(SEGMENTATION['input'], transformed_img.shape, datatype="FP32") +# httpclient.InferInput(DESIGN_MODEL_NAME, transformed_img.shape, datatype="FP32") # ] # inputs[0].set_data_from_numpy(transformed_img, binary_data=True) # # 输出集 # outputs = [ -# httpclient.InferRequestedOutput(SEGMENTATION['output'], binary_data=True), +# httpclient.InferRequestedOutput("seg_input__0", binary_data=True), # ] # results = client.infer(model_name=SEGMENTATION['name'], inputs=inputs, outputs=outputs) # # 推理 # # 取结果 -# inference_output1 = torch.from_numpy(results.as_numpy(SEGMENTATION['output'])) +# inference_output1 = torch.from_numpy(results.as_numpy("seg_input__0")) # seg_result = seg_postprocess(inference_output1, ori_shape) # return seg_result def seg_infer_image(image_obj): image, ori_shape = seg_preprocess(image_obj) - client = 
httpclient.InferenceServerClient(url=f"{DESIGN_MODEL_URL}") + client = httpclient.InferenceServerClient(url=DESIGN_MODEL_URL) transformed_img = image.astype(np.float32) # 输入集 inputs = [ - httpclient.InferInput(SEGMENTATION['input'], transformed_img.shape, datatype="FP32") + httpclient.InferInput("seg_input__0", transformed_img.shape, datatype="FP32") ] inputs[0].set_data_from_numpy(transformed_img, binary_data=True) # 输出集 outputs = [ - httpclient.InferRequestedOutput(SEGMENTATION['output'], binary_data=True), + httpclient.InferRequestedOutput("seg_output__0", binary_data=True), ] start_time = time.time() - results = client.infer(model_name=SEGMENTATION['new_model_name'], inputs=inputs, outputs=outputs) + results = client.infer(model_name=DESIGN_MODEL_NAME, inputs=inputs, outputs=outputs) print(f"KNet infer time is :{time.time() - start_time}") # 推理 # 取结果 - inference_output1 = results.as_numpy(SEGMENTATION['output']) - seg_result = seg_postprocess(inference_output1, ori_shape) + inference_output1 = results.as_numpy("seg_output__0") + seg_result = seg_postprocess(inference_output1) return seg_result @@ -110,7 +110,7 @@ def seg_infer_image(image_obj): # return seg_pred # KNet -def seg_postprocess(output, ori_shape): +def seg_postprocess(output): # seg_logit = F.interpolate(torch.tensor(output).float(), size=ori_shape, scale_factor=None, mode='bilinear', align_corners=False) # seg_logit = F.softmax(seg_logit, dim=1) # seg_pred = seg_logit.argmax(dim=1) @@ -201,7 +201,7 @@ def stain_detection(image, user_id, category, tasks_id, spot_size=100): # 如果有连续的纯白区域存在 if filtered_contours: # 将纯白区域替换为灰色 - if DEBUG: + if settings.DEBUG: for cnt in filtered_contours: x, y, w, h = cv2.boundingRect(cnt) # 在原始图像上进行替换 @@ -216,7 +216,7 @@ def stain_detection(image, user_id, category, tasks_id, spot_size=100): if is_pure_white: return False, None - if DEBUG: + if settings.DEBUG: for corner_coords in [ (0, 0), # (0, width - spot_size), @@ -236,7 +236,7 @@ def stain_detection(image, 
user_id, category, tasks_id, spot_size=100): ]: cv2.rectangle(dst, corner_coords, (corner_coords[0] + spot_size, corner_coords[1] + spot_size), (0, 0, 255), 2) cv2.rectangle(dst, (center_x - spot_size // 2, center_y - spot_size // 2), (center_x + spot_size // 2, center_y + spot_size // 2), (0, 255, 0), 2) # 在原始图像上绘制矩形框 - image_url = upload_stain_png_sd(dst, user_id=user_id, category=f"{category}", object_name=f"{tasks_id}.png") + upload_stain_png_sd(dst, user_id=user_id, category=f"{category}", object_name=f"{tasks_id}.png") return True, image @@ -262,10 +262,10 @@ def generate_category_recognition(image, gender): scores = inference_output.detach().numpy() import pandas as pd - attr_type = pd.read_csv(CATEGORY_PATH) + attr_type = pd.read_csv(settings.CATEGORY_PATH) colattr = list(attr_type['labelName']) - task = attr_type['taskName'][0] + # attr_type['taskName'][0] maxsc = np.max(scores[0][:5]) indexs = np.argwhere(scores == maxsc)[:, 1] @@ -321,12 +321,13 @@ def face_detect_pic(image, user_id, category, tasks_id): # cv2.imshow("gray", gray) # 2、训练一组人脸 + FACE_CLASSIFIER = "" face_detector = cv2.CascadeClassifier(FACE_CLASSIFIER) # 3、检测人脸(用灰度图检测,返回人脸矩形坐标(4个角)) faces_rect = face_detector.detectMultiScale(gray, 1.05, 3) - if DEBUG: + if settings.DEBUG: dst = image.copy() for x, y, w, h in faces_rect: cv2.rectangle(dst, (x, y), (x + w, y + h), (0, 0, 255), 3) # 画出矩形框 @@ -336,7 +337,7 @@ def face_detect_pic(image, user_id, category, tasks_id): dst = image.copy() for x, y, w, h in faces_rect: cv2.rectangle(dst, (x, y), (x + w, y + h), (0, 0, 255), 3) # 画出矩形框 - image_url = upload_face_png_sd(dst, user_id=user_id, category=f"{category}", object_name=f"{tasks_id}.png") + upload_face_png_sd(dst, user_id=user_id, category=f"{category}", object_name=f"{tasks_id}.png") return len(faces_rect) diff --git a/app/service/generate_image/utils/mq.py b/app/service/generate_image/utils/mq.py index 86e1df6..1e812f8 100644 --- a/app/service/generate_image/utils/mq.py +++ 
b/app/service/generate_image/utils/mq.py @@ -3,7 +3,7 @@ import json import pika import logging -from app.core.config import RABBITMQ_PARAMS +from app.core.rabbit_mq_config import RABBITMQ_PARAMS logger = logging.getLogger(__name__) diff --git a/app/service/generate_image/utils/pose_transform_upload.py b/app/service/generate_image/utils/pose_transform_upload.py index f5e5318..956955a 100644 --- a/app/service/generate_image/utils/pose_transform_upload.py +++ b/app/service/generate_image/utils/pose_transform_upload.py @@ -3,19 +3,13 @@ import logging import os.path import numpy as np -# import boto3 from minio import Minio from moviepy.video.io.ImageSequenceClip import ImageSequenceClip -from app.core.config import * +from app.core.config import settings from app.service.utils.new_oss_client import oss_upload_image -# minio 配置 -MINIO_URL = "www.minio-api.aida.com.hk" -MINIO_ACCESS = 'vXKFLSJkYeEq2DrSZvkB' -MINIO_SECRET = 'uKTZT3x7C43WvPN9QTc99DiRkwddWZrG9Uh3JVlR' -MINIO_SECURE = True -minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) def upload_first_image(image, user_id, category, file_name): @@ -25,7 +19,7 @@ def upload_first_image(image, user_id, category, file_name): image_data.seek(0) image_bytes = image_data.read() object_name = f'{user_id}/{category}/{file_name}' - req = oss_upload_image(oss_client=minio_client, bucket=GI_MINIO_BUCKET, object_name=object_name, image_bytes=image_bytes) + oss_upload_image(oss_client=minio_client, bucket="aida-users", object_name=object_name, image_bytes=image_bytes) image_url = f"aida-users/{object_name}" return image_url except Exception as e: @@ -35,7 +29,7 @@ def upload_first_image(image, user_id, category, file_name): def upload_gif(gif_buffer, user_id, category, file_name): try: object_name = f'{user_id}/{category}/{file_name}' - req 
= minio_client.put_object( + minio_client.put_object( "aida-users", object_name, gif_buffer, @@ -62,8 +56,8 @@ def upload_video(frames, user_id, category, file_name): logging.warning(f"upload_video runtime exception : {e}") -def ndarray_to_video(images, output_path, frame_size=(512, 768), fps=9): - save_path = os.path.join(POSE_TRANSFORM_VIDEO_PATH, output_path) +def ndarray_to_video(images, output_path, fps=9): + save_path = os.path.join("../pose_transform_video/", output_path) clip = ImageSequenceClip([frame for frame in images], fps=fps) clip.write_videofile(save_path, codec='libx264') diff --git a/app/service/generate_image/utils/upload_sd_image.py b/app/service/generate_image/utils/upload_sd_image.py index a63488c..0693c1d 100644 --- a/app/service/generate_image/utils/upload_sd_image.py +++ b/app/service/generate_image/utils/upload_sd_image.py @@ -9,16 +9,13 @@ """ import io import logging - -# import boto3 import cv2 -from PIL import Image from minio import Minio -from app.core.config import * -from app.service.utils.oss_client import oss_upload_image +from app.core.config import settings +from app.service.utils.new_oss_client import oss_upload_image -minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) # s3 = boto3.client('s3', aws_access_key_id=S3_ACCESS_KEY, aws_secret_access_key=S3_AWS_SECRET_ACCESS_KEY, region_name=S3_REGION_NAME) @@ -52,7 +49,7 @@ def upload_SDXL_image(image, user_id, category, file_name): # content_type='image/jpeg' # ) object_name = f'{user_id}/{category}/{file_name}' - req = oss_upload_image(bucket=GI_MINIO_BUCKET, object_name=object_name, image_bytes=image_bytes) + oss_upload_image(oss_client=minio_client, bucket="aida-users", object_name=object_name, image_bytes=image_bytes) image_url = f"aida-users/{object_name}" return image_url except 
Exception as e: @@ -63,7 +60,7 @@ def upload_png_sd(image, user_id, category, file_name): try: _, img_byte_array = cv2.imencode('.jpg', image) object_name = f'{user_id}/{category}/{file_name}' - req = oss_upload_image(bucket=GI_MINIO_BUCKET, object_name=object_name, image_bytes=img_byte_array) + oss_upload_image(oss_client=minio_client, bucket="aida-users", object_name=object_name, image_bytes=img_byte_array) image_url = f"aida-users/{object_name}" return image_url except Exception as e: diff --git a/app/service/image2sketch/checkpoints/download_checkpoints.py b/app/service/image2sketch/checkpoints/download_checkpoints.py deleted file mode 100644 index 03cc2c6..0000000 --- a/app/service/image2sketch/checkpoints/download_checkpoints.py +++ /dev/null @@ -1,45 +0,0 @@ -import os - -from minio import Minio -from minio.error import S3Error - -MINIO_URL = "www.minio.aida.com.hk:12024" -MINIO_ACCESS = 'vXKFLSJkYeEq2DrSZvkB' -MINIO_SECRET = 'uKTZT3x7C43WvPN9QTc99DiRkwddWZrG9Uh3JVlR' -MINIO_SECURE = True -# 配置MinIO客户端 -minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) - - -# 下载函数 -def download_folder(bucket_name, folder_name, local_dir): - try: - # 确保本地目录存在 - if not os.path.exists(local_dir): - os.makedirs(local_dir) - - # 遍历MinIO中的文件 - objects = minio_client.list_objects(bucket_name, prefix=folder_name, recursive=True) - for obj in objects: - # 构造本地文件路径 - local_file_path = os.path.join(local_dir, obj.object_name[len(folder_name):]) - local_file_dir = os.path.dirname(local_file_path) - - # 确保本地目录存在 - if not os.path.exists(local_file_dir): - os.makedirs(local_file_dir) - - # 下载文件 - minio_client.fget_object(bucket_name, obj.object_name, local_file_path) - print(f"Downloaded {obj.object_name} to {local_file_path}") - - except S3Error as e: - print(f"Error occurred: {e}") - - -# 使用示例 -bucket_name = "test" # 替换成你的bucket名称 -folder_name = "checkpoints/" # 权重文件夹的路径 -local_dir = "app/service/image2sketch/checkpoints" # 
替换成你希望保存到的本地目录 - -download_folder(bucket_name, folder_name, local_dir) diff --git a/app/service/image2sketch/datasets/ref_unpair/testC/style_1.jpg b/app/service/image2sketch/datasets/ref_unpair/testC/style_1.jpg deleted file mode 100644 index 3a66b7f..0000000 Binary files a/app/service/image2sketch/datasets/ref_unpair/testC/style_1.jpg and /dev/null differ diff --git a/app/service/image2sketch/datasets/ref_unpair/testC/style_2.jpeg b/app/service/image2sketch/datasets/ref_unpair/testC/style_2.jpeg deleted file mode 100644 index 0347322..0000000 Binary files a/app/service/image2sketch/datasets/ref_unpair/testC/style_2.jpeg and /dev/null differ diff --git a/app/service/image2sketch/datasets/ref_unpair/testC/style_3.png b/app/service/image2sketch/datasets/ref_unpair/testC/style_3.png deleted file mode 100644 index 8d8bcf4..0000000 Binary files a/app/service/image2sketch/datasets/ref_unpair/testC/style_3.png and /dev/null differ diff --git a/app/service/image2sketch/infer.py b/app/service/image2sketch/infer.py deleted file mode 100644 index 8ec241f..0000000 --- a/app/service/image2sketch/infer.py +++ /dev/null @@ -1,89 +0,0 @@ -import os - -import numpy as np -import torch -import torchvision.transforms as transforms -from PIL import Image - -from .models import create_model - - -def tensor2im(input_image, imtype=np.uint8): - if not isinstance(input_image, np.ndarray): - if isinstance(input_image, torch.Tensor): # get the data from a variable - image_tensor = input_image.data - else: - return input_image - image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array - if image_numpy.shape[0] == 1: # grayscale to RGB - image_numpy = np.tile(image_numpy, (3, 1, 1)) - image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: tranpose and scaling - else: # if it is a numpy array, do nothing - image_numpy = input_image - return image_numpy.astype(imtype) - - -def save_image(image_numpy, image_path, w, h, 
aspect_ratio=1.0): - """Save a numpy image to the disk - - Parameters: - image_numpy (numpy array) -- input numpy array - image_path (str) -- the path of the image - """ - - image_pil = Image.fromarray(image_numpy) - image_pil = image_pil.resize((w, h)) - image_pil.save(image_path) - - -def save_img(image_tensor, w, h, filename): - image_pil = tensor2im(image_tensor) - - save_image(image_pil, filename, w, h, aspect_ratio=1.0) - print("Image saved as {}".format(filename)) - - -def load_img(filepath): - img = Image.open(filepath).convert('L') - # print(img.size) - width = img.size[0] - height = img.size[1] - # img = img.resize((512, 512), Image.BICUBIC) - return img, width, height - - -if __name__ == '__main__': - img_A = "/workspace/Semi_ref2sketch_code/datasets/ref_unpair/testA/real_Dress_732caedc416a0cbfedd0e6528040eac7.jpg_Img.jpg" - img_B = "/workspace/Semi_ref2sketch_code/datasets/ref_unpair/testC/style_3.png" - from opt import Config - - opt = Config() # get test options - # hard-code some parameters for test - opt.num_threads = 0 # test code only supports num_threads = 0 - opt.batch_size = 1 # test code only supports batch_size = 1 - opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed. - opt.no_flip = True # no flip; comment this line if results on flipped images are needed. - opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file. 
- device = torch.device("cuda:0") - model = create_model(opt) # create a model given opt.model and other options - model.setup(opt) - transform_list = [transforms.ToTensor(), transforms.Normalize([0.5], [0.5])] - transform = transforms.Compose(transform_list) - if opt.eval: - model.eval() - data = {} - print(os.getcwd()) - B = reference, _, _ = load_img(r"/app/service/image2sketch/datasets/ref_unpair/testC/style_3.png") - style_img = transform(reference) - data['B'] = style_img - data['B'] = data['B'].unsqueeze(0).to(device) - A = Image.open(r"E:\workspace\trinity_client_aida\app\service\image2sketch\datasets\ref_unpair\testA\real_Dress_3200fecdc83d0c556c2bd96aedbd7fbf.jpg_Img.jpg") - width = A.size[0] - height = A.size[1] - # data['A'] = A.resize((512, 512)) - data['A'] = transform(A) - data['A'] = data['A'].unsqueeze(0).to(device) - model.set_input(data) - model.test() # run inference - visuals = model.get_current_visuals() # get image results - save_img(visuals['content_output'].cpu(), width, height, "result/result.jpg") diff --git a/app/service/image2sketch/models/__init__.py b/app/service/image2sketch/models/__init__.py deleted file mode 100644 index 809105c..0000000 --- a/app/service/image2sketch/models/__init__.py +++ /dev/null @@ -1,49 +0,0 @@ -import importlib - -from app.service.image2sketch.models import unpaired_model as modellib -from .base_model import BaseModel - - -def find_model_using_name(model_name): - """Import the module "models/[model_name]_model.py". - - In the file, the class called DatasetNameModel() will - be instantiated. It has to be a subclass of BaseModel, - and it is case-insensitive. - """ - # model_filename = "." 
+ model_name + "_model" - # modellib = importlib.import_module(model_filename) - model = None - target_model_name = model_name.replace('_', '') + 'model' - for name, cls in modellib.__dict__.items(): - if name.lower() == target_model_name.lower() \ - and issubclass(cls, BaseModel): - model = cls - - if model is None: - print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name)) - exit(0) - - return model - - -def get_option_setter(model_name): - """Return the static method of the model class.""" - model_class = find_model_using_name(model_name) - return model_class.modify_commandline_options - - -def create_model(opt): - """Create a model given the option. - - This function warps the class CustomDatasetDataLoader. - This is the main interface between this package and 'train.py'/'test.py' - - Example: - >>> from .models import create_model - >>> model = create_model(opt) - """ - model = find_model_using_name(opt.model) - instance = model(opt) - print("model [%s] was created" % type(instance).__name__) - return instance diff --git a/app/service/image2sketch/models/base_model.py b/app/service/image2sketch/models/base_model.py deleted file mode 100644 index 6de961b..0000000 --- a/app/service/image2sketch/models/base_model.py +++ /dev/null @@ -1,230 +0,0 @@ -import os -import torch -from collections import OrderedDict -from abc import ABC, abstractmethod -from . import networks - - -class BaseModel(ABC): - """This class is an abstract base class (ABC) for models. - To create a subclass, you need to implement the following five functions: - -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). - -- : unpack data from dataset and apply preprocessing. - -- : produce intermediate results. - -- : calculate losses, gradients, and update network weights. - -- : (optionally) add model-specific options and set default options. 
- """ - - def __init__(self, opt): - """Initialize the BaseModel class. - - Parameters: - opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions - - When creating your custom class, you need to implement your own initialization. - In this function, you should first call - Then, you need to define four lists: - -- self.loss_names (str list): specify the training losses that you want to plot and save. - -- self.model_names (str list): define networks used in our training. - -- self.visual_names (str list): specify the images that you want to display and save. - -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example. - """ - self.opt = opt - self.gpu_ids = opt.gpu_ids - self.isTrain = opt.isTrain - self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU - self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir - if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark. - torch.backends.cudnn.benchmark = True - self.loss_names = [] - self.model_names = [] - self.visual_names = [] - self.optimizers = [] - self.image_paths = [] - self.metric = 0 # used for learning rate policy 'plateau' - - @staticmethod - def modify_commandline_options(parser, is_train): - """Add new model-specific options, and rewrite default values for existing options. - - Parameters: - parser -- original option parser - is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. - - Returns: - the modified parser. 
- """ - return parser - - @abstractmethod - def set_input(self, input): - """Unpack input data from the dataloader and perform necessary pre-processing steps. - - Parameters: - input (dict): includes the data itself and its metadata information. - """ - pass - - @abstractmethod - def forward(self): - """Run forward pass; called by both functions and .""" - pass - - @abstractmethod - def optimize_parameters(self): - """Calculate losses, gradients, and update network weights; called in every training iteration""" - pass - - def setup(self, opt): - """Load and print networks; create schedulers - - Parameters: - opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions - """ - if self.isTrain: - self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers] - if not self.isTrain or opt.continue_train: - load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch - self.load_networks(load_suffix) - self.print_networks(opt.verbose) - - def eval(self): - """Make models eval mode during test time""" - for name in self.model_names: - if isinstance(name, str): - net = getattr(self, 'net' + name) - net.eval() - - def test(self): - """Forward function used in test time. 
- - This function wraps function in no_grad() so we don't save intermediate steps for backprop - It also calls to produce additional visualization results - """ - with torch.no_grad(): - self.forward() - self.compute_visuals() - - def compute_visuals(self): - """Calculate additional output images for visdom and HTML visualization""" - pass - - def get_image_paths(self): - """ Return image paths that are used to load current data""" - return self.image_paths - - def update_learning_rate(self): - """Update learning rates for all the networks; called at the end of every epoch""" - old_lr = self.optimizers[0].param_groups[0]['lr'] - for scheduler in self.schedulers: - if self.opt.lr_policy == 'plateau': - scheduler.step(self.metric) - else: - scheduler.step() - - lr = self.optimizers[0].param_groups[0]['lr'] - print('learning rate %.7f -> %.7f' % (old_lr, lr)) - - def get_current_visuals(self): - """Return visualization images. train.py will display these images with visdom, and save the images to a HTML""" - visual_ret = OrderedDict() - for name in self.visual_names: - if isinstance(name, str): - visual_ret[name] = getattr(self, name) - return visual_ret - - def get_current_losses(self): - """Return traning losses / errors. train.py will print out these errors on console, and save them to a file""" - errors_ret = OrderedDict() - for name in self.loss_names: - if isinstance(name, str): - errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number - return errors_ret - - def save_networks(self, epoch): - """Save all the networks to the disk. 
- - Parameters: - epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) - """ - for name in self.model_names: - if isinstance(name, str): - save_filename = '%s_net_%s.pth' % (epoch, name) - save_path = os.path.join(self.save_dir, save_filename) - net = getattr(self, 'net' + name) - - if len(self.gpu_ids) > 0 and torch.cuda.is_available(): - torch.save(net.module.cpu().state_dict(), save_path) - net.cuda(self.gpu_ids[0]) - else: - torch.save(net.cpu().state_dict(), save_path) - - def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): - """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)""" - key = keys[i] - if i + 1 == len(keys): # at the end, pointing to a parameter/buffer - if module.__class__.__name__.startswith('InstanceNorm') and \ - (key == 'running_mean' or key == 'running_var'): - if getattr(module, key) is None: - state_dict.pop('.'.join(keys)) - if module.__class__.__name__.startswith('InstanceNorm') and \ - (key == 'num_batches_tracked'): - state_dict.pop('.'.join(keys)) - else: - self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) - - def load_networks(self, epoch): - """Load all the networks from the disk. 
- - Parameters: - epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) - """ - for name in self.model_names: - if isinstance(name, str): - load_filename = '%s_net_%s.pth' % (epoch, name) - load_path = os.path.join(self.save_dir, load_filename) - net = getattr(self, 'net' + name) - if isinstance(net, torch.nn.DataParallel): - net = net.module - print('loading the model from %s' % load_path) - # if you are using PyTorch newer than 0.4 (e.g., built from - # GitHub source), you can remove str() on self.device - state_dict = torch.load(load_path, map_location=str(self.device)) - if hasattr(state_dict, '_metadata'): - del state_dict._metadata - - # patch InstanceNorm checkpoints prior to 0.4 - for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop - self.__patch_instance_norm_state_dict(state_dict, net, key.split('.')) - net.load_state_dict(state_dict) - - def print_networks(self, verbose): - """Print the total number of parameters in the network and (if verbose) network architecture - - Parameters: - verbose (bool) -- if verbose: print the network architecture - """ - print('---------- Networks initialized -------------') - for name in self.model_names: - if isinstance(name, str): - net = getattr(self, 'net' + name) - num_params = 0 - for param in net.parameters(): - num_params += param.numel() - if verbose: - print(net) - print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6)) - print('-----------------------------------------------') - - def set_requires_grad(self, nets, requires_grad=False): - """Set requies_grad=Fasle for all the networks to avoid unnecessary computations - Parameters: - nets (network list) -- a list of networks - requires_grad (bool) -- whether the networks require gradients or not - """ - if not isinstance(nets, list): - nets = [nets] - for net in nets: - if net is not None: - for param in net.parameters(): - param.requires_grad = requires_grad diff --git 
a/app/service/image2sketch/models/layer.py b/app/service/image2sketch/models/layer.py deleted file mode 100644 index df96a35..0000000 --- a/app/service/image2sketch/models/layer.py +++ /dev/null @@ -1,354 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class CNR2d(nn.Module): - def __init__(self, nch_in, nch_out, kernel_size=4, stride=1, padding=1, norm='bnorm', relu=0.0, drop=[], bias=[]): - super().__init__() - - if bias == []: - if norm == 'bnorm': - bias = False - else: - bias = True - - layers = [] - layers += [Conv2d(nch_in, nch_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)] - - if norm != []: - layers += [Norm2d(nch_out, norm)] - - if relu != []: - layers += [ReLU(relu)] - - if drop != []: - layers += [nn.Dropout2d(drop)] - - self.cbr = nn.Sequential(*layers) - - def forward(self, x): - return self.cbr(x) - - -class DECNR2d(nn.Module): - def __init__(self, nch_in, nch_out, kernel_size=4, stride=1, padding=1, output_padding=0, norm='bnorm', relu=0.0, drop=[], bias=[]): - super().__init__() - - if bias == []: - if norm == 'bnorm': - bias = False - else: - bias = True - - layers = [] - layers += [Deconv2d(nch_in, nch_out, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding, bias=bias)] - - if norm != []: - layers += [Norm2d(nch_out, norm)] - - if relu != []: - layers += [ReLU(relu)] - - if drop != []: - layers += [nn.Dropout2d(drop)] - - self.decbr = nn.Sequential(*layers) - - def forward(self, x): - return self.decbr(x) - - -class ResBlock(nn.Module): - def __init__(self, nch_in, nch_out, kernel_size=3, stride=1, padding=1, padding_mode='reflection', norm='inorm', relu=0.0, drop=[], bias=[]): - super().__init__() - - if bias == []: - if norm == 'bnorm': - bias = False - else: - bias = True - - layers = [] - - # 1st conv - layers += [Padding(padding, padding_mode=padding_mode)] - layers += [CNR2d(nch_in, nch_out, kernel_size=kernel_size, stride=stride, padding=0, 
norm=norm, relu=relu)] - - if drop != []: - layers += [nn.Dropout2d(drop)] - - # 2nd conv - layers += [Padding(padding, padding_mode=padding_mode)] - layers += [CNR2d(nch_in, nch_out, kernel_size=kernel_size, stride=stride, padding=0, norm=norm, relu=[])] - - self.resblk = nn.Sequential(*layers) - - def forward(self, x): - return x + self.resblk(x) - - -class ResBlock_cat(nn.Module): - def __init__(self, nch_in, nch_out, kernel_size=3, stride=1, padding=1, padding_mode='reflection', norm='inorm', relu=0.0, drop=[], bias=[]): - super().__init__() - - if bias == []: - if norm == 'bnorm': - bias = False - else: - bias = True - - layers = [] - - # 1st conv - layers += [Padding(padding, padding_mode=padding_mode)] - layers += [CNR2d(nch_in*2, nch_out, kernel_size=kernel_size, stride=stride, padding=0, norm=norm, relu=relu)] - - if drop != []: - layers += [nn.Dropout2d(drop)] - - # 2nd conv - layers += [Padding(padding, padding_mode=padding_mode)] - layers += [CNR2d(nch_in, nch_out, kernel_size=kernel_size, stride=stride, padding=0, norm=norm, relu=[])] - - self.resblk = nn.Sequential(*layers) - - def forward(self,x,y): - output = x + self.resblk(torch.cat([x,y],dim=1)) - return output - -class LinearBlock(nn.Module): - def __init__(self, input_dim, output_dim, norm='none', activation='relu'): - super(LinearBlock, self).__init__() - use_bias = True - # initialize fully connected layer - if norm == 'sn': - self.fc = SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias)) - else: - self.fc = nn.Linear(input_dim, output_dim, bias=use_bias) - - # initialize normalization - norm_dim = output_dim - if norm == 'bn': - self.norm = nn.BatchNorm1d(norm_dim) - elif norm == 'in': - self.norm = nn.InstanceNorm1d(norm_dim) - elif norm == 'ln': - self.norm = LayerNorm(norm_dim) - elif norm == 'none' or norm == 'sn': - self.norm = None - else: - assert 0, "Unsupported normalization: {}".format(norm) - - # initialize activation - if activation == 'relu': - self.activation = 
nn.ReLU(inplace=True) - elif activation == 'lrelu': - self.activation = nn.LeakyReLU(0.2, inplace=True) - elif activation == 'prelu': - self.activation = nn.PReLU() - elif activation == 'selu': - self.activation = nn.SELU(inplace=True) - elif activation == 'tanh': - self.activation = nn.Tanh() - elif activation == 'none': - self.activation = None - else: - assert 0, "Unsupported activation: {}".format(activation) - - def forward(self, x): - out = self.fc(x) - if self.norm: - out = self.norm(out) - if self.activation: - out = self.activation(out) - return out - -class MLP(nn.Module): - def __init__(self, input_dim, output_dim, dim, n_blk, norm='none', activ='relu'): - - super(MLP, self).__init__() - self.model = [] - self.model += [LinearBlock(input_dim, dim, norm=norm, activation=activ)] - for i in range(n_blk - 2): - self.model += [LinearBlock(dim, dim, norm=norm, activation=activ)] - self.model += [LinearBlock(dim, output_dim, norm='none', activation='none')] # no output activations - self.model = nn.Sequential(*self.model) - - def forward(self, x): - return self.model(x.view(x.size(0), -1)) - -class CNR1d(nn.Module): - def __init__(self, nch_in, nch_out, norm='bnorm', relu=0.0, drop=[]): - super().__init__() - - if norm == 'bnorm': - bias = False - else: - bias = True - - layers = [] - layers += [nn.Linear(nch_in, nch_out, bias=bias)] - - if norm != []: - layers += [Norm2d(nch_out, norm)] - - if relu != []: - layers += [ReLU(relu)] - - if drop != []: - layers += [nn.Dropout2d(drop)] - - self.cbr = nn.Sequential(*layers) - - def forward(self, x): - return self.cbr(x) - - -class Conv2d(nn.Module): - def __init__(self, nch_in, nch_out, kernel_size=4, stride=1, padding=1, bias=True): - super(Conv2d, self).__init__() - self.conv = nn.Conv2d(nch_in, nch_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias) - - def forward(self, x): - return self.conv(x) - - -class Deconv2d(nn.Module): - def __init__(self, nch_in, nch_out, kernel_size=4, stride=1, 
padding=1, output_padding=0, bias=True): - super(Deconv2d, self).__init__() - self.deconv = nn.ConvTranspose2d(nch_in, nch_out, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding, bias=bias) - - # layers = [nn.Upsample(scale_factor=2, mode='bilinear'), - # nn.ReflectionPad2d(1), - # nn.Conv2d(nch_in , nch_out, kernel_size=3, stride=1, padding=0)] - # - # self.deconv = nn.Sequential(*layers) - - def forward(self, x): - return self.deconv(x) - - -class Linear(nn.Module): - def __init__(self, nch_in, nch_out): - super(Linear, self).__init__() - self.linear = nn.Linear(nch_in, nch_out) - - def forward(self, x): - return self.linear(x) - - -class Norm2d(nn.Module): - def __init__(self, nch, norm_mode): - super(Norm2d, self).__init__() - if norm_mode == 'bnorm': - self.norm = nn.BatchNorm2d(nch) - elif norm_mode == 'inorm': - self.norm = nn.InstanceNorm2d(nch) - - def forward(self, x): - return self.norm(x) - - -class ReLU(nn.Module): - def __init__(self, relu): - super(ReLU, self).__init__() - if relu > 0: - self.relu = nn.LeakyReLU(relu, True) - elif relu == 0: - self.relu = nn.ReLU(True) - - def forward(self, x): - return self.relu(x) - - -class Padding(nn.Module): - def __init__(self, padding, padding_mode='zeros', value=0): - super(Padding, self).__init__() - if padding_mode == 'reflection': - self. 
padding = nn.ReflectionPad2d(padding) - elif padding_mode == 'replication': - self.padding = nn.ReplicationPad2d(padding) - elif padding_mode == 'constant': - self.padding = nn.ConstantPad2d(padding, value) - elif padding_mode == 'zeros': - self.padding = nn.ZeroPad2d(padding) - - def forward(self, x): - return self.padding(x) - - -class Pooling2d(nn.Module): - def __init__(self, nch=[], pool=2, type='avg'): - super().__init__() - - if type == 'avg': - self.pooling = nn.AvgPool2d(pool) - elif type == 'max': - self.pooling = nn.MaxPool2d(pool) - elif type == 'conv': - self.pooling = nn.Conv2d(nch, nch, kernel_size=pool, stride=pool) - - def forward(self, x): - return self.pooling(x) - - -class UnPooling2d(nn.Module): - def __init__(self, nch=[], pool=2, type='nearest'): - super().__init__() - - if type == 'nearest': - self.unpooling = nn.Upsample(scale_factor=pool, mode='nearest', align_corners=True) - elif type == 'bilinear': - self.unpooling = nn.Upsample(scale_factor=pool, mode='bilinear', align_corners=True) - elif type == 'conv': - self.unpooling = nn.ConvTranspose2d(nch, nch, kernel_size=pool, stride=pool) - - def forward(self, x): - return self.unpooling(x) - - -class Concat(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x1, x2): - diffy = x2.size()[2] - x1.size()[2] - diffx = x2.size()[3] - x1.size()[3] - - x1 = F.pad(x1, [diffx // 2, diffx - diffx // 2, - diffy // 2, diffy - diffy // 2]) - - return torch.cat([x2, x1], dim=1) - - -class TV1dLoss(nn.Module): - def __init__(self): - super(TV1dLoss, self).__init__() - - def forward(self, input): - # loss = torch.mean(torch.abs(input[:, :, :, :-1] - input[:, :, :, 1:])) + \ - # torch.mean(torch.abs(input[:, :, :-1, :] - input[:, :, 1:, :])) - loss = torch.mean(torch.abs(input[:, :-1] - input[:, 1:])) - - return loss - - -class TV2dLoss(nn.Module): - def __init__(self): - super(TV2dLoss, self).__init__() - - def forward(self, input): - loss = torch.mean(torch.abs(input[:, :, :, :-1] 
- input[:, :, :, 1:])) + \ - torch.mean(torch.abs(input[:, :, :-1, :] - input[:, :, 1:, :])) - return loss - - -class SSIM2dLoss(nn.Module): - def __init__(self): - super(SSIM2dLoss, self).__init__() - - def forward(self, input, targer): - loss = 0 - return loss - diff --git a/app/service/image2sketch/models/networks.py b/app/service/image2sketch/models/networks.py deleted file mode 100644 index fc341c2..0000000 --- a/app/service/image2sketch/models/networks.py +++ /dev/null @@ -1,734 +0,0 @@ -import functools - -from torch.nn import init -from torch.optim import lr_scheduler - -from .layer import * - - -############################################################################### -# Helper Functions -############################################################################### - - -class Identity(nn.Module): - def forward(self, x): - return x - - -def get_norm_layer(norm_type='instance'): - """Return a normalization layer - - Parameters: - norm_type (str) -- the name of the normalization layer: batch | instance | none - - For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev). - For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics. - """ - if norm_type == 'batch': - norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True) - elif norm_type == 'instance': - norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) - elif norm_type == 'none': - def norm_layer(x): - return Identity() - else: - raise NotImplementedError('normalization layer [%s] is not found' % norm_type) - return norm_layer - - -def get_scheduler(optimizer, opt): - """Return a learning rate scheduler - - Parameters: - optimizer -- the optimizer of the network - opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.  
- opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine - - For 'linear', we keep the same learning rate for the first epochs - and linearly decay the rate to zero over the next epochs. - For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers. - See https://pytorch.org/docs/stable/optim.html for more details. - """ - if opt.lr_policy == 'linear': - def lambda_rule(epoch): - lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1) - return lr_l - - scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule) - elif opt.lr_policy == 'step': - scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1) - elif opt.lr_policy == 'plateau': - scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5) - elif opt.lr_policy == 'cosine': - scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0) - else: - return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy) - return scheduler - - -def init_weights(net, init_type='normal', init_gain=0.02): - """Initialize network weights. - - Parameters: - net (network) -- network to be initialized - init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal - init_gain (float) -- scaling factor for normal, xavier and orthogonal. - - We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might - work better for some applications. Feel free to try yourself. 
- """ - - def init_func(m): # define the initialization function - classname = m.__class__.__name__ - if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): - if init_type == 'normal': - init.normal_(m.weight.data, 0.0, init_gain) - elif init_type == 'xavier': - init.xavier_normal_(m.weight.data, gain=init_gain) - elif init_type == 'kaiming': - init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') - elif init_type == 'orthogonal': - init.orthogonal_(m.weight.data, gain=init_gain) - else: - raise NotImplementedError('initialization method [%s] is not implemented' % init_type) - if hasattr(m, 'bias') and m.bias is not None: - init.constant_(m.bias.data, 0.0) - elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies. - init.normal_(m.weight.data, 1.0, init_gain) - init.constant_(m.bias.data, 0.0) - - print('initialize network with %s' % init_type) - net.apply(init_func) # apply the initialization function - - -def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]): - """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights - Parameters: - net (network) -- the network to be initialized - init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal - gain (float) -- scaling factor for normal, xavier and orthogonal. - gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 - - Return an initialized network. 
- """ - if len(gpu_ids) > 0: - assert (torch.cuda.is_available()) - net.to(gpu_ids[0]) - net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs - init_weights(net, init_type, init_gain=init_gain) - return net - - -def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]): - net = None - norm_layer = get_norm_layer(norm_type=norm) - - if netG == 'ref_unpair_cbam_cat': - net = ref_unpair(input_nc, output_nc, ngf, norm='inorm', status='ref_unpair_cbam_cat') - elif netG == 'ref_unpair_recon': - net = ref_unpair(input_nc, output_nc, ngf, norm='inorm', status='ref_unpair_recon') - elif netG == 'triplet': - net = triplet(input_nc, output_nc, ngf, norm='inorm') - - else: - raise NotImplementedError('Generator model name [%s] is not recognized' % netG) - return init_net(net, init_type, init_gain, gpu_ids) - - -class AdaIN(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x, y): - eps = 1e-5 - mean_x = torch.mean(x, dim=[2, 3]) - mean_y = torch.mean(y, dim=[2, 3]) - - std_x = torch.std(x, dim=[2, 3]) - std_y = torch.std(y, dim=[2, 3]) - - mean_x = mean_x.unsqueeze(-1).unsqueeze(-1) - mean_y = mean_y.unsqueeze(-1).unsqueeze(-1) - - std_x = std_x.unsqueeze(-1).unsqueeze(-1) + eps - std_y = std_y.unsqueeze(-1).unsqueeze(-1) + eps - - out = (x - mean_x) / std_x * std_y + mean_y - - return out - - -class HED(nn.Module): - def __init__(self): - super(HED, self).__init__() - - self.moduleVggOne = nn.Sequential( - nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1), - nn.ReLU(inplace=False), - nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1), - nn.ReLU(inplace=False) - ) - - self.moduleVggTwo = nn.Sequential( - nn.MaxPool2d(kernel_size=2, stride=2), - nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1), - nn.ReLU(inplace=False), - nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, 
padding=1), - nn.ReLU(inplace=False) - ) - - self.moduleVggThr = nn.Sequential( - nn.MaxPool2d(kernel_size=2, stride=2), - nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1), - nn.ReLU(inplace=False), - nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1), - nn.ReLU(inplace=False), - nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1), - nn.ReLU(inplace=False) - ) - - self.moduleVggFou = nn.Sequential( - nn.MaxPool2d(kernel_size=2, stride=2), - nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1), - nn.ReLU(inplace=False), - nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1), - nn.ReLU(inplace=False), - nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1), - nn.ReLU(inplace=False) - ) - - self.moduleVggFiv = nn.Sequential( - nn.MaxPool2d(kernel_size=2, stride=2), - nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1), - nn.ReLU(inplace=False), - nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1), - nn.ReLU(inplace=False), - nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1), - nn.ReLU(inplace=False) - ) - - self.moduleScoreOne = nn.Conv2d(in_channels=64, out_channels=1, kernel_size=1, stride=1, padding=0) - self.moduleScoreTwo = nn.Conv2d(in_channels=128, out_channels=1, kernel_size=1, stride=1, padding=0) - self.moduleScoreThr = nn.Conv2d(in_channels=256, out_channels=1, kernel_size=1, stride=1, padding=0) - self.moduleScoreFou = nn.Conv2d(in_channels=512, out_channels=1, kernel_size=1, stride=1, padding=0) - self.moduleScoreFiv = nn.Conv2d(in_channels=512, out_channels=1, kernel_size=1, stride=1, padding=0) - - self.moduleCombine = nn.Sequential( - nn.Conv2d(in_channels=5, out_channels=1, kernel_size=1, stride=1, padding=0), - nn.Sigmoid() - ) - - def forward(self, tensorInput): - tensorBlue = 
(tensorInput[:, 2:3, :, :] * 255.0) - 104.00698793 - tensorGreen = (tensorInput[:, 1:2, :, :] * 255.0) - 116.66876762 - tensorRed = (tensorInput[:, 0:1, :, :] * 255.0) - 122.67891434 - tensorInput = torch.cat([tensorBlue, tensorGreen, tensorRed], 1) - - tensorVggOne = self.moduleVggOne(tensorInput) - tensorVggTwo = self.moduleVggTwo(tensorVggOne) - tensorVggThr = self.moduleVggThr(tensorVggTwo) - tensorVggFou = self.moduleVggFou(tensorVggThr) - tensorVggFiv = self.moduleVggFiv(tensorVggFou) - - tensorScoreOne = self.moduleScoreOne(tensorVggOne) - tensorScoreTwo = self.moduleScoreTwo(tensorVggTwo) - tensorScoreThr = self.moduleScoreThr(tensorVggThr) - tensorScoreFou = self.moduleScoreFou(tensorVggFou) - tensorScoreFiv = self.moduleScoreFiv(tensorVggFiv) - - tensorScoreOne = nn.functional.interpolate(input=tensorScoreOne, size=(tensorInput.size(2), tensorInput.size(3)), mode='bilinear', align_corners=False) - tensorScoreTwo = nn.functional.interpolate(input=tensorScoreTwo, size=(tensorInput.size(2), tensorInput.size(3)), mode='bilinear', align_corners=False) - tensorScoreThr = nn.functional.interpolate(input=tensorScoreThr, size=(tensorInput.size(2), tensorInput.size(3)), mode='bilinear', align_corners=False) - tensorScoreFou = nn.functional.interpolate(input=tensorScoreFou, size=(tensorInput.size(2), tensorInput.size(3)), mode='bilinear', align_corners=False) - tensorScoreFiv = nn.functional.interpolate(input=tensorScoreFiv, size=(tensorInput.size(2), tensorInput.size(3)), mode='bilinear', align_corners=False) - - return self.moduleCombine(torch.cat([tensorScoreOne, tensorScoreTwo, tensorScoreThr, tensorScoreFou, tensorScoreFiv], 1)) - # return self.moduleCombine(torch.cat([ tensorScoreOne, tensorScoreTwo, tensorScoreThr, tensorScoreOne, tensorScoreTwo ], 1)) - - # return torch.sigmoid(tensorScoreOne),torch.sigmoid(tensorScoreTwo),torch.sigmoid(tensorScoreThr),torch.sigmoid(tensorScoreFou),torch.sigmoid(tensorScoreFiv),self.moduleCombine(torch.cat([ tensorScoreOne, 
tensorScoreTwo, tensorScoreThr, tensorScoreFou, tensorScoreFiv ], 1)) - # return torch.sigmoid(tensorScoreTwo) - - -def define_HED(init_weights_, gpu_ids_=[]): - net = HED() - - if len(gpu_ids_) > 0: - assert (torch.cuda.is_available()) - net.to(gpu_ids_[0]) - net = torch.nn.DataParallel(net, gpu_ids_) # multi-GPUs - - if not init_weights_ == None: - device = torch.device('cuda:{}'.format(gpu_ids_[0])) if gpu_ids_ else torch.device('cpu') - print('Loading model from: %s' % init_weights_) - state_dict = torch.load(init_weights_, map_location=str(device)) - if isinstance(net, torch.nn.DataParallel): - net.module.load_state_dict(state_dict) - else: - net.load_state_dict(state_dict) - print('load the weights successfully') - - return net - - -def define_styletps(init_weights_, gpu_ids_=[], shape=False): - net = None - if shape == False: - net = triplet() - if len(gpu_ids_) > 0: - assert (torch.cuda.is_available()) - net.to(gpu_ids_[0]) - net = torch.nn.DataParallel(net, gpu_ids_) # multi-GPUs - - if not init_weights_ == None: - device = torch.device('cuda:{}'.format(gpu_ids_[0])) if gpu_ids_ else torch.device('cpu') - print('Loading model from: %s' % init_weights_) - state_dict = torch.load(init_weights_, map_location=str(device)) - if isinstance(net, torch.nn.DataParallel): - net.module.load_state_dict(state_dict) - else: - net.load_state_dict(state_dict) - print('load the weights successfully') - - return net - - -class triplet(nn.Module): - def __init__(self): # mnblk=4 - super(triplet, self).__init__() - - # self.channels = nch_in - self.nch_in = 1 - self.nch_out = 1 - self.nch_ker = 64 - self.norm = 'bnorm' - # self.nblk = nblk - - if self.norm == 'bnorm': - self.bias = False - else: - self.bias = True - - self.conv0 = CNR2d(self.nch_in, self.nch_ker, kernel_size=7, stride=1, padding=3, norm=self.norm, relu=0.0) - self.conv1 = CNR2d(self.nch_ker, 2 * self.nch_ker, kernel_size=4, stride=2, padding=1, norm=self.norm, relu=0.0) - self.conv2 = CNR2d(2 * self.nch_ker, 
4 * self.nch_ker, kernel_size=4, stride=2, padding=1, norm=self.norm, relu=0.0) - - self.final_pool = nn.AdaptiveAvgPool2d((1, 1)) - self.linear = nn.Linear(256, 128) - - def forward(self, x, y, z): - - x = self.conv0(x) - x = self.conv1(x) - x = self.conv2(x) - x = self.final_pool(x) - x = torch.flatten(x, 1) - x = self.linear(x) - - y = self.conv0(y) - y = self.conv1(y) - y = self.conv2(y) - y = self.final_pool(y) - y = torch.flatten(y, 1) - y = self.linear(y) - - z = self.conv0(z) - z = self.conv1(z) - z = self.conv2(z) - z = self.final_pool(z) - z = torch.flatten(z, 1) - z = self.linear(z) - - return x, y, z - - -class MLP(nn.Module): - def __init__(self, input_dim, output_dim, dim, n_blk, norm='none', activ='relu'): - super(MLP, self).__init__() - self.model = [] - self.model += [LinearBlock(input_dim, dim, norm=norm, activation=activ)] - for i in range(n_blk - 2): - self.model += [LinearBlock(dim, dim, norm=norm, activation=activ)] - self.model += [LinearBlock(dim, output_dim, norm='none', activation='none')] # no output activations - self.model = nn.Sequential(*self.model) - - def forward(self, x): - return self.model(x.view(x.size(0), -1)) - - -class ref_unpair(nn.Module): - def __init__(self, nch_in, nch_out, nch_ker=64, norm='bnorm', nblk=4, status='ref_unpair'): - super(ref_unpair, self).__init__() - - nch_ker = 64 - # self.channels = nch_in - self.nch_in = nch_in - self.nchs_in = 1 - self.status = status - - if self.status == 'ref_unpair_recon': - self.nch_out = 3 - self.nch_in = 1 - else: - self.nch_out = 1 - - self.nch_ker = nch_ker - self.norm = norm - self.nblk = nblk - self.dec0 = [] - - if status == 'ref_unpair_cbam_cat': - self.cbam_c = CBAM(nch_ker * 8, 16, 3, cbam_status="channel") - self.cbam_s = CBAM(nch_ker * 8, 16, 3, cbam_status="spatial") - - self.enc1_s = CNR2d(self.nchs_in, self.nch_ker, kernel_size=7, stride=1, padding=3, norm=self.norm, relu=0.0) - self.enc2_s = CNR2d(self.nch_ker, 2 * self.nch_ker, kernel_size=4, stride=2, padding=1, 
norm=self.norm, relu=0.0) - self.enc3_s = CNR2d(2 * self.nch_ker, 4 * self.nch_ker, kernel_size=4, stride=2, padding=1, norm=self.norm, relu=0.0) - self.enc4_s = CNR2d(4 * self.nch_ker, 8 * self.nch_ker, kernel_size=4, stride=2, padding=1, norm=self.norm, relu=0.0) - - if norm == 'bnorm': - self.bias = False - else: - self.bias = True - - self.enc1_c = CNR2d(self.nch_in, self.nch_ker, kernel_size=7, stride=1, padding=3, norm=self.norm, relu=0.0) - self.enc2_c = CNR2d(self.nch_ker, 2 * self.nch_ker, kernel_size=4, stride=2, padding=1, norm=self.norm, relu=0.0) - self.enc3_c = CNR2d(2 * self.nch_ker, 4 * self.nch_ker, kernel_size=4, stride=2, padding=1, norm=self.norm, relu=0.0) - self.enc4_c = CNR2d(4 * self.nch_ker, 8 * self.nch_ker, kernel_size=4, stride=2, padding=1, norm=self.norm, relu=0.0) - - if status == 'ref_unpair_cbam_cat': - self.res_cat1 = ResBlock_cat(8 * self.nch_ker, 8 * self.nch_ker, kernel_size=3, stride=1, padding=1, norm=self.norm, relu=0.0, padding_mode='reflection') - self.res_cat2 = ResBlock_cat(8 * self.nch_ker, 8 * self.nch_ker, kernel_size=3, stride=1, padding=1, norm=self.norm, relu=0.0, padding_mode='reflection') - self.res_cat3 = ResBlock_cat(8 * self.nch_ker, 8 * self.nch_ker, kernel_size=3, stride=1, padding=1, norm=self.norm, relu=0.0, padding_mode='reflection') - self.res_cat4 = ResBlock_cat(8 * self.nch_ker, 8 * self.nch_ker, kernel_size=3, stride=1, padding=1, norm=self.norm, relu=0.0, padding_mode='reflection') - - if self.nblk and status != 'ref_unpair_cbam_cat': - res = [] - for i in range(self.nblk): - res += [ResBlock(8 * self.nch_ker, 8 * self.nch_ker, kernel_size=3, stride=1, padding=1, norm=self.norm, relu=0.0, padding_mode='reflection')] - self.res1 = nn.Sequential(*res) - - # self.dec0 += [DECNR2d(16 * self.nch_ker, 8 * self.nch_ker, kernel_size=4, stride=2, padding=1, norm=self.norm, relu=0.0)] - self.dec0 += [DECNR2d(8 * self.nch_ker, 4 * self.nch_ker, kernel_size=4, stride=2, padding=1, norm=self.norm, relu=0.0)] - 
self.dec0 += [DECNR2d(4 * self.nch_ker, 2 * self.nch_ker, kernel_size=4, stride=2, padding=1, norm=self.norm, relu=0.0)] - self.dec0 += [DECNR2d(2 * self.nch_ker, 1 * self.nch_ker, kernel_size=4, stride=2, padding=1, norm=self.norm, relu=0.0)] - self.dec0 += [DECNR2d(1 * self.nch_ker, 1 * self.nch_ker, kernel_size=7, stride=1, padding=3, norm=self.norm, relu=0.0)] - self.dec0 += [nn.Conv2d(1 * self.nch_ker, self.nch_out, kernel_size=3, stride=1, padding=1)] - - self.dec = nn.Sequential(*self.dec0) - - def forward(self, content, style): - - content_cs = self.enc1_c(content) - content_cs = self.enc2_c(content_cs) - content_cs = self.enc3_c(content_cs) - content_cs = self.enc4_c(content_cs) - # content_cs = self.enc5_c(content_cs) - - if self.status == 'ref_unpair_cbam_cat': - cbam_content_cs = self.cbam_s(content_cs) - sp_content_cs = content_cs + cbam_content_cs - - style_cs = self.enc1_s(style) - style_cs = self.enc2_s(style_cs) - style_cs = self.enc3_s(style_cs) - style_cs = self.enc4_s(style_cs) - - cbam_style_cs = self.cbam_c(style_cs) - ch_style_cs = style_cs + cbam_style_cs - - content_output = self.adaptive_instance_normalization(content_cs, style_cs) - cbam_content_output = self.adaptive_instance_normalization(sp_content_cs, ch_style_cs) - - content_output = self.res_cat1(content_output, cbam_content_output) - content_output = self.res_cat2(content_output, cbam_content_output) - content_output = self.res_cat3(content_output, cbam_content_output) - content_output = self.res_cat4(content_output, cbam_content_output) - - - else: - content_output = content_cs - - if self.nblk and self.status != 'ref_unpair_cbam_cat': - content_cs = self.res1(content_output) - - content_output = self.dec(content_output) - - content_output = torch.tanh(content_output) - - return content_output - - def calc_mean_std(self, feat, eps=1e-5): - # eps is a small value added to the variance to avoid divide-by-zero. 
- size = feat.size() - assert (len(size) == 4) - N, C = size[:2] - feat_var = feat.view(N, C, -1).var(dim=2) + eps - feat_std = feat_var.sqrt().view(N, C, 1, 1) - feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1) - return feat_mean, feat_std - - def adaptive_instance_normalization(self, content_feat, style_feat): - assert (content_feat.size()[:2] == style_feat.size()[:2]) - size = content_feat.size() - style_mean, style_std = self.calc_mean_std(style_feat) - content_mean, content_std = self.calc_mean_std(content_feat) - - normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size) - return normalized_feat * style_std.expand(size) + style_mean.expand(size) - - -def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]): - net = None - norm_layer = get_norm_layer(norm_type=norm) - - if netD == 'basic': # default PatchGAN classifier - net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer) - elif netD == 'n_layers': # more options - net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer) - elif netD == 'pixel': # classify if each pixel is real or fake - net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer) - else: - raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD) - return init_net(net, init_type, init_gain, gpu_ids) - - -############################################################################## -# Classes -############################################################################## -class GANLoss(nn.Module): - """Define different GAN objectives. - - The GANLoss class abstracts away the need to create the target label tensor - that has the same size as the input. - """ - - def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0): - """ Initialize the GANLoss class. - - Parameters: - gan_mode (str) - - the type of GAN objective. 
It currently supports vanilla, lsgan, and wgangp. - target_real_label (bool) - - label for a real image - target_fake_label (bool) - - label of a fake image - - Note: Do not use sigmoid as the last layer of Discriminator. - LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss. - """ - super(GANLoss, self).__init__() - self.register_buffer('real_label', torch.tensor(target_real_label)) - self.register_buffer('fake_label', torch.tensor(target_fake_label)) - self.gan_mode = gan_mode - if gan_mode == 'lsgan': - self.loss = nn.MSELoss() - elif gan_mode == 'vanilla': - self.loss = nn.BCEWithLogitsLoss() - elif gan_mode in ['wgangp']: - self.loss = None - else: - raise NotImplementedError('gan mode %s not implemented' % gan_mode) - - def get_target_tensor(self, prediction, target_is_real): - if target_is_real: - target_tensor = self.real_label - else: - target_tensor = self.fake_label - return target_tensor.expand_as(prediction) - - def __call__(self, prediction, target_is_real): - if self.gan_mode in ['lsgan', 'vanilla']: - target_tensor = self.get_target_tensor(prediction, target_is_real) - loss = self.loss(prediction, target_tensor) - elif self.gan_mode == 'wgangp': - if target_is_real: - loss = -prediction.mean() - else: - loss = prediction.mean() - return loss - - -def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0): - if lambda_gp > 0.0: - if type == 'real': # either use real images, fake images, or a linear interpolation of two. 
- interpolatesv = real_data - elif type == 'fake': - interpolatesv = fake_data - elif type == 'mixed': - alpha = torch.rand(real_data.shape[0], 1, device=device) - alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape) - interpolatesv = alpha * real_data + ((1 - alpha) * fake_data) - else: - raise NotImplementedError('{} not implemented'.format(type)) - interpolatesv.requires_grad_(True) - disc_interpolates = netD(interpolatesv) - gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv, - grad_outputs=torch.ones(disc_interpolates.size()).to(device), - create_graph=True, retain_graph=True, only_inputs=True) - gradients = gradients[0].view(real_data.size(0), -1) # flat the data - gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps - return gradient_penalty, gradients - else: - return 0.0, None - - -class NLayerDiscriminator(nn.Module): - """Defines a PatchGAN discriminator""" - - def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d): - """Construct a PatchGAN discriminator - - Parameters: - input_nc (int) -- the number of channels in input images - ndf (int) -- the number of filters in the last conv layer - n_layers (int) -- the number of conv layers in the discriminator - norm_layer -- normalization layer - """ - super(NLayerDiscriminator, self).__init__() - if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters - use_bias = norm_layer.func == nn.InstanceNorm2d - else: - use_bias = norm_layer == nn.InstanceNorm2d - kw = 4 - padw = 1 - sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)] - nf_mult = 1 - nf_mult_prev = 1 - for n in range(1, n_layers): # gradually increase the number of filters - nf_mult_prev = nf_mult - nf_mult = min(2 ** n, 8) - sequence += [ - nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, 
kernel_size=kw, stride=2, padding=padw, bias=use_bias), - norm_layer(ndf * nf_mult), - nn.LeakyReLU(0.2, True) - ] - - nf_mult_prev = nf_mult - nf_mult = min(2 ** n_layers, 8) - sequence += [ - nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), - norm_layer(ndf * nf_mult), - nn.LeakyReLU(0.2, True) - ] - - sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map - self.model = nn.Sequential(*sequence) - - def forward(self, input): - """Standard forward.""" - return self.model(input) - - -class PixelDiscriminator(nn.Module): - """Defines a 1x1 PatchGAN discriminator (pixelGAN)""" - - def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d): - """Construct a 1x1 PatchGAN discriminator - - Parameters: - input_nc (int) -- the number of channels in input images - ndf (int) -- the number of filters in the last conv layer - norm_layer -- normalization layer - """ - super(PixelDiscriminator, self).__init__() - if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters - use_bias = norm_layer.func == nn.InstanceNorm2d - else: - use_bias = norm_layer == nn.InstanceNorm2d - - self.net = [ - nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0), - nn.LeakyReLU(0.2, True), - nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias), - norm_layer(ndf * 2), - nn.LeakyReLU(0.2, True), - nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)] - - self.net = nn.Sequential(*self.net) - - def forward(self, input): - """Standard forward.""" - return self.net(input) - - -class CBAM(nn.Module): - def __init__(self, n_channels_in, reduction_ratio, kernel_size, cbam_status): - super(CBAM, self).__init__() - self.n_channels_in = n_channels_in - self.reduction_ratio = reduction_ratio - self.kernel_size = kernel_size - self.channel_attention = ChannelAttention_nopara(n_channels_in, 
reduction_ratio) - self.spatial_attention = SpatialAttention_nopara(kernel_size) - self.status = cbam_status - - def forward(self, x): - ## We don't use cbam in this version - if self.status == "cbam": - chan_att = self.channel_attention(x) - fp = chan_att * x - spat_att = self.spatial_attention(fp) - fpp = spat_att * fp - - if self.status == "spatial": - spat_att = self.spatial_attention(x) # * s_para_1d - fpp = spat_att * x - if self.status == "channel": - chan_att = self.channel_attention(x) # * c_para_1d - fpp = chan_att * x - - return fpp # ,c_wgt,s_wgt - - -class SpatialAttention_nopara(nn.Module): - def __init__(self, kernel_size): - super(SpatialAttention_nopara, self).__init__() - self.kernel_size = kernel_size - assert kernel_size % 2 == 1, "Odd kernel size required" - self.conv = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=kernel_size, padding=int((kernel_size - 1) / 2)) - - def forward(self, x): - max_pool = self.agg_channel(x, "max") - avg_pool = self.agg_channel(x, "avg") - pool = torch.cat([max_pool, avg_pool], dim=1) - conv = self.conv(pool) - conv = conv.repeat(1, x.size()[1], 1, 1) - att = torch.sigmoid(conv) - return att - - def agg_channel(self, x, pool="max"): - b, c, h, w = x.size() - x = x.view(b, c, h * w) - x = x.permute(0, 2, 1) - if pool == "max": - x = F.max_pool1d(x, c) - elif pool == "avg": - x = F.avg_pool1d(x, c) - x = x.permute(0, 2, 1) - x = x.view(b, 1, h, w) - return x - - -class ChannelAttention_nopara(nn.Module): - def __init__(self, n_channels_in, reduction_ratio): - super(ChannelAttention_nopara, self).__init__() - self.n_channels_in = n_channels_in - self.reduction_ratio = reduction_ratio - self.middle_layer_size = int(self.n_channels_in / float(self.reduction_ratio)) - self.bottleneck = nn.Sequential( - nn.Linear(self.n_channels_in, self.middle_layer_size), - nn.ReLU(), - nn.Linear(self.middle_layer_size, self.n_channels_in) - ) - - def forward(self, x): - kernel = (x.size()[2], x.size()[3]) - avg_pool = 
F.avg_pool2d(x, kernel) - max_pool = F.max_pool2d(x, kernel) - avg_pool = avg_pool.view(avg_pool.size()[0], -1) - max_pool = max_pool.view(max_pool.size()[0], -1) - avg_pool_bck = self.bottleneck(avg_pool) - max_pool_bck = self.bottleneck(max_pool) - pool_sum = avg_pool_bck + max_pool_bck - sig_pool = torch.sigmoid(pool_sum) - sig_pool = sig_pool.unsqueeze(2).unsqueeze(3) - # out = sig_pool.repeat(1,1,kernel[0], kernel[1]) - - return sig_pool diff --git a/app/service/image2sketch/models/perceptual.py b/app/service/image2sketch/models/perceptual.py deleted file mode 100644 index 666fab8..0000000 --- a/app/service/image2sketch/models/perceptual.py +++ /dev/null @@ -1,86 +0,0 @@ -import torch -import torchvision - -class VGGPerceptualLoss(torch.nn.Module): - def __init__(self, resize=True): - super(VGGPerceptualLoss, self).__init__() - blocks = [] - blocks.append(torchvision.models.vgg16(pretrained=True).features[:4].eval()) - blocks.append(torchvision.models.vgg16(pretrained=True).features[4:9].eval()) - blocks.append(torchvision.models.vgg16(pretrained=True).features[9:16].eval()) - blocks.append(torchvision.models.vgg16(pretrained=True).features[16:23].eval()) - for bl in blocks: - for p in bl: - p.requires_grad = False - self.blocks = torch.nn.ModuleList(blocks) - self.transform = torch.nn.functional.interpolate - self.mean = torch.nn.Parameter(torch.tensor([0.485, 0.456, 0.406]).view(1,3,1,1)) - self.std = torch.nn.Parameter(torch.tensor([0.229, 0.224, 0.225]).view(1,3,1,1)) - self.resize = resize - - def forward(self, input, target, feature_layers=[0, 1, 2, 3], style_layers=[]): - if input.shape[1] != 3: - input = input.repeat(1, 3, 1, 1) - target = target.repeat(1, 3, 1, 1) - input = (input-self.mean) / self.std - target = (target-self.mean) / self.std - if self.resize: - input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False) - target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False) - loss = 0.0 - x = 
input - y = target - for i, block in enumerate(self.blocks): - x = block(x) - y = block(y) - if i in feature_layers: - loss += torch.nn.functional.l1_loss(x, y) - if i in style_layers: - act_x = x.reshape(x.shape[0], x.shape[1], -1) - act_y = y.reshape(y.shape[0], y.shape[1], -1) - gram_x = act_x @ act_x.permute(0, 2, 1) - gram_y = act_y @ act_y.permute(0, 2, 1) - loss += torch.nn.functional.l1_loss(gram_x, gram_y) - return loss - -class VGGstyleLoss(torch.nn.Module): - def __init__(self, resize=True): - super(VGGstyleLoss, self).__init__() - blocks = [] - blocks.append(torchvision.models.vgg16(pretrained=True).features[:4].eval()) - blocks.append(torchvision.models.vgg16(pretrained=True).features[4:9].eval()) - blocks.append(torchvision.models.vgg16(pretrained=True).features[9:16].eval()) - blocks.append(torchvision.models.vgg16(pretrained=True).features[16:23].eval()) - for bl in blocks: - for p in bl: - p.requires_grad = False - self.blocks = torch.nn.ModuleList(blocks) - self.transform = torch.nn.functional.interpolate - self.mean = torch.nn.Parameter(torch.tensor([0.485, 0.456, 0.406]).view(1,3,1,1)) - self.std = torch.nn.Parameter(torch.tensor([0.229, 0.224, 0.225]).view(1,3,1,1)) - self.resize = resize - - def forward(self, input, target, feature_layers=[0,1,2,3], style_layers=[]): - if input.shape[1] != 3: - input = input.repeat(1, 3, 1, 1) - target = target.repeat(1, 3, 1, 1) - input = (input-self.mean) / self.std - target = (target-self.mean) / self.std - if self.resize: - input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False) - target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False) - loss = 0.0 - x = input - y = target - for i, block in enumerate(self.blocks): - x = block(x) - y = block(y) - if i in feature_layers: - loss += torch.nn.functional.l1_loss(x, y) - if i in style_layers: - act_x = x.reshape(x.shape[0], x.shape[1], -1) - act_y = y.reshape(y.shape[0], y.shape[1], -1) - gram_x = act_x 
@ act_x.permute(0, 2, 1) - gram_y = act_y @ act_y.permute(0, 2, 1) - loss += torch.nn.functional.l1_loss(gram_x, gram_y) - return loss diff --git a/app/service/image2sketch/models/template_model.py b/app/service/image2sketch/models/template_model.py deleted file mode 100644 index 45c68b2..0000000 --- a/app/service/image2sketch/models/template_model.py +++ /dev/null @@ -1,82 +0,0 @@ -import torch -from .base_model import BaseModel -from . import networks - - -class TemplateModel(BaseModel): - @staticmethod - def modify_commandline_options(parser, is_train=True): - """Add new model-specific options and rewrite default values for existing options. - - Parameters: - parser -- the option parser - is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options. - - Returns: - the modified parser. - """ - parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset. - if is_train: - parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss') # You can define new arguments for this model. - - return parser - - def __init__(self, opt): - """Initialize this model class. - - Parameters: - opt -- training/test options - - A few things can be done here. - - (required) call the initialization function of BaseModel - - define loss function, visualization images, model names, and optimizers - """ - BaseModel.__init__(self, opt) # call the initialization method of BaseModel - # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk. - self.loss_names = ['loss_G'] - # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images. 
- self.visual_names = ['data_A', 'data_B', 'output'] - # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks. - # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them. - self.model_names = ['G'] - # define networks; you can use opt.isTrain to specify different behaviors for training and test. - self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids) - if self.isTrain: # only defined during training time - # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss. - # We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device) - self.criterionLoss = torch.nn.L1Loss() - # define and initialize optimizers. You can define one optimizer for each network. - # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example. - self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)) - self.optimizers = [self.optimizer] - - # Our program will automatically call to define schedulers, load networks, and print networks - - def set_input(self, input): - """Unpack input data from the dataloader and perform necessary pre-processing steps. - - Parameters: - input: a dictionary that contains the data itself and its metadata information. - """ - AtoB = self.opt.direction == 'AtoB' # use to swap data_A and data_B - self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A - self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B - self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths - - def forward(self): - """Run forward pass. 
This will be called by both functions and .""" - self.output = self.netG(self.data_A) # generate output image given the input data_A - - def backward(self): - """Calculate losses, gradients, and update network weights; called in every training iteration""" - # caculate the intermediate results if necessary; here self.output has been computed during function - # calculate loss given the input and intermediate results - self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression - self.loss_G.backward() # calculate gradients of network G w.r.t. loss_G - - def optimize_parameters(self): - """Update network weights; it will be called in every training iteration.""" - self.forward() # first call forward to calculate intermediate results - self.optimizer.zero_grad() # clear network G's existing gradients - self.backward() # calculate gradients for network G - self.optimizer.step() # update gradients for network G diff --git a/app/service/image2sketch/models/test_model.py b/app/service/image2sketch/models/test_model.py deleted file mode 100644 index 2f70821..0000000 --- a/app/service/image2sketch/models/test_model.py +++ /dev/null @@ -1,45 +0,0 @@ -from .base_model import BaseModel -from . import networks - - -class TestModel(BaseModel): - """ This TesteModel can be used to generate CycleGAN results for only one direction. - This model will automatically set '--dataset_mode single', which only loads the images from one collection. - - See the test instruction for more details. 
- """ - @staticmethod - def modify_commandline_options(parser, is_train=True): - assert not is_train, 'TestModel cannot be used during training time' - parser.set_defaults(dataset_mode='single') - parser.add_argument('--model_suffix', type=str, default='', help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will be loaded as the generator.') - - return parser - - def __init__(self, opt): - assert(not opt.isTrain) - BaseModel.__init__(self, opt) - # specify the training losses you want to print out. The training/test scripts will call - self.loss_names = [] - # specify the images you want to save/display. The training/test scripts will call - self.visual_names = ['real', 'fake'] - # specify the models you want to save to the disk. The training/test scripts will call and - self.model_names = ['G' + opt.model_suffix] # only generator is needed. - self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, - opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) - - # assigns the model to self.netG_[suffix] so that it can be loaded - # please see - setattr(self, 'netG' + opt.model_suffix, self.netG) # store netG in self. - - def set_input(self, input): - self.real = input['A'].to(self.device) - self.image_paths = input['A_paths'] - - def forward(self): - """Run forward pass.""" - self.fake = self.netG(self.real) # G(real) - - def optimize_parameters(self): - """No optimization for test model.""" - pass diff --git a/app/service/image2sketch/models/triplet_model.py b/app/service/image2sketch/models/triplet_model.py deleted file mode 100644 index a667d49..0000000 --- a/app/service/image2sketch/models/triplet_model.py +++ /dev/null @@ -1,68 +0,0 @@ -import torch -from .base_model import BaseModel -from . 
import networks -from util.image_pool import ImagePool - - -class TripletModel(BaseModel): - - @staticmethod - def modify_commandline_options(parser, is_train=True): - parser.set_defaults(norm='batch', netG='triplet', dataset_mode='triplet') - if is_train: - parser.set_defaults(pool_size=0, gan_mode='vanilla') - parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss') - - return parser - - def __init__(self, opt): - - BaseModel.__init__(self, opt) - - self.loss_names = ['G_triplet'] - self.visual_names = ['x','y'] - - if self.isTrain: - self.model_names = ['G'] - else: - self.model_names = ['G'] - self.netG = networks.define_G(1, 1, opt.ngf, opt.netG, opt.norm, - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) - - - if self.isTrain: - self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images - self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images - - self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) - self.criterionL1 = torch.nn.L1Loss() - - self.triplet = torch.nn.TripletMarginLoss(margin=3.0) - self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)) - self.optimizers.append(self.optimizer_G) - - def set_input(self, input): - AtoB = self.opt.direction == 'AtoB' - self.real_A = input['A' if AtoB else 'B'].to(self.device) - self.real_B = input['B' if AtoB else 'A'].to(self.device) - self.real_C = input['C'].to(self.device) - - self.image_paths = input['A_paths' if AtoB else 'B_paths'] - - - - def forward(self): - self.x,self.y,self.z = self.netG(self.real_A,self.real_B,self.real_C) - - - def backward_G(self): - self.loss_G_triplet_1 = self.triplet(self.x,self.y,self.z) - self.loss_G_triplet = self.loss_G_triplet_1 - - self.loss_G = self.loss_G_triplet - self.loss_G.backward() - - def optimize_parameters(self): - self.optimizer_G.zero_grad() - self.backward_G() - 
self.optimizer_G.step() diff --git a/app/service/image2sketch/models/unpaired_model.py b/app/service/image2sketch/models/unpaired_model.py deleted file mode 100644 index 9c043ca..0000000 --- a/app/service/image2sketch/models/unpaired_model.py +++ /dev/null @@ -1,144 +0,0 @@ -import torch - -from . import networks -from .base_model import BaseModel -from .perceptual import VGGPerceptualLoss -from ..util.image_pool import ImagePool - - -class UnpairedModel(BaseModel): - - @staticmethod - def modify_commandline_options(parser, is_train=True): - parser.set_defaults(norm='batch', netG='ref_unpair_cbam_cat', netG2='ref_unpair_recon', dataset_mode='unaligned') - if is_train: - parser.set_defaults(pool_size=0, gan_mode='vanilla') - parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss') - - return parser - - def __init__(self, opt): - BaseModel.__init__(self, opt) - # specify the training losses you want to print out. The training/test scripts will call - self.loss_names = ['G_GAN', 'G_L1_1', 'G_Rec', 'G_line', 'D_real', 'D_fake'] - self.visual_names = ['real_A', 'content_output', 'real_B'] - - if self.isTrain: - self.model_names = ['G_A', 'G_B', 'D'] - else: # during test time, only load G - self.model_names = ['G_A', 'G_B'] - # define networks (both generator and discriminator) - self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm, - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) - self.netG_B = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG2, opt.norm, - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) - - if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc - self.netD = networks.define_D(1, opt.ndf, opt.netD, - opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) - self.styletps = 
networks.define_styletps(init_weights_='./checkpoints/contrastive_pretrained.pth', gpu_ids_=self.gpu_ids, shape=False) - self.HED = networks.define_HED(init_weights_='./checkpoints/network-bsds500.pytorch', gpu_ids_=self.gpu_ids) - - if self.isTrain: # define discriminators - self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD, - opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) - self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD, - opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) - - if self.isTrain: - self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images - self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images - # define loss functions - self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) - self.criterionL1_1 = torch.nn.L1Loss() - self.criterionL1_2 = torch.nn.L1Loss() - self.criterionL1_3 = torch.nn.L1Loss() - self.per_loss_1 = VGGPerceptualLoss().to(self.device) - self.per_loss_2 = VGGPerceptualLoss().to(self.device) - self.per_loss_3 = VGGPerceptualLoss().to(self.device) - - self.optimizer_GA = torch.optim.Adam(self.netG_A.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)) - self.optimizer_GB = torch.optim.Adam(self.netG_B.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)) - - self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)) - self.optimizers.append(self.optimizer_GA) - self.optimizers.append(self.optimizer_GB) - - self.optimizers.append(self.optimizer_D) - - def set_input(self, input): - """Unpack input data from the dataloader and perform necessary pre-processing steps. - - Parameters: - input (dict): include the data itself and its metadata information. - - The option 'direction' can be used to swap images in domain A and domain B. 
- """ - AtoB = self.opt.direction == 'AtoB' - self.real_A = input['A' if AtoB else 'B'].to(self.device) - self.real_B = input['B' if AtoB else 'A'].to(self.device) - # self.image_paths = input['A_paths' if AtoB else 'B_paths'] - - def forward(self): - """Run forward pass; called by both functions and .""" - self.content_output = self.netG_A(self.real_A, self.real_B) - self.rec_output = self.netG_B(self.content_output, self.content_output) - - def update_process(self, epoch, total_epoch): - self.epoch_count = epoch - self.epoch_count_total = total_epoch - - def backward_D(self): - """Calculate GAN loss for the discriminator - - Parameters: - netD (network) -- the discriminator D - real (tensor array) -- real images - fake (tensor array) -- images generated by a generator - - Return the discriminator loss. - We also call loss_D.backward() to calculate the gradients. - """ - # Real - pred_real = self.netD(self.real_B) - self.loss_D_real = self.criterionGAN(pred_real, True) - # Fake - pred_fake = self.netD(self.content_output.detach()) - self.loss_D_fake = self.criterionGAN(pred_fake, False) - # Combined loss and calculate gradients - loss_D = (self.loss_D_real + self.loss_D_fake) * 0.5 - loss_D.backward() - return loss_D - - def backward_G(self): - """Calculate GAN and L1 loss for the generator""" - - pred_fake = self.netD(self.content_output) - self.loss_G_GAN = self.criterionGAN(pred_fake, True) - - self.content_output_line = self.HED(self.real_A) - self.rec_output_line = self.HED(self.rec_output) - self.t1, self.t2, _ = self.styletps(self.content_output, self.real_B, self.real_B) - - decay_lambda = 5 - ((self.epoch_count * 4.5) / self.epoch_count_total) - self.loss_G_L1_1 = self.criterionL1_1(self.t1, self.t2) * 10 - self.loss_G_Rec = self.per_loss_2(self.real_A, self.rec_output) * decay_lambda - self.loss_G_line = self.per_loss_3(self.content_output_line, self.rec_output_line) * decay_lambda - - self.loss_G = self.loss_G_GAN + self.loss_G_L1_1 + self.loss_G_Rec + 
self.loss_G_line - self.loss_G.backward() - - def optimize_parameters(self): - self.forward() # compute fake images: G(A) - # update D - self.set_requires_grad(self.netD, True) # enable backprop for D - self.optimizer_D.zero_grad() # set D's gradients to zero - self.backward_D() # calculate gradients for backward_D_unsuper - self.optimizer_D.step() # update D's weights - # update G - self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G - self.optimizer_GA.zero_grad() # set G's gradients to zero - self.optimizer_GB.zero_grad() # set G's gradients to zero - self.backward_G() # calculate graidents for G - self.optimizer_GA.step() # udpate G's weights - self.optimizer_GB.step() # udpate G's weights diff --git a/app/service/image2sketch/opt.py b/app/service/image2sketch/opt.py deleted file mode 100644 index eb453fb..0000000 --- a/app/service/image2sketch/opt.py +++ /dev/null @@ -1,57 +0,0 @@ -from app.core.config import DEBUG - - -class Config: - def __init__(self): - # 基本参数 - self.dataroot = "app/service/image2sketch/datasets/ref_unpair" - self.name = 'semi_unpair' - self.gpu_ids = [0] - # 模型参数 - self.model = 'unpaired' - self.input_nc = 3 - self.output_nc = 3 - self.ngf = 64 - self.ndf = 64 - self.netD = 'basic' - self.netG = 'ref_unpair_cbam_cat' - self.netG2 = 'ref_unpair_recon' - self.n_layers_D = 3 - self.norm = 'instance' - self.init_type = 'normal' - self.init_gain = 0.02 - self.no_dropout = False # 对应 `--no_dropout` - # 数据集参数 - self.dataset_mode = 'single' - self.direction = 'AtoB' - self.serial_batches = True # 对应 `--serial_batches` - self.num_threads = 4 - self.batch_size = 4 - self.load_size = 512 - self.crop_size = 512 - self.max_dataset_size = float("inf") - self.preprocess = 'resize_and_crop' - self.no_flip = False # 对应 `--no_flip` - self.display_winsize = 256 - # 额外参数 - self.epoch = '100' - self.load_iter = 0 - self.verbose = False # 对应 `--verbose` - self.suffix = '' - self.isTrain = False - self.results_dir = 
'service/image2sketch/results' - self.aspect_ratio = 1.0 - self.phase = 'test' - self.eval = False - self.num_test = 1000 - self.morm = 'batch' - if DEBUG: - self.style_image1 = "service/image2sketch/datasets/ref_unpair/testC/style_1.jpg" - self.style_image2 = "service/image2sketch/datasets/ref_unpair/testC/style_2.jpeg" - self.style_image3 = "service/image2sketch/datasets/ref_unpair/testC/style_3.png" - self.checkpoints_dir = 'service/image2sketch/checkpoints/' - else: - self.checkpoints_dir = 'app/service/image2sketch/checkpoints/' - self.style_image1 = "app/service/image2sketch/datasets/ref_unpair/testC/style_1.jpg" - self.style_image2 = "app/service/image2sketch/datasets/ref_unpair/testC/style_2.jpeg" - self.style_image3 = "app/service/image2sketch/datasets/ref_unpair/testC/style_3.png" diff --git a/app/service/image2sketch/server.py b/app/service/image2sketch/server.py deleted file mode 100644 index 3094eea..0000000 --- a/app/service/image2sketch/server.py +++ /dev/null @@ -1,88 +0,0 @@ -import logging - -import cv2 -import numpy as np -import torch -import torchvision.transforms as transforms -from PIL import Image - -from app.schemas.image2sketch import Image2SketchModel -from app.service.image2sketch.infer import tensor2im -from app.service.image2sketch.models import create_model -from app.service.image2sketch.opt import Config -from app.service.utils.oss_client import oss_get_image, oss_upload_image - -logger = logging.getLogger() - - -def tensor2im(input_image, imtype=np.uint8): - if not isinstance(input_image, np.ndarray): - if isinstance(input_image, torch.Tensor): # get the data from a variable - image_tensor = input_image.data - else: - return input_image - image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array - if image_numpy.shape[0] == 1: # grayscale to RGB - image_numpy = np.tile(image_numpy, (3, 1, 1)) - image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: tranpose and scaling - 
else: # if it is a numpy array, do nothing - image_numpy = input_image - return image_numpy.astype(imtype) - - -class Image2SketchServer: - def __init__(self, request_data): - self.image_url = request_data.image_url - self.style_image_url = request_data.style_image_url - self.sketch_bucket = request_data.sketch_bucket - self.sketch_name = request_data.sketch_name - self.opt = Config() - self.opt.num_threads = 0 # test code only supports num_threads = 0 - self.opt.batch_size = 1 # test code only supports batch_size = 1 - self.opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed. - self.opt.no_flip = True # no flip; comment this line if results on flipped images are needed. - self.opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file. - self.data = {} - device = torch.device("cuda:0") - self.model = create_model(self.opt) - self.model.setup(self.opt) - transform_list = [transforms.ToTensor(), transforms.Normalize([0.5], [0.5])] - transform = transforms.Compose(transform_list) - if request_data.default_style == "1": - style_img = Image.open(self.opt.style_image1).convert('L') - elif request_data.default_style == "2": - style_img = Image.open(self.opt.style_image2).convert('L') - elif request_data.default_style == "3": - style_img = Image.open(self.opt.style_image3).convert('L') - else: - style_img = oss_get_image(bucket=self.style_image_url.split('/')[0], object_name=self.style_image_url[self.style_image_url.find('/') + 1:], data_type="PIL") - style_img = style_img.convert('L') - style_img = transform(style_img) - self.data['B'] = style_img - self.data['B'] = self.data['B'].unsqueeze(0).to(device) - A, self.width, self.height = self.get_image(self.image_url) - self.data['A'] = transform(A) - self.data['A'] = self.data['A'].unsqueeze(0).to(device) - - def get_result(self): - self.model.set_input(self.data) - self.model.test() # run inference - visuals = 
self.model.get_current_visuals() # get image results - image_numpy = tensor2im(visuals['content_output'].cpu()) - image_bytes = cv2.imencode(".jpg", image_numpy)[1].tobytes() - req = oss_upload_image(bucket=self.sketch_bucket, object_name=self.sketch_name, image_bytes=image_bytes) - return f"{req.bucket_name}/{req.object_name}" - - def get_image(self, image_url): - image = oss_get_image(bucket=image_url.split('/')[0], object_name=image_url[image_url.find('/') + 1:], data_type="PIL") - image = image.convert('RGB') - width = image.size[0] - height = image.size[1] - return image, width, height - - -if __name__ == '__main__': - data = Image2SketchModel(image_url="test/real_Dress_790b2c6e370644e134df7abdfe7e54d9.jpg_Img.jpg", sketch_bucket="test", sketch_name="test123.jpg") - server = Image2SketchServer(data) - sketch_url = server.get_result() - print(sketch_url) diff --git a/app/service/image2sketch/util/__init__.py b/app/service/image2sketch/util/__init__.py deleted file mode 100644 index ae36f63..0000000 --- a/app/service/image2sketch/util/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""This package includes a miscellaneous collection of useful helper functions.""" diff --git a/app/service/image2sketch/util/get_data.py b/app/service/image2sketch/util/get_data.py deleted file mode 100644 index 97edc3c..0000000 --- a/app/service/image2sketch/util/get_data.py +++ /dev/null @@ -1,110 +0,0 @@ -from __future__ import print_function -import os -import tarfile -import requests -from warnings import warn -from zipfile import ZipFile -from bs4 import BeautifulSoup -from os.path import abspath, isdir, join, basename - - -class GetData(object): - """A Python script for downloading CycleGAN or pix2pix datasets. - - Parameters: - technique (str) -- One of: 'cyclegan' or 'pix2pix'. - verbose (bool) -- If True, print additional information. 
- - Examples: - >>> from util.get_data import GetData - >>> gd = GetData(technique='cyclegan') - >>> new_data_path = gd.get(save_path='./datasets') # options will be displayed. - - Alternatively, You can use bash scripts: 'scripts/download_pix2pix_model.sh' - and 'scripts/download_cyclegan_model.sh'. - """ - - def __init__(self, technique='cyclegan', verbose=True): - url_dict = { - 'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/', - 'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets' - } - self.url = url_dict.get(technique.lower()) - self._verbose = verbose - - def _print(self, text): - if self._verbose: - print(text) - - @staticmethod - def _get_options(r): - soup = BeautifulSoup(r.text, 'lxml') - options = [h.text for h in soup.find_all('a', href=True) - if h.text.endswith(('.zip', 'tar.gz'))] - return options - - def _present_options(self): - r = requests.get(self.url) - options = self._get_options(r) - print('Options:\n') - for i, o in enumerate(options): - print("{0}: {1}".format(i, o)) - choice = input("\nPlease enter the number of the " - "dataset above you wish to download:") - return options[int(choice)] - - def _download_data(self, dataset_url, save_path): - if not isdir(save_path): - os.makedirs(save_path) - - base = basename(dataset_url) - temp_save_path = join(save_path, base) - - with open(temp_save_path, "wb") as f: - r = requests.get(dataset_url) - f.write(r.content) - - if base.endswith('.tar.gz'): - obj = tarfile.open(temp_save_path) - elif base.endswith('.zip'): - obj = ZipFile(temp_save_path, 'r') - else: - raise ValueError("Unknown File Type: {0}.".format(base)) - - self._print("Unpacking Data...") - obj.extractall(save_path) - obj.close() - os.remove(temp_save_path) - - def get(self, save_path, dataset=None): - """ - - Download a dataset. - - Parameters: - save_path (str) -- A directory to save the data to. - dataset (str) -- (optional). A specific dataset to download. 
- Note: this must include the file extension. - If None, options will be presented for you - to choose from. - - Returns: - save_path_full (str) -- the absolute path to the downloaded data. - - """ - if dataset is None: - selected_dataset = self._present_options() - else: - selected_dataset = dataset - - save_path_full = join(save_path, selected_dataset.split('.')[0]) - - if isdir(save_path_full): - warn("\n'{0}' already exists. Voiding Download.".format( - save_path_full)) - else: - self._print('Downloading Data...') - url = "{0}/{1}".format(self.url, selected_dataset) - self._download_data(url, save_path=save_path) - - return abspath(save_path_full) diff --git a/app/service/image2sketch/util/html.py b/app/service/image2sketch/util/html.py deleted file mode 100644 index cc3262a..0000000 --- a/app/service/image2sketch/util/html.py +++ /dev/null @@ -1,86 +0,0 @@ -import dominate -from dominate.tags import meta, h3, table, tr, td, p, a, img, br -import os - - -class HTML: - """This HTML class allows us to save images and write texts into a single HTML file. - - It consists of functions such as (add a text header to the HTML file), - (add a row of images to the HTML file), and (save the HTML to the disk). - It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API. - """ - - def __init__(self, web_dir, title, refresh=0): - """Initialize the HTML classes - - Parameters: - web_dir (str) -- a directory that stores the webpage. 
HTML file will be created at /index.html; images will be saved at 0: - with self.doc.head: - meta(http_equiv="refresh", content=str(refresh)) - - def get_image_dir(self): - """Return the directory that stores images""" - return self.img_dir - - def add_header(self, text): - """Insert a header to the HTML file - - Parameters: - text (str) -- the header text - """ - with self.doc: - h3(text) - - def add_images(self, ims, txts, links, width=400): - """add images to the HTML file - - Parameters: - ims (str list) -- a list of image paths - txts (str list) -- a list of image names shown on the website - links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page - """ - self.t = table(border=1, style="table-layout: fixed;") # Insert a table - self.doc.add(self.t) - with self.t: - with tr(): - for im, txt, link in zip(ims, txts, links): - with td(style="word-wrap: break-word;", halign="center", valign="top"): - with p(): - with a(href=os.path.join('images', link)): - img(style="width:%dpx" % width, src=os.path.join('images', im)) - br() - p(txt) - - def save(self): - """save the current content to the HMTL file""" - html_file = '%s/index.html' % self.web_dir - f = open(html_file, 'wt') - f.write(self.doc.render()) - f.close() - - -if __name__ == '__main__': # we show an example usage here. - html = HTML('web/', 'test_html') - html.add_header('hello world') - - ims, txts, links = [], [], [] - for n in range(4): - ims.append('image_%d.png' % n) - txts.append('text_%d' % n) - links.append('image_%d.png' % n) - html.add_images(ims, txts, links) - html.save() diff --git a/app/service/image2sketch/util/image_pool.py b/app/service/image2sketch/util/image_pool.py deleted file mode 100644 index 6d086f8..0000000 --- a/app/service/image2sketch/util/image_pool.py +++ /dev/null @@ -1,54 +0,0 @@ -import random -import torch - - -class ImagePool(): - """This class implements an image buffer that stores previously generated images. 
- - This buffer enables us to update discriminators using a history of generated images - rather than the ones produced by the latest generators. - """ - - def __init__(self, pool_size): - """Initialize the ImagePool class - - Parameters: - pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created - """ - self.pool_size = pool_size - if self.pool_size > 0: # create an empty pool - self.num_imgs = 0 - self.images = [] - - def query(self, images): - """Return an image from the pool. - - Parameters: - images: the latest generated images from the generator - - Returns images from the buffer. - - By 50/100, the buffer will return input images. - By 50/100, the buffer will return images previously stored in the buffer, - and insert the current images to the buffer. - """ - if self.pool_size == 0: # if the buffer size is 0, do nothing - return images - return_images = [] - for image in images: - image = torch.unsqueeze(image.data, 0) - if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer - self.num_imgs = self.num_imgs + 1 - self.images.append(image) - return_images.append(image) - else: - p = random.uniform(0, 1) - if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer - random_id = random.randint(0, self.pool_size - 1) # randint is inclusive - tmp = self.images[random_id].clone() - self.images[random_id] = image - return_images.append(tmp) - else: # by another 50% chance, the buffer will return the current image - return_images.append(image) - return_images = torch.cat(return_images, 0) # collect all the images and return - return return_images diff --git a/app/service/image2sketch/util/util.py b/app/service/image2sketch/util/util.py deleted file mode 100644 index b050c13..0000000 --- a/app/service/image2sketch/util/util.py +++ /dev/null @@ -1,103 +0,0 @@ -"""This module contains simple helper functions """ -from 
__future__ import print_function -import torch -import numpy as np -from PIL import Image -import os - - -def tensor2im(input_image, imtype=np.uint8): - """"Converts a Tensor array into a numpy image array. - - Parameters: - input_image (tensor) -- the input image tensor array - imtype (type) -- the desired type of the converted numpy array - """ - if not isinstance(input_image, np.ndarray): - if isinstance(input_image, torch.Tensor): # get the data from a variable - image_tensor = input_image.data - else: - return input_image - image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array - if image_numpy.shape[0] == 1: # grayscale to RGB - image_numpy = np.tile(image_numpy, (3, 1, 1)) - image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: tranpose and scaling - else: # if it is a numpy array, do nothing - image_numpy = input_image - return image_numpy.astype(imtype) - - -def diagnose_network(net, name='network'): - """Calculate and print the mean of average absolute(gradients) - - Parameters: - net (torch network) -- Torch network - name (str) -- the name of the network - """ - mean = 0.0 - count = 0 - for param in net.parameters(): - if param.grad is not None: - mean += torch.mean(torch.abs(param.grad.data)) - count += 1 - if count > 0: - mean = mean / count - print(name) - print(mean) - - -def save_image(image_numpy, image_path, aspect_ratio=1.0): - """Save a numpy image to the disk - - Parameters: - image_numpy (numpy array) -- input numpy array - image_path (str) -- the path of the image - """ - - image_pil = Image.fromarray(image_numpy) - h, w, _ = image_numpy.shape - - if aspect_ratio > 1.0: - image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC) - if aspect_ratio < 1.0: - image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC) - image_pil.save(image_path) - - -def print_numpy(x, val=True, shp=False): - """Print the mean, min, max, median, std, and size of a numpy 
array - - Parameters: - val (bool) -- if print the values of the numpy array - shp (bool) -- if print the shape of the numpy array - """ - x = x.astype(np.float64) - if shp: - print('shape,', x.shape) - if val: - x = x.flatten() - print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( - np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) - - -def mkdirs(paths): - """create empty directories if they don't exist - - Parameters: - paths (str list) -- a list of directory paths - """ - if isinstance(paths, list) and not isinstance(paths, str): - for path in paths: - mkdir(path) - else: - mkdir(paths) - - -def mkdir(path): - """create a single empty directory if it didn't exist - - Parameters: - path (str) -- a single directory path - """ - if not os.path.exists(path): - os.makedirs(path) diff --git a/app/service/image2sketch/util/visualizer.py b/app/service/image2sketch/util/visualizer.py deleted file mode 100644 index 239c5ee..0000000 --- a/app/service/image2sketch/util/visualizer.py +++ /dev/null @@ -1,223 +0,0 @@ -import numpy as np -import os -import sys -import ntpath -import time -from . import util, html -from subprocess import Popen, PIPE - - -if sys.version_info[0] == 2: - VisdomExceptionBase = Exception -else: - VisdomExceptionBase = ConnectionError - - -def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256): - """Save images to the disk. - - Parameters: - webpage (the HTML class) -- the HTML webpage class that stores these imaegs (see html.py for more details) - visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs - image_path (str) -- the string is used to create image paths - aspect_ratio (float) -- the aspect ratio of saved images - width (int) -- the images will be resized to width x width - - This function will save images stored in 'visuals' to the HTML file specified by 'webpage'. 
- """ - image_dir = webpage.get_image_dir() - short_path = ntpath.basename(image_path[0]) - name = os.path.splitext(short_path)[0] - - webpage.add_header(name) - ims, txts, links = [], [], [] - - for label, im_data in visuals.items(): - im = util.tensor2im(im_data) - image_name = '%s_%s.png' % (name, label) - save_path = os.path.join(image_dir, image_name) - util.save_image(im, save_path, aspect_ratio=aspect_ratio) - ims.append(image_name) - txts.append(label) - links.append(image_name) - webpage.add_images(ims, txts, links, width=width) - - -class Visualizer(): - """This class includes several functions that can display/save images and print/save logging information. - - It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images. - """ - - def __init__(self, opt): - """Initialize the Visualizer class - - Parameters: - opt -- stores all the experiment flags; needs to be a subclass of BaseOptions - Step 1: Cache the training/test options - Step 2: connect to a visdom server - Step 3: create an HTML object for saveing HTML filters - Step 4: create a logging file to store training losses - """ - self.opt = opt # cache the option - self.display_id = opt.display_id - self.use_html = opt.isTrain and not opt.no_html - self.win_size = opt.display_winsize - self.name = opt.name - self.port = opt.display_port - self.saved = False - ''' - if self.display_id > 0: # connect to a visdom server given and - import visdom - self.ncols = opt.display_ncols - self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env) - if not self.vis.check_connection(): - self.create_visdom_connections() - ''' - if self.use_html: # create an HTML object at /web/; images will be saved under /web/images/ - self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web') - self.img_dir = os.path.join(self.web_dir, 'images') - print('create web directory %s...' 
% self.web_dir) - util.mkdirs([self.web_dir, self.img_dir]) - # create a logging file to store training losses - self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt') - with open(self.log_name, "a") as log_file: - now = time.strftime("%c") - log_file.write('================ Training Loss (%s) ================\n' % now) - - def reset(self): - """Reset the self.saved status""" - self.saved = False - ''' - def create_visdom_connections(self): - """If the program could not connect to Visdom server, this function will start a new server at port < self.port > """ - cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port - print('\n\nCould not connect to Visdom server. \n Trying to start a server....') - print('Command: %s' % cmd) - Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) - - def display_current_results(self, visuals, epoch, save_result): - """Display current results on visdom; save current results to an HTML file. - - Parameters: - visuals (OrderedDict) - - dictionary of images to display or save - epoch (int) - - the current epoch - save_result (bool) - - if save the current results to an HTML file - """ - if self.display_id > 0: # show images in the browser using visdom - ncols = self.ncols - if ncols > 0: # show all the images in one visdom panel - ncols = min(ncols, len(visuals)) - h, w = next(iter(visuals.values())).shape[:2] - table_css = """""" % (w, h) # create a table css - # create a table of images. 
- title = self.name - label_html = '' - label_html_row = '' - images = [] - idx = 0 - for label, image in visuals.items(): - image_numpy = util.tensor2im(image) - label_html_row += '%s' % label - images.append(image_numpy.transpose([2, 0, 1])) - idx += 1 - if idx % ncols == 0: - label_html += '%s' % label_html_row - label_html_row = '' - white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255 - while idx % ncols != 0: - images.append(white_image) - label_html_row += '' - idx += 1 - if label_html_row != '': - label_html += '%s' % label_html_row - try: - self.vis.images(images, nrow=ncols, win=self.display_id + 1, - padding=2, opts=dict(title=title + ' images')) - label_html = '%s
' % label_html - self.vis.text(table_css + label_html, win=self.display_id + 2, - opts=dict(title=title + ' labels')) - except VisdomExceptionBase: - self.create_visdom_connections() - - else: # show each image in a separate visdom panel; - idx = 1 - try: - for label, image in visuals.items(): - image_numpy = util.tensor2im(image) - self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label), - win=self.display_id + idx) - idx += 1 - except VisdomExceptionBase: - self.create_visdom_connections() - - if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved. - self.saved = True - # save images to the disk - for label, image in visuals.items(): - image_numpy = util.tensor2im(image) - img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label)) - util.save_image(image_numpy, img_path) - - # update website - webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1) - for n in range(epoch, 0, -1): - webpage.add_header('epoch [%d]' % n) - ims, txts, links = [], [], [] - - for label, image_numpy in visuals.items(): - image_numpy = util.tensor2im(image) - img_path = 'epoch%.3d_%s.png' % (n, label) - ims.append(img_path) - txts.append(label) - links.append(img_path) - webpage.add_images(ims, txts, links, width=self.win_size) - webpage.save() - ''' - def plot_current_losses(self, epoch, counter_ratio, losses): - """display the current losses on visdom display: dictionary of error labels and values - - Parameters: - epoch (int) -- current epoch - counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1 - losses (OrderedDict) -- training losses stored in the format of (name, float) pairs - """ - if not hasattr(self, 'plot_data'): - self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())} - self.plot_data['X'].append(epoch + counter_ratio) - self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']]) - ''' - try: - 
self.vis.line( - X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1), - Y=np.array(self.plot_data['Y']), - opts={ - 'title': self.name + ' loss over time', - 'legend': self.plot_data['legend'], - 'xlabel': 'epoch', - 'ylabel': 'loss'}, - win=self.display_id) - except VisdomExceptionBase: - self.create_visdom_connections() - ''' - # losses: same format as |losses| of plot_current_losses - def print_current_losses(self, epoch, iters, losses, t_comp, t_data): - """print current losses on console; also save the losses to the disk - - Parameters: - epoch (int) -- current epoch - iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch) - losses (OrderedDict) -- training losses stored in the format of (name, float) pairs - t_comp (float) -- computational time per data point (normalized by batch_size) - t_data (float) -- data loading time per data point (normalized by batch_size) - """ - message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data) - for k, v in losses.items(): - message += '%s: %.3f ' % (k, v) - - print(message) # print the message - with open(self.log_name, "a") as log_file: - log_file.write('%s\n' % message) # save the message diff --git a/app/service/image2sketch_2/download_checkpoints.py b/app/service/image2sketch_2/download_checkpoints.py deleted file mode 100644 index 9048c34..0000000 --- a/app/service/image2sketch_2/download_checkpoints.py +++ /dev/null @@ -1,45 +0,0 @@ -import os - -from minio import Minio -from minio.error import S3Error - -MINIO_URL = "www.minio.aida.com.hk:12024" -MINIO_ACCESS = 'vXKFLSJkYeEq2DrSZvkB' -MINIO_SECRET = 'uKTZT3x7C43WvPN9QTc99DiRkwddWZrG9Uh3JVlR' -MINIO_SECURE = True -# 配置MinIO客户端 -minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) - - -# 下载函数 -def download_folder(bucket_name, folder_name, local_dir): - try: - # 确保本地目录存在 - if not os.path.exists(local_dir): - 
os.makedirs(local_dir) - - # 遍历MinIO中的文件 - objects = minio_client.list_objects(bucket_name, prefix=folder_name, recursive=True) - for obj in objects: - # 构造本地文件路径 - local_file_path = os.path.join(local_dir, obj.object_name[len(folder_name):]) - local_file_dir = os.path.dirname(local_file_path) - - # 确保本地目录存在 - if not os.path.exists(local_file_dir): - os.makedirs(local_file_dir) - - # 下载文件 - minio_client.fget_object(bucket_name, obj.object_name, local_file_path) - print(f"Downloaded {obj.object_name} to {local_file_path}") - - except S3Error as e: - print(f"Error occurred: {e}") - - -# 使用示例 -bucket_name = "test" # 替换成你的bucket名称 -folder_name = "checkpoints/lineart/" # 权重文件夹的路径 -local_dir = "app/service/image2sketch_2" # 替换成你希望保存到的本地目录 - -download_folder(bucket_name, folder_name, local_dir) diff --git a/app/service/image2sketch_2/server.py b/app/service/image2sketch_2/server.py deleted file mode 100644 index 41c0278..0000000 --- a/app/service/image2sketch_2/server.py +++ /dev/null @@ -1,142 +0,0 @@ -import cv2 -import numpy -import numpy as np -import torch -import torch.nn as nn -import torchvision.transforms as transforms -from PIL import Image - -from app.service.utils.oss_client import oss_get_image, oss_upload_image - -norm_layer = nn.InstanceNorm2d - -weights = [(0.7, 0.3), (0.5, 0.5), (0.3, 0.7), (0.1, 0.9), (0, 1)] -kernel = np.ones((3, 3), np.uint8) - - -class ResidualBlock(nn.Module): - def __init__(self, in_features): - super(ResidualBlock, self).__init__() - - conv_block = [nn.ReflectionPad2d(1), - nn.Conv2d(in_features, in_features, 3), - norm_layer(in_features), - nn.ReLU(inplace=True), - nn.ReflectionPad2d(1), - nn.Conv2d(in_features, in_features, 3), - norm_layer(in_features) - ] - - self.conv_block = nn.Sequential(*conv_block) - - def forward(self, x): - return x + self.conv_block(x) - - -class Generator(nn.Module): - def __init__(self, input_nc, output_nc, n_residual_blocks=9, sigmoid=True): - super(Generator, self).__init__() - - # Initial 
convolution block - model0 = [nn.ReflectionPad2d(3), - nn.Conv2d(input_nc, 64, 7), - norm_layer(64), - nn.ReLU(inplace=True)] - self.model0 = nn.Sequential(*model0) - - # Downsampling - model1 = [] - in_features = 64 - out_features = in_features * 2 - for _ in range(2): - model1 += [nn.Conv2d(in_features, out_features, 3, stride=2, padding=1), - norm_layer(out_features), - nn.ReLU(inplace=True)] - in_features = out_features - out_features = in_features * 2 - self.model1 = nn.Sequential(*model1) - - model2 = [] - # Residual blocks - for _ in range(n_residual_blocks): - model2 += [ResidualBlock(in_features)] - self.model2 = nn.Sequential(*model2) - - # Upsampling - model3 = [] - out_features = in_features // 2 - for _ in range(2): - model3 += [nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1), - norm_layer(out_features), - nn.ReLU(inplace=True)] - in_features = out_features - out_features = in_features // 2 - self.model3 = nn.Sequential(*model3) - - # Output layer - model4 = [nn.ReflectionPad2d(3), - nn.Conv2d(64, output_nc, 7)] - if sigmoid: - model4 += [nn.Sigmoid()] - - self.model4 = nn.Sequential(*model4) - - def forward(self, x, cond=None): - out = self.model0(x) - out = self.model1(out) - out = self.model2(out) - out = self.model3(out) - out = self.model4(out) - - return out - - -model1 = Generator(3, 1, 3) -model1.load_state_dict(torch.load('app/service/image2sketch_2/model.pth', map_location=torch.device('cpu'))) -model1.eval() - - -def predict(input_img, width): - transform = transforms.Compose([transforms.Resize(width, Image.BICUBIC), transforms.ToTensor()]) - input_img = transform(input_img) - input_img = torch.unsqueeze(input_img, 0) - - with torch.no_grad(): - drawing = model1(input_img)[0].detach() - - drawing = transforms.ToPILImage()(drawing) - - # 转ndarray - drawing = numpy.array(drawing) - return drawing - - -def get_image(image_url): - image = oss_get_image(bucket=image_url.split('/')[0], 
object_name=image_url[image_url.find('/') + 1:], data_type="PIL") - image = image.convert('RGB') - width = image.size[0] - height = image.size[1] - return image, width, height - - -def processing_pipeline(image_url, thickness, sketch_bucket, sketch_name): - thickness = int(thickness) - # 提取sketch - image, width, height = get_image(image_url) - sketch_image = predict(image, width) - - # 设定线条粗细 - if thickness != 0: - dilated = cv2.erode(sketch_image, kernel, iterations=1) - # 将原图与膨胀后的图像进行混合,使用不同的权重 - sketch_image = cv2.addWeighted(sketch_image, weights[thickness][0], dilated, weights[thickness][1], 0) - - # 上传minio - image_bytes = cv2.imencode(".jpg", sketch_image)[1].tobytes() - req = oss_upload_image(bucket=sketch_bucket, object_name=sketch_name, image_bytes=image_bytes) - return f"{req.bucket_name}/{req.object_name}" - - -if __name__ == '__main__': - result_url = processing_pipeline("aida-users/89/relight_image/d5f0d967-f8e8-424d-98f9-a8ad8313deec-0-89.png", 1, "test", "test123.jpg") - print(result_url) diff --git a/app/service/lineart/service.py b/app/service/lineart/service.py index d822dfa..c459b3c 100644 --- a/app/service/lineart/service.py +++ b/app/service/lineart/service.py @@ -6,12 +6,14 @@ import numpy as np import torch import torch.nn.functional as F import tritonclient.http as httpclient - +from minio import Minio +from app.core.config import settings from app.core.config import DESIGN_MODEL_URL from app.schemas.image2sketch import Image2SketchModel -from app.service.utils.oss_client import oss_get_image, oss_upload_image +from app.service.utils.new_oss_client import oss_get_image, oss_upload_image logger = logging.getLogger() +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) class LineArtService: @@ -58,7 +60,7 @@ class LineArtService: def put_image(self, image): try: image_bytes = cv2.imencode('.jpg', image)[1].tobytes() - 
oss_upload_image(bucket=self.sketch_bucket, object_name=f"{self.sketch_name}.jpg", image_bytes=image_bytes) + oss_upload_image(oss_client=minio_client, bucket=self.sketch_bucket, object_name=f"{self.sketch_name}.jpg", image_bytes=image_bytes) return f"{self.sketch_bucket}/{self.sketch_name}.jpg" except Exception as e: logger.warning(e) diff --git a/app/service/mannequins_edit/service.py b/app/service/mannequins_edit/service.py index c0f0a44..5199c94 100644 --- a/app/service/mannequins_edit/service.py +++ b/app/service/mannequins_edit/service.py @@ -3,14 +3,14 @@ import numpy as np from PIL import Image from minio import Minio -from app.core.config import MINIO_URL, MINIO_ACCESS, MINIO_SECRET, MINIO_SECURE +from app.core.config import settings from app.schemas.mannequin_edit import MannequinModel from app.service.utils.new_oss_client import oss_get_image, oss_upload_image -minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) -class MannequinEditService(): +class MannequinEditService: def __init__(self, request_data): self.resize_pixel = request_data.resize_pixel self.top = request_data.top diff --git a/app/service/project_info_extraction/service.py b/app/service/project_info_extraction/service.py index cf9df6c..ba7d787 100644 --- a/app/service/project_info_extraction/service.py +++ b/app/service/project_info_extraction/service.py @@ -1,4 +1,4 @@ -from langchain.output_parsers import ResponseSchema, StructuredOutputParser +from langchain_classic.output_parsers import ResponseSchema, StructuredOutputParser from langchain_community.chat_models import ChatTongyi from langchain_core.prompts import PromptTemplate @@ -51,7 +51,7 @@ class ProjectInfoExtraction: return self.result_data def llm_extraction_project_info(self): - output = self.model(self._input.to_messages()) + output = 
self.model project_info = self.output_parser.parse(output.content) self.result_data = project_info diff --git a/app/service/prompt_generation/chatgpt_for_translation.py b/app/service/prompt_generation/chatgpt_for_translation.py index 4c50d0b..79d17b6 100644 --- a/app/service/prompt_generation/chatgpt_for_translation.py +++ b/app/service/prompt_generation/chatgpt_for_translation.py @@ -7,7 +7,7 @@ from dashscope import Generation from requests import RequestException from retry import retry -from app.core.config import QWEN_API_KEY +from app.core.config import settings from app.service.chat_robot.script.service.CallQWen import get_language from app.service.prompt_generation.util import minio_util @@ -51,19 +51,19 @@ def translate_to_en(text): # "The translation of \"Material suave\" into English would be \"Smooth material.\"". Instead, directly output "Smooth material". # """ # ) - messages = [ - # { - # Translate the entire text and ensure the output is a complete and coherent sentence in English. - # "content": template, # 系统message - # "role": "system" - # }, - { - # "content": input('请输入:'), # 用户message - "content": text, # 用户message - "role": "user" - } - ] - first_response = get_response(messages) + # messages = [ + # # { + # # Translate the entire text and ensure the output is a complete and coherent sentence in English. 
+ # # "content": template, # 系统message + # # "role": "system" + # # }, + # { + # # "content": input('请输入:'), # 用户message + # "content": text, # 用户message + # "role": "user" + # } + # ] + first_response = get_response assistant_output = first_response.output.choices[0].message print("input : {}, translate result : {}".format(text, assistant_output.content)) return assistant_output.content @@ -79,7 +79,7 @@ def translate_to_en(text): def get_response(messages): response = Generation.call( model='qwen-turbo', - api_key=QWEN_API_KEY, + api_key=settings.QWEN_API_KEY, messages=messages, # seed=random.randint(1, 10000), # 设置随机数种子seed,如果没有设置,则随机数种子默认为1234 result_format='message', # 将输出设置为message形式 @@ -90,7 +90,7 @@ def get_response(messages): def get_translation_from_llama3(text): start_time = time.time() - url = "http://10.1.1.240:11434/api/generate" + url = f"http://{settings.A6000_SERVICE_HOST}:11434/api/generate" # url = "http://10.1.1.240:1143/api/generate" # prompt = f"System: {prefix_for_llama}\nUser:[{text}]" @@ -122,6 +122,7 @@ def get_translation_from_llama3(text): logger.info(f"translation server runtime is {time.time() - start_time} , response is {response.content}") print(f"Request failed with status code {response.status_code}") print(response.text) + return "" # 在llama3中创建一个翻译模型 @@ -174,6 +175,7 @@ def get_prompt_from_image(image_path, text): logger.info(f"sketch re-generate server runtime is {time.time() - start_time} , response is {response.content}") print(f"Request failed with status code {response.status_code}") print(response.text) + return "" def main(): diff --git a/app/service/prompt_generation/util/minio_util.py b/app/service/prompt_generation/util/minio_util.py index 8708ae9..c5a8c99 100644 --- a/app/service/prompt_generation/util/minio_util.py +++ b/app/service/prompt_generation/util/minio_util.py @@ -2,20 +2,19 @@ import base64 from minio import Minio -from app.core.config import MINIO_URL, MINIO_ACCESS, MINIO_SECRET, MINIO_SECURE +from 
app.core.config import settings -minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) def minio_url_to_base64(minio_url: str) -> str: bucket_name, object_name = minio_url.split("/", 1) - + response = minio_client.get_object(bucket_name, object_name) try: - response = minio_client.get_object(bucket_name, object_name) image_data = response.read() return base64.b64encode(image_data).decode('utf-8') except Exception as e: raise RuntimeError(f"Failed to get object: {e}") finally: if 'response' in locals(): - response.close() \ No newline at end of file + response.close() diff --git a/app/service/recommend/scheduled_task.py b/app/service/recommend/scheduled_task.py index d3174ed..b146ffe 100644 --- a/app/service/recommend/scheduled_task.py +++ b/app/service/recommend/scheduled_task.py @@ -18,7 +18,8 @@ import pandas as pd from datetime import datetime, timedelta import json -from app.core.config import DB_CONFIG, TABLE_CATEGORIES, RECOMMEND_PATH_PREFIX +from app.core.config import TABLE_CATEGORIES, settings +from app.core.mysql_config import DB_CONFIG # 自动选择可用字体 try: @@ -51,7 +52,7 @@ minio_client = Minio( ) # 预加载系统sketch特征向量 -SYSTEM_FEATURES = np.load(f'{RECOMMEND_PATH_PREFIX}sketch_feature_dict.npy', allow_pickle=True).item() +SYSTEM_FEATURES = np.load(f'{settings.RECOMMEND_PATH_PREFIX}sketch_feature_dict.npy', allow_pickle=True).item() # 行为权重和衰减系数 BEHAVIOR_CONFIG = { @@ -61,6 +62,7 @@ BEHAVIOR_CONFIG = { 'sketchLike': {'weight': 4, 'decay': 0} # 不衰减 } + # 保存sketch_to_iid到文件 def save_sketch_to_iid(): """保存sketch到iid的映射""" @@ -147,11 +149,11 @@ def update_user_matrices(): cursor = conn.cursor() # 修改后的查询语句(移除category过滤) - cursor.execute(""" - SELECT account_id, path, COUNT(*) as like_count - FROM user_preference_log_test - GROUP BY account_id, path - """) + cursor.execute(""" + SELECT 
account_id, path, COUNT(*) as like_count + FROM user_preference_log_test + GROUP BY account_id, path + """) user_data = cursor.fetchall() logging.info(f"成功读取{len(user_data)}条用户偏好记录") @@ -164,17 +166,17 @@ def update_user_matrices(): feature_matrix, user_index_feature_matrix, sketch_index_feature_matrix, iid_to_category_feature_matrix = calculate_feature_matrix(user_data) # visualize_sparse_matrix(feature_matrix, '系统sketch与用户category平均特征向量关联度矩阵', 'correlation_matrix.png') # 存储矩阵 - np.save(f"{RECOMMEND_PATH_PREFIX}interaction_matrix.npy", interaction_matrix) - np.save(f"{RECOMMEND_PATH_PREFIX}feature_matrix.npy", feature_matrix) + np.save(f"{settings.RECOMMEND_PATH_PREFIX}interaction_matrix.npy", interaction_matrix) + np.save(f"{settings.RECOMMEND_PATH_PREFIX}feature_matrix.npy", feature_matrix) # - np.save(f"{RECOMMEND_PATH_PREFIX}iid_to_category_interaction_matrix.npy", iid_to_category_interaction_matrix) - np.save(f"{RECOMMEND_PATH_PREFIX}user_index_interaction_matrix.npy", user_index_interaction_matrix) + np.save(f"{settings.RECOMMEND_PATH_PREFIX}iid_to_category_interaction_matrix.npy", iid_to_category_interaction_matrix) + np.save(f"{settings.RECOMMEND_PATH_PREFIX}user_index_interaction_matrix.npy", user_index_interaction_matrix) # - np.save(f"{RECOMMEND_PATH_PREFIX}iid_to_category_feature_matrix.npy", iid_to_category_feature_matrix) - np.save(f"{RECOMMEND_PATH_PREFIX}user_index_feature_matrix.npy", user_index_feature_matrix) + np.save(f"{settings.RECOMMEND_PATH_PREFIX}iid_to_category_feature_matrix.npy", iid_to_category_feature_matrix) + np.save(f"{settings.RECOMMEND_PATH_PREFIX}user_index_feature_matrix.npy", user_index_feature_matrix) # - np.save(f"{RECOMMEND_PATH_PREFIX}sketch_index_interaction_matrix.npy", sketch_index_interaction_matrix) - np.save(f"{RECOMMEND_PATH_PREFIX}sketch_index_feature_matrix.npy", sketch_index_feature_matrix) + np.save(f"{settings.RECOMMEND_PATH_PREFIX}sketch_index_interaction_matrix.npy", sketch_index_interaction_matrix) + 
np.save(f"{settings.RECOMMEND_PATH_PREFIX}sketch_index_feature_matrix.npy", sketch_index_feature_matrix) # logging.info("矩阵更新完成") except Exception as e: @@ -235,6 +237,7 @@ def plot_interaction_count_matrix(interaction_count_matrix): except Exception as e: logging.error(f"绘图失败: {str(e)}", exc_info=True) + def visualize_sparse_matrix(matrix, title='Non-zero Interactions (Scatter Plot)', filename="scatter_figure_interaction.png"): if not sparse.issparse(matrix): # 转换为稀疏矩阵 @@ -253,6 +256,7 @@ def visualize_sparse_matrix(matrix, title='Non-zero Interactions (Scatter Plot)' plt.ylabel('Item Index') plt.savefig(filename) + def calculate_interaction_matrix(user_data): """基于新表结构的交互次数矩阵计算(仅系统sketch)""" # 获取所有用户ID @@ -475,6 +479,7 @@ def calculate_heat(row, current_date): # 计算热度值 = 权重 * e^(-衰减系数 * 天数) return config['weight'] * np.exp(-config['decay'] * days_passed) + def load_heat_matrix_as_array(file_path): """ 直接加载为二维numpy数组 @@ -484,10 +489,11 @@ def load_heat_matrix_as_array(file_path): saved = json.load(f) return ( np.array(saved['data']), # 二维矩阵 - saved['row_labels'], # 行标签列表 - saved['col_labels'] # 列标签列表 + saved['row_labels'], # 行标签列表 + saved['col_labels'] # 列标签列表 ) + def update_heat_matrices(): """每日计算并存储热度矩阵(gender_category × path)""" current_date = datetime.now() diff --git a/app/service/recommend/service.py b/app/service/recommend/service.py index 0db64dd..6fcb464 100644 --- a/app/service/recommend/service.py +++ b/app/service/recommend/service.py @@ -1,240 +1,240 @@ -# 预加载资源 -import logging -import time -from collections import defaultdict -import os -import json -import numpy as np - -from app.core.config import DB_CONFIG, RECOMMEND_PATH_PREFIX - -logger = logging.getLogger() -import pymysql -from concurrent.futures import ThreadPoolExecutor - -HEAT_VECTOR_FILE = 'heat_vectors_data/heat_vectors.json' # 可动态加载或配置 - -matrix_data = { - "interaction_matrix": None, - "feature_matrix": None, - "user_index_interaction": None, - "sketch_index_interaction": None, - 
"user_index_feature": None, - "sketch_index_feature": None, - "iid_to_sketch": None, - "category_to_iids": None, - "cached_scores": {}, - "cached_valid_idxs": {}, - "category_sketch_idxs_inter": None, - "category_sketch_idxs_feature": None, - "user_inter_full": dict(), - "user_feat_full": dict(), - "brand_feature_matrix": None, - "brand_index_map": None, - "heat_data": {}, -} - - -def load_resources(): - """加载所有矩阵和映射关系,并触发预缓存""" - try: - start_time = time.time() - - # 清空缓存 - matrix_data["cached_scores"].clear() - matrix_data["cached_valid_idxs"].clear() - - # 加载数据 - sketch_to_iid = np.load(f'{RECOMMEND_PATH_PREFIX}sketch_to_iid.npy', allow_pickle=True).item() - matrix_data["iid_to_sketch"] = {v: k for k, v in sketch_to_iid.items()} - - matrix_data["interaction_matrix"] = np.load(f"{RECOMMEND_PATH_PREFIX}interaction_matrix.npy", allow_pickle=True) - matrix_data["user_index_interaction"] = np.load(f"{RECOMMEND_PATH_PREFIX}user_index_interaction_matrix.npy", allow_pickle=True).item() - matrix_data["sketch_index_interaction"] = np.load(f"{RECOMMEND_PATH_PREFIX}sketch_index_interaction_matrix.npy", - allow_pickle=True).item() - - matrix_data["feature_matrix"] = np.load(f"{RECOMMEND_PATH_PREFIX}feature_matrix.npy", allow_pickle=True) - - brand_feature_path = f"{RECOMMEND_PATH_PREFIX}brand_feature_matrix.npy" - if os.path.exists(brand_feature_path): - matrix_data["brand_feature_matrix"] = np.load(brand_feature_path, allow_pickle=True) - else: - logger.warning("brand_feature_matrix 文件不存在,使用空数组") - matrix_data["brand_feature_matrix"] = np.array([]) - - # brand_index_map - brand_index_path = f"{RECOMMEND_PATH_PREFIX}brand_index_map.npy" - if os.path.exists(brand_index_path): - matrix_data["brand_index_map"] = np.load(brand_index_path, allow_pickle=True).item() - else: - logger.warning("brand_index_map 文件不存在,使用空字典") - matrix_data["brand_index_map"] = {} - - matrix_data["user_index_feature"] = np.load(f"{RECOMMEND_PATH_PREFIX}user_index_feature_matrix.npy", 
allow_pickle=True).item() - - matrix_data["sketch_index_feature"] = np.load(f"{RECOMMEND_PATH_PREFIX}sketch_index_feature_matrix.npy", allow_pickle=True).item() - - category_to_iid_map = np.load(f"{RECOMMEND_PATH_PREFIX}iid_to_category_interaction_matrix.npy", allow_pickle=True).item() - matrix_data["category_to_iids"] = defaultdict(list) - for iid, cat in category_to_iid_map.items(): - matrix_data["category_to_iids"][cat].append(iid) - - logger.info(f"资源加载完成,耗时: {time.time() - start_time:.2f}秒") - - # 触发预缓存 - precache_user_category() - - if os.path.exists(HEAT_VECTOR_FILE): - with open(HEAT_VECTOR_FILE, 'r', encoding='utf-8') as f: - heat_json = json.load(f) - matrix_data["heat_data"] = heat_json.get("data", {}) - logger.info(f"热度向量数据加载完成,共加载 {len(matrix_data['heat_data'])} 个类别") - else: - matrix_data["heat_data"] = {} - - except Exception as e: - logger.error(f"资源加载失败: {str(e)}") - raise RuntimeError("初始化失败") - - -def precache_user_category(): - """优化后的用户分类预缓存(添加耗时统计)""" - if not all([ - matrix_data["interaction_matrix"] is not None, - matrix_data["feature_matrix"] is not None, - matrix_data["user_index_interaction"] is not None - ]): - logger.warning("资源未加载完成,跳过预缓存") - return - - start_time = time.perf_counter() - time_stats = { - "get_all_user_categories": 0, - "process_user_category": 0, - "thread_execution": 0, - "cache_update": 0, - "total": 0, - } - - # 统计用户类别获取时间 - t1 = time.perf_counter() - user_categories = get_all_user_categories() - time_stats["get_all_user_categories"] = time.perf_counter() - t1 - - precached_count = 0 - - def process_user_category(user_id, categories): - """单用户类别缓存计算(统计耗时)""" - local_cache = {} - local_valid_idxs = {} - t_start = time.perf_counter() - - for category in categories: - cache_key = (user_id, category) - if cache_key in matrix_data["cached_scores"]: - continue - - try: - user_idx_inter = matrix_data["user_index_interaction"].get(user_id) - user_idx_feature = matrix_data["user_index_feature"].get(user_id) - - # 统计获取类别 IID 
耗时 - t_iid = time.perf_counter() - category_iids = matrix_data["category_to_iids"].get(category, []) - valid_sketch_idxs_inter = [matrix_data["sketch_index_interaction"][iid] - for iid in category_iids if iid in matrix_data["sketch_index_interaction"]] - valid_sketch_idxs_feature = [matrix_data["sketch_index_feature"][iid] - for iid in category_iids if iid in matrix_data["sketch_index_feature"]] - time_stats["process_user_category"] += time.perf_counter() - t_iid - - # 统计矩阵计算耗时 - t_matrix = time.perf_counter() - processed_inter = np.zeros(len(valid_sketch_idxs_inter)) - if user_idx_inter is not None and valid_sketch_idxs_inter: - raw_inter_scores = matrix_data["interaction_matrix"][user_idx_inter, valid_sketch_idxs_inter] - processed_inter = raw_inter_scores * 0.7 - - processed_feat = np.zeros(len(valid_sketch_idxs_feature)) - if user_idx_feature is not None and valid_sketch_idxs_feature: - raw_feat_scores = matrix_data["feature_matrix"][user_idx_feature, valid_sketch_idxs_feature] - raw_feat_scores = (raw_feat_scores - np.min(raw_feat_scores)) / ( - np.max(raw_feat_scores) - np.min(raw_feat_scores) + 1e-8) - processed_feat = raw_feat_scores * 0.3 - time_stats["process_user_category"] += time.perf_counter() - t_matrix - - if len(processed_inter) == len(processed_feat): - local_cache[cache_key] = (processed_inter, processed_feat) - local_valid_idxs[cache_key] = valid_sketch_idxs_inter - - except Exception as e: - logger.error(f"预缓存失败 (user={user_id}, category={category}): {str(e)}") - - return local_cache, local_valid_idxs - - # 统计线程执行时间 - t2 = time.perf_counter() - with ThreadPoolExecutor(max_workers=8) as executor: - futures = {executor.submit(process_user_category, user_id, categories): user_id for user_id, categories in user_categories.items()} - for future in futures: - try: - t_cache = time.perf_counter() - cache_part, valid_idxs_part = future.result() - matrix_data["cached_scores"].update(cache_part) - matrix_data["cached_valid_idxs"].update(valid_idxs_part) 
- time_stats["cache_update"] += time.perf_counter() - t_cache - precached_count += len(cache_part) - except Exception as e: - logger.error(f"线程执行错误: {str(e)}") - time_stats["thread_execution"] = time.perf_counter() - t2 - - time_stats["total"] = time.perf_counter() - start_time - - # 输出统计信息 - logger.info(f""" - 预缓存完成,共缓存 {precached_count} 组数据,耗时统计如下: - - 获取用户类别数据: {time_stats["get_all_user_categories"]:.2f}s - - 计算用户类别缓存: {time_stats["process_user_category"]:.2f}s - - 线程任务执行: {time_stats["thread_execution"]:.2f}s - - 更新缓存数据: {time_stats["cache_update"]:.2f}s - - 总耗时: {time_stats["total"]:.2f}s - """) - - -def get_all_user_categories(): - """获取所有用户及其对应的分类""" - conn = None - try: - conn = pymysql.connect(**DB_CONFIG) - cursor = conn.cursor() - - query = """ - SELECT DISTINCT account_id, path - FROM user_preference_log_prediction - """ - cursor.execute(query) - results = cursor.fetchall() - - user_categories = defaultdict(set) - for account_id, path in results: - category = get_category_from_path(path) - user_categories[account_id].add(category) - - return dict(user_categories) - - except Exception as e: - logger.error(f"数据库查询失败: {str(e)}") - return {} - finally: - if conn: - conn.close() - - -def get_category_from_path(path: str) -> str: - """从路径解析类别""" - try: - parts = path.split('/') - if len(parts) >= 4: - return f"{parts[2]}_{parts[3]}" - return "unknown" - except: - return "unknown" +# # 预加载资源 +# import logging +# import time +# from collections import defaultdict +# import os +# import json +# import numpy as np +# +# from app.core.config import DB_CONFIG, RECOMMEND_PATH_PREFIX +# +# logger = logging.getLogger() +# import pymysql +# from concurrent.futures import ThreadPoolExecutor +# +# HEAT_VECTOR_FILE = 'heat_vectors_data/heat_vectors.json' # 可动态加载或配置 +# +# matrix_data = { +# "interaction_matrix": None, +# "feature_matrix": None, +# "user_index_interaction": None, +# "sketch_index_interaction": None, +# "user_index_feature": None, +# "sketch_index_feature": 
None, +# "iid_to_sketch": None, +# "category_to_iids": None, +# "cached_scores": {}, +# "cached_valid_idxs": {}, +# "category_sketch_idxs_inter": None, +# "category_sketch_idxs_feature": None, +# "user_inter_full": dict(), +# "user_feat_full": dict(), +# "brand_feature_matrix": None, +# "brand_index_map": None, +# "heat_data": {}, +# } +# +# +# def load_resources(): +# """加载所有矩阵和映射关系,并触发预缓存""" +# try: +# start_time = time.time() +# +# # 清空缓存 +# matrix_data["cached_scores"].clear() +# matrix_data["cached_valid_idxs"].clear() +# +# # 加载数据 +# sketch_to_iid = np.load(f'{RECOMMEND_PATH_PREFIX}sketch_to_iid.npy', allow_pickle=True).item() +# matrix_data["iid_to_sketch"] = {v: k for k, v in sketch_to_iid.items()} +# +# matrix_data["interaction_matrix"] = np.load(f"{RECOMMEND_PATH_PREFIX}interaction_matrix.npy", allow_pickle=True) +# matrix_data["user_index_interaction"] = np.load(f"{RECOMMEND_PATH_PREFIX}user_index_interaction_matrix.npy", allow_pickle=True).item() +# matrix_data["sketch_index_interaction"] = np.load(f"{RECOMMEND_PATH_PREFIX}sketch_index_interaction_matrix.npy", +# allow_pickle=True).item() +# +# matrix_data["feature_matrix"] = np.load(f"{RECOMMEND_PATH_PREFIX}feature_matrix.npy", allow_pickle=True) +# +# brand_feature_path = f"{RECOMMEND_PATH_PREFIX}brand_feature_matrix.npy" +# if os.path.exists(brand_feature_path): +# matrix_data["brand_feature_matrix"] = np.load(brand_feature_path, allow_pickle=True) +# else: +# logger.warning("brand_feature_matrix 文件不存在,使用空数组") +# matrix_data["brand_feature_matrix"] = np.array([]) +# +# # brand_index_map +# brand_index_path = f"{RECOMMEND_PATH_PREFIX}brand_index_map.npy" +# if os.path.exists(brand_index_path): +# matrix_data["brand_index_map"] = np.load(brand_index_path, allow_pickle=True).item() +# else: +# logger.warning("brand_index_map 文件不存在,使用空字典") +# matrix_data["brand_index_map"] = {} +# +# matrix_data["user_index_feature"] = np.load(f"{RECOMMEND_PATH_PREFIX}user_index_feature_matrix.npy", 
allow_pickle=True).item() +# +# matrix_data["sketch_index_feature"] = np.load(f"{RECOMMEND_PATH_PREFIX}sketch_index_feature_matrix.npy", allow_pickle=True).item() +# +# category_to_iid_map = np.load(f"{RECOMMEND_PATH_PREFIX}iid_to_category_interaction_matrix.npy", allow_pickle=True).item() +# matrix_data["category_to_iids"] = defaultdict(list) +# for iid, cat in category_to_iid_map.items(): +# matrix_data["category_to_iids"][cat].append(iid) +# +# logger.info(f"资源加载完成,耗时: {time.time() - start_time:.2f}秒") +# +# # 触发预缓存 +# precache_user_category() +# +# if os.path.exists(HEAT_VECTOR_FILE): +# with open(HEAT_VECTOR_FILE, 'r', encoding='utf-8') as f: +# heat_json = json.load(f) +# matrix_data["heat_data"] = heat_json.get("data", {}) +# logger.info(f"热度向量数据加载完成,共加载 {len(matrix_data['heat_data'])} 个类别") +# else: +# matrix_data["heat_data"] = {} +# +# except Exception as e: +# logger.error(f"资源加载失败: {str(e)}") +# raise RuntimeError("初始化失败") +# +# +# def precache_user_category(): +# """优化后的用户分类预缓存(添加耗时统计)""" +# if not all([ +# matrix_data["interaction_matrix"] is not None, +# matrix_data["feature_matrix"] is not None, +# matrix_data["user_index_interaction"] is not None +# ]): +# logger.warning("资源未加载完成,跳过预缓存") +# return +# +# start_time = time.perf_counter() +# time_stats = { +# "get_all_user_categories": 0, +# "process_user_category": 0, +# "thread_execution": 0, +# "cache_update": 0, +# "total": 0, +# } +# +# # 统计用户类别获取时间 +# t1 = time.perf_counter() +# user_categories = get_all_user_categories() +# time_stats["get_all_user_categories"] = time.perf_counter() - t1 +# +# precached_count = 0 +# +# def process_user_category(user_id, categories): +# """单用户类别缓存计算(统计耗时)""" +# local_cache = {} +# local_valid_idxs = {} +# t_start = time.perf_counter() +# +# for category in categories: +# cache_key = (user_id, category) +# if cache_key in matrix_data["cached_scores"]: +# continue +# +# try: +# user_idx_inter = matrix_data["user_index_interaction"].get(user_id) +# user_idx_feature 
= matrix_data["user_index_feature"].get(user_id) +# +# # 统计获取类别 IID 耗时 +# t_iid = time.perf_counter() +# category_iids = matrix_data["category_to_iids"].get(category, []) +# valid_sketch_idxs_inter = [matrix_data["sketch_index_interaction"][iid] +# for iid in category_iids if iid in matrix_data["sketch_index_interaction"]] +# valid_sketch_idxs_feature = [matrix_data["sketch_index_feature"][iid] +# for iid in category_iids if iid in matrix_data["sketch_index_feature"]] +# time_stats["process_user_category"] += time.perf_counter() - t_iid +# +# # 统计矩阵计算耗时 +# t_matrix = time.perf_counter() +# processed_inter = np.zeros(len(valid_sketch_idxs_inter)) +# if user_idx_inter is not None and valid_sketch_idxs_inter: +# raw_inter_scores = matrix_data["interaction_matrix"][user_idx_inter, valid_sketch_idxs_inter] +# processed_inter = raw_inter_scores * 0.7 +# +# processed_feat = np.zeros(len(valid_sketch_idxs_feature)) +# if user_idx_feature is not None and valid_sketch_idxs_feature: +# raw_feat_scores = matrix_data["feature_matrix"][user_idx_feature, valid_sketch_idxs_feature] +# raw_feat_scores = (raw_feat_scores - np.min(raw_feat_scores)) / ( +# np.max(raw_feat_scores) - np.min(raw_feat_scores) + 1e-8) +# processed_feat = raw_feat_scores * 0.3 +# time_stats["process_user_category"] += time.perf_counter() - t_matrix +# +# if len(processed_inter) == len(processed_feat): +# local_cache[cache_key] = (processed_inter, processed_feat) +# local_valid_idxs[cache_key] = valid_sketch_idxs_inter +# +# except Exception as e: +# logger.error(f"预缓存失败 (user={user_id}, category={category}): {str(e)}") +# +# return local_cache, local_valid_idxs +# +# # 统计线程执行时间 +# t2 = time.perf_counter() +# with ThreadPoolExecutor(max_workers=8) as executor: +# futures = {executor.submit(process_user_category, user_id, categories): user_id for user_id, categories in user_categories.items()} +# for future in futures: +# try: +# t_cache = time.perf_counter() +# cache_part, valid_idxs_part = future.result() 
+# matrix_data["cached_scores"].update(cache_part) +# matrix_data["cached_valid_idxs"].update(valid_idxs_part) +# time_stats["cache_update"] += time.perf_counter() - t_cache +# precached_count += len(cache_part) +# except Exception as e: +# logger.error(f"线程执行错误: {str(e)}") +# time_stats["thread_execution"] = time.perf_counter() - t2 +# +# time_stats["total"] = time.perf_counter() - start_time +# +# # 输出统计信息 +# logger.info(f""" +# 预缓存完成,共缓存 {precached_count} 组数据,耗时统计如下: +# - 获取用户类别数据: {time_stats["get_all_user_categories"]:.2f}s +# - 计算用户类别缓存: {time_stats["process_user_category"]:.2f}s +# - 线程任务执行: {time_stats["thread_execution"]:.2f}s +# - 更新缓存数据: {time_stats["cache_update"]:.2f}s +# - 总耗时: {time_stats["total"]:.2f}s +# """) +# +# +# def get_all_user_categories(): +# """获取所有用户及其对应的分类""" +# conn = None +# try: +# conn = pymysql.connect(**DB_CONFIG) +# cursor = conn.cursor() +# +# query = """ +# SELECT DISTINCT account_id, path +# FROM user_preference_log_prediction +# """ +# cursor.execute(query) +# results = cursor.fetchall() +# +# user_categories = defaultdict(set) +# for account_id, path in results: +# category = get_category_from_path(path) +# user_categories[account_id].add(category) +# +# return dict(user_categories) +# +# except Exception as e: +# logger.error(f"数据库查询失败: {str(e)}") +# return {} +# finally: +# if conn: +# conn.close() +# +# +# def get_category_from_path(path: str) -> str: +# """从路径解析类别""" +# try: +# parts = path.split('/') +# if len(parts) >= 4: +# return f"{parts[2]}_{parts[3]}" +# return "unknown" +# except: +# return "unknown" diff --git a/app/service/recommendation_system/__init__.py b/app/service/recommendation_system/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/app/service/recommendation_system/__init__.py @@ -0,0 +1 @@ + diff --git a/app/service/recommendation_system/config.py b/app/service/recommendation_system/config.py new file mode 100644 index 0000000..bd48362 --- /dev/null +++ 
b/app/service/recommendation_system/config.py @@ -0,0 +1,67 @@ +""" +推荐系统配置 +""" +import os +from app.core.config import settings + +# Milvus 集合名称 +MILVUS_COLLECTION_SKETCH_VECTORS = "sketch_vectors_norm" + +# Redis key 前缀 +REDIS_KEY_USER_PREF_PREFIX = "user_pref" + +# 推荐系统配置参数 +RECOMMENDATION_CONFIG = { + # 时间衰减半衰期(用于计算时间衰减权重) + # 值越小,最近的行为权重越大 + "K_half": 10, + + # 探索与利用的比例 (0.0-1.0) + # - 值越大,使用探索分支(随机推荐)的几率越大,结果更随机 + # - 值越小,使用利用分支(基于用户偏好)的几率越大,结果更精准 + # - 建议范围: 0.3-0.7,要增加随机性可提高到 0.6-0.8 + "explore_ratio": 0.5, + + # 向量检索返回的候选数量 + # 值越大,候选池越大,但计算成本也越高 + # 建议范围: 100-1000 + "topk": 200, + + # Style 加分系数(同 style 的候选进行加分) + # 值越大,匹配 style 的候选被选中的概率越大 + # 要降低某个结果的重复率,可以降低此值(如 0.1 或 0.05) + "style_bonus": 0.2, + + # Softmax 抽样的温度参数 + # - 温度越高(>1.0),概率分布越均匀,结果更随机,重复率更低 + # - 温度越低(<1.0),高分项概率越大,结果更集中,重复率更高 + # - 温度=1.0 为标准 Softmax + # - 建议范围: 1.0-3.0,要增加随机性可提高到 2.0-3.0 + "softmax_temperature": 0.07, + + # 监听间隔(秒) + "listen_interval_sec": 30, + + # 批量处理大小 + "batch_size": 1000, + + # Redis 过期时间(秒,30天) + "redis_expire_seconds": 2592000, + + # 向量维度 + "vector_dim": 2048, +} + +# 数据库表名 +TABLE_USER_PREFERENCE_LOG = "user_preference" +TABLE_SYS_FILE = "t_sys_file" + +# MySQL 连接配置(用于推荐系统) +MYSQL_CONFIG = { + "host": settings.MYSQL_HOST, + "port": settings.MYSQL_PORT, + "user": settings.MYSQL_USER, + "password": settings.MYSQL_PASSWORD, + "database": settings.MYSQL_DB, + "charset": "utf8mb4" +} diff --git a/app/service/recommendation_system/import_sys_sketch_to_milvus.py b/app/service/recommendation_system/import_sys_sketch_to_milvus.py new file mode 100644 index 0000000..b055089 --- /dev/null +++ b/app/service/recommendation_system/import_sys_sketch_to_milvus.py @@ -0,0 +1,331 @@ +""" +独立脚本:从 t_sys_file 导入系统图向量到 Milvus +可以单独运行,不依赖整个项目启动 + +使用方法: + python -m app.service.recommendation_system.import_sys_sketch_to_milvus + 或 + python app/service/recommendation_system/import_sys_sketch_to_milvus.py +""" +import sys +import os +import logging +import argparse +from pathlib import 
Path + +# 添加项目根目录到 Python 路径 +project_root = Path(__file__).parent.parent.parent.parent +sys.path.insert(0, str(project_root)) + +import numpy as np +import pymysql +from tqdm import tqdm + +from app.service.recommendation_system.config import ( + MYSQL_CONFIG, TABLE_SYS_FILE, + RECOMMENDATION_CONFIG, MILVUS_COLLECTION_SKETCH_VECTORS +) +from app.service.recommendation_system.vector_utils import extract_feature_vector, normalize_vector +from app.service.recommendation_system.milvus_client import create_collection, insert_vectors + +# 配置日志 +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(levelname)s - %(message)s', + handlers=[ + logging.StreamHandler(), + logging.FileHandler('import_sys_sketch.log', encoding='utf-8') + ] +) +logger = logging.getLogger(__name__) + + +def get_sys_file_records(conn, limit=None, offset=0): + """ + 从 t_sys_file 表获取系统图记录 + + Args: + conn: 数据库连接 + limit: 限制数量(None 表示不限制) + offset: 偏移量 + + Returns: + 记录列表,每个元素为 (id, url, style, level3_type, level2_type, deprecated) + """ + cursor = conn.cursor() + + query = f""" + SELECT id, url, style, level3_type, level2_type, deprecated + FROM {TABLE_SYS_FILE} + WHERE level1_type = 'Images' + AND style IS NOT NULL + AND style != '' + AND deprecated != 1 + ORDER BY id + """ + + if limit: + query += f" LIMIT {limit} OFFSET {offset}" + + cursor.execute(query) + records = cursor.fetchall() + cursor.close() + + return records + + +def get_total_count(conn): + """获取总记录数""" + cursor = conn.cursor() + cursor.execute(f""" + SELECT COUNT(*) + FROM {TABLE_SYS_FILE} + WHERE level1_type = 'Images' + AND style IS NOT NULL + AND style != '' + AND deprecated != 1 + """) + count = cursor.fetchone()[0] + cursor.close() + return count + + +def process_and_insert_batch(records, batch_size=1000, retry_times=3): + """ + 处理并批量插入向量 + + Args: + records: 记录列表 + batch_size: 批量大小 + retry_times: 失败重试次数 + + Returns: + (成功数量, 失败数量) + """ + success_count = 0 + failed_count = 0 + failed_records = [] + batch_data = 
[] + + # 使用 tqdm 显示进度 + with tqdm(total=len(records), desc="处理记录", unit="条") as pbar: + for idx, (sys_file_id, url, style, level3_type, level2_type, deprecated) in enumerate(records): + try: + # 计算 category + category = f"{level3_type.lower()}_{level2_type.lower()}" + + # 提取特征向量 + feature_vector = extract_feature_vector(url) + # 归一化,便于 IP≈cosine 度量 + feature_vector = normalize_vector(feature_vector) + + # 检查向量是否有效 + if np.all(feature_vector == 0): + logger.warning(f"向量提取失败,跳过: {url} (id={sys_file_id})") + failed_count += 1 + failed_records.append((sys_file_id, url)) + pbar.update(1) + continue + + # 准备数据 + data_item = { + "path": url, + "sys_file_id": sys_file_id, + "style": style, + "category": category, + "is_system_sketch": 1, + "deprecated": deprecated if deprecated else 0, + "feature_vector": feature_vector.tolist() + } + + batch_data.append(data_item) + + # 批量写入 + if len(batch_data) >= batch_size: + try: + insert_vectors(batch_data) + success_count += len(batch_data) + batch_data = [] + logger.info(f"已成功插入 {success_count} 条记录") + except Exception as e: + logger.error(f"批量写入失败: {e}") + failed_count += len(batch_data) + failed_records.extend([(item["sys_file_id"], item["path"]) for item in batch_data]) + batch_data = [] + + pbar.update(1) + + except Exception as e: + logger.error(f"处理记录失败 [id={sys_file_id}, url={url}]: {e}") + failed_count += 1 + failed_records.append((sys_file_id, url)) + pbar.update(1) + + # 写入剩余数据 + if batch_data: + try: + insert_vectors(batch_data) + success_count += len(batch_data) + logger.info(f"写入剩余 {len(batch_data)} 条记录") + except Exception as e: + logger.error(f"写入剩余数据失败: {e}") + failed_count += len(batch_data) + failed_records.extend([(item["sys_file_id"], item["path"]) for item in batch_data]) + + # 重试失败记录 + if failed_records and retry_times > 0: + logger.info(f"开始重试 {len(failed_records)} 条失败记录,最多重试 {retry_times} 次...") + + for retry in range(retry_times): + if not failed_records: + break + + retry_failed = [] + with 
tqdm(total=len(failed_records), desc=f"重试第 {retry + 1} 次", unit="条") as pbar: + for sys_file_id, url in failed_records: + try: + # 重新查询记录信息 + conn = pymysql.connect(**MYSQL_CONFIG) + cursor = conn.cursor() + cursor.execute(f""" + SELECT id, url, style, level3_type, level2_type, deprecated + FROM {TABLE_SYS_FILE} + WHERE id = %s + """, (sys_file_id,)) + record = cursor.fetchone() + cursor.close() + conn.close() + + if not record: + retry_failed.append((sys_file_id, url)) + pbar.update(1) + continue + + sys_file_id, url, style, level3_type, level2_type, deprecated = record + category = f"{level3_type.lower()}_{level2_type.lower()}" + + feature_vector = extract_feature_vector(url) + feature_vector = normalize_vector(feature_vector) + if np.all(feature_vector == 0): + retry_failed.append((sys_file_id, url)) + pbar.update(1) + continue + + data_item = { + "path": url, + "sys_file_id": sys_file_id, + "style": style, + "category": category, + "is_system_sketch": 1, + "deprecated": deprecated if deprecated else 0, + "feature_vector": feature_vector.tolist() + } + + insert_vectors([data_item]) + success_count += 1 + failed_count -= 1 + pbar.update(1) + + except Exception as e: + logger.error(f"重试失败 [id={sys_file_id}, url={url}]: {e}") + retry_failed.append((sys_file_id, url)) + pbar.update(1) + + failed_records = retry_failed + if failed_records: + logger.warning(f"第 {retry + 1} 次重试后仍有 {len(failed_records)} 条记录失败") + + return success_count, failed_count, failed_records + + +def main(): + """主函数""" + parser = argparse.ArgumentParser(description='从 t_sys_file 导入系统图向量到 Milvus') + parser.add_argument('--batch-size', type=int, default=1000, help='批量处理大小(默认:1000)') + parser.add_argument('--retry-times', type=int, default=3, help='失败重试次数(默认:3)') + parser.add_argument('--limit', type=int, default=None, help='限制处理数量(用于测试,默认:不限制)') + parser.add_argument('--offset', type=int, default=0, help='起始偏移量(默认:0)') + parser.add_argument('--skip-create-collection', action='store_true', 
help='跳过创建集合(如果集合已存在)') + + args = parser.parse_args() + + logger.info("=" * 60) + logger.info("开始从 t_sys_file 导入系统图向量到 Milvus") + logger.info("=" * 60) + logger.info(f"配置参数:") + logger.info(f" - 批量大小: {args.batch_size}") + logger.info(f" - 重试次数: {args.retry_times}") + logger.info(f" - 限制数量: {args.limit if args.limit else '不限制'}") + logger.info(f" - 起始偏移: {args.offset}") + logger.info("=" * 60) + + # 1. 创建 Milvus 集合 + if not args.skip_create_collection: + logger.info("创建 Milvus 集合...") + try: + create_collection() + logger.info("Milvus 集合创建成功(或已存在)") + except Exception as e: + logger.error(f"创建 Milvus 集合失败: {e}") + return + else: + logger.info("跳过创建集合") + + # 2. 连接数据库 + logger.info("连接数据库...") + try: + conn = pymysql.connect(**MYSQL_CONFIG) + logger.info("数据库连接成功") + except Exception as e: + logger.error(f"数据库连接失败: {e}") + return + + try: + # 3. 获取总记录数 + total_count = get_total_count(conn) + logger.info(f"找到 {total_count} 条系统图记录") + + if total_count == 0: + logger.warning("没有找到系统图数据") + return + + # 4. 获取记录 + logger.info("获取记录...") + records = get_sys_file_records(conn, limit=args.limit, offset=args.offset) + logger.info(f"获取到 {len(records)} 条记录") + + if not records: + logger.warning("没有获取到记录") + return + + # 5. 处理并插入 + logger.info("开始处理记录...") + success_count, failed_count, failed_records = process_and_insert_batch( + records, + batch_size=args.batch_size, + retry_times=args.retry_times + ) + + # 6. 输出结果 + logger.info("=" * 60) + logger.info("导入完成!") + logger.info(f" - 成功: {success_count} 条") + logger.info(f" - 失败: {failed_count} 条") + if failed_records: + logger.warning(f" - 失败记录列表(前10条):") + for sys_file_id, url in failed_records[:10]: + logger.warning(f" ID={sys_file_id}, URL={url}") + if len(failed_records) > 10: + logger.warning(f" ... 
还有 {len(failed_records) - 10} 条失败记录") + logger.info("=" * 60) + + except Exception as e: + logger.error(f"处理过程中发生错误: {e}", exc_info=True) + finally: + conn.close() + logger.info("数据库连接已关闭") + + +if __name__ == "__main__": + main() + diff --git a/app/service/recommendation_system/incremental_listener.py b/app/service/recommendation_system/incremental_listener.py new file mode 100644 index 0000000..5e554eb --- /dev/null +++ b/app/service/recommendation_system/incremental_listener.py @@ -0,0 +1,347 @@ +""" +增量监听模块 +实时监听 user_preference 表的新增记录,更新用户偏好向量 +""" +import logging +import math +import pymysql +import numpy as np +from typing import List, Dict, Set, Tuple, Optional +from datetime import datetime +from collections import defaultdict + +from apscheduler.schedulers.background import BackgroundScheduler +from apscheduler.schedulers.blocking import BlockingScheduler + +from app.service.recommendation_system.config import ( + MYSQL_CONFIG, TABLE_USER_PREFERENCE_LOG, TABLE_SYS_FILE, + RECOMMENDATION_CONFIG, REDIS_KEY_USER_PREF_PREFIX +) +from app.service.recommendation_system.vector_utils import extract_feature_vector, compute_weighted_average, normalize_vector +from app.service.recommendation_system.milvus_client import query_vectors_by_paths, insert_vectors +from app.service.utils.redis_utils import Redis +import json + +logger = logging.getLogger(__name__) + + +class IncrementalListener: + """增量监听器""" + + def __init__(self): + self.last_process_time = None + self.processed_combinations: Set[Tuple[int, str]] = set() # 已处理的 (account_id, category) 组合 + self.listen_interval = RECOMMENDATION_CONFIG["listen_interval_sec"] + + def get_new_like_records(self) -> List[Tuple]: + """ + 获取新增点赞记录 + + Returns: + 记录列表,每个元素为 (id, account_id, path, category, style, data_time, is_system_sketch, sys_file_id) + """ + conn = None + try: + conn = pymysql.connect(**MYSQL_CONFIG) + cursor = conn.cursor() + + if self.last_process_time is None: + # 第一次运行,查询最近30分钟的数据 + cursor.execute(f""" + 
SELECT id, account_id, path, category, style, data_time + FROM {TABLE_USER_PREFERENCE_LOG} + WHERE data_time > DATE_SUB(NOW(), INTERVAL 30 MINUTE) + ORDER BY data_time + """) + else: + # 基于上次处理时间查询 + cursor.execute(f""" + SELECT id, account_id, path, category, style, data_time + FROM {TABLE_USER_PREFERENCE_LOG} + WHERE data_time > %s + ORDER BY data_time + """, (self.last_process_time,)) + + records = cursor.fetchall() + return records + + except Exception as e: + logger.error(f"获取新增点赞记录失败: {e}", exc_info=True) + return [] + finally: + if conn: + conn.close() + + def process_new_records(self, records: List[Tuple]): + """ + 处理新增记录 + + Args: + records: 记录列表 + """ + if not records: + return + + # 按用户+类别分组 + user_category_records = defaultdict(list) + for record in records: + account_id = record[1] + category = record[3] + if category: # 只处理有类别的记录 + user_category_records[(account_id, category)].append(record) + + # 去重:只处理一次每个 (account_id, category) 组合 + to_process = [] + for (account_id, category), recs in user_category_records.items(): + if (account_id, category) not in self.processed_combinations: + to_process.append((account_id, category, recs)) + self.processed_combinations.add((account_id, category)) + + logger.info(f"需要处理 {len(to_process)} 个用户-类别组合") + + # 处理每个组合 + for account_id, category, recs in to_process: + try: + self.update_user_preference_vector(account_id, category) + except Exception as e: + logger.error(f"更新用户偏好向量失败 [user={account_id}, category={category}]: {e}", exc_info=True) + + # 更新最后处理时间 + if records: + self.last_process_time = records[-1][5] # data_time + # 重置去重集合,确保下次周期不会跳过同一用户-类别 + self.processed_combinations.clear() + + def update_user_preference_vector(self, account_id: int, category: str): + """ + 更新用户偏好向量 + + Args: + account_id: 用户ID + category: 类别 + """ + conn = None + try: + conn = pymysql.connect(**MYSQL_CONFIG) + cursor = conn.cursor() + + # 1. 
获取该用户该类别的所有点赞记录 + cursor.execute(f""" + SELECT path, data_time + FROM {TABLE_USER_PREFERENCE_LOG} + WHERE account_id = %s AND category = %s + ORDER BY data_time DESC + """, (account_id, category)) + + like_records = cursor.fetchall() + + if not like_records: + return + + # 2. 批量查询点赞次数 + paths = [r[0] for r in like_records] + placeholders = ','.join(['%s'] * len(paths)) + cursor.execute(f""" + SELECT path, COUNT(*) as like_count + FROM {TABLE_USER_PREFERENCE_LOG} + WHERE account_id = %s AND category = %s AND path IN ({placeholders}) + GROUP BY path + """, (account_id, category) + tuple(paths)) + + like_counts = {row[0]: row[1] for row in cursor.fetchall()} + + # 3. 批量获取向量 + vectors_dict = query_vectors_by_paths(paths) + + # 处理查询不到的 path(新用户图或异常情况) + missing_paths = [p for p in paths if p not in vectors_dict] + if missing_paths: + logger.info(f"用户 {account_id} 类别 {category} 有 {len(missing_paths)} 个 path 需要实时计算向量") + self._compute_and_insert_missing_vectors(missing_paths, conn) + # 重新查询 + vectors_dict = query_vectors_by_paths(paths) + + # 4. 计算权重并加权平均 + vectors = [] + weights = [] + K_half = RECOMMENDATION_CONFIG["K_half"] + + for k, (path, data_time) in enumerate(like_records, 1): + if path not in vectors_dict: + continue + + vector_data = vectors_dict[path] + feature_vector = np.array(vector_data["feature_vector"]) + + # 时间衰减权重 + d_k = 0.5 ** (k / K_half) + + # 点赞次数权重 + like_count = like_counts.get(path, 1) + p_i = 1 + math.log(1 + like_count) + + # 综合权重 + w_i = d_k * p_i + + vectors.append(feature_vector) + weights.append(w_i) + + if not vectors: + logger.warning(f"用户 {account_id} 类别 {category} 没有有效向量") + return + + # 5. 计算加权平均并做 L2 归一化,IP≈cosine + preference_vector = compute_weighted_average(vectors, weights) + preference_vector = normalize_vector(preference_vector) + + # 6. 
写入 Redis + key = f"{REDIS_KEY_USER_PREF_PREFIX}:{account_id}:{category}" + vector_json = json.dumps(preference_vector.tolist()) + Redis.write( + key=key, + value=vector_json, + expire=RECOMMENDATION_CONFIG["redis_expire_seconds"] + ) + + logger.debug(f"用户偏好向量更新成功 [user={account_id}, category={category}]") + + except Exception as e: + logger.error(f"更新用户偏好向量失败 [user={account_id}, category={category}]: {e}", exc_info=True) + raise + finally: + if conn: + conn.close() + + def _compute_and_insert_missing_vectors(self, paths: List[str], conn: pymysql.connections.Connection): + """ + 计算并插入缺失的向量 + + Args: + paths: 缺失的 path 列表 + conn: 数据库连接 + """ + cursor = conn.cursor() + data_to_insert = [] + + for path in paths: + try: + # 判断数据来源(查询 t_sys_file 表) + cursor.execute(f""" + SELECT id, url, style, level3_type, level2_type, deprecated + FROM {TABLE_SYS_FILE} + WHERE url = %s + LIMIT 1 + """, (path,)) + + sys_file = cursor.fetchone() + + # 提取特征向量 + feature_vector = extract_feature_vector(path) + + if np.all(feature_vector == 0): + logger.warning(f"向量提取失败,跳过: {path}") + continue + + if sys_file: + # 系统图 + sys_file_id, url, style, level3_type, level2_type, deprecated = sys_file + category = f"{level3_type.lower()}_{level2_type.lower()}" + + data_item = { + "path": path, + "sys_file_id": sys_file_id, + "style": style, + "category": category, + "is_system_sketch": 1, + "deprecated": deprecated if deprecated else 0, + "feature_vector": feature_vector.tolist() + } + else: + # 用户图 + # 从 user_preference 获取 category(如果有) + cursor.execute(f""" + SELECT category + FROM {TABLE_USER_PREFERENCE_LOG} + WHERE path = %s AND category IS NOT NULL + LIMIT 1 + """, (path,)) + + category_result = cursor.fetchone() + category = category_result[0] if category_result else None + + data_item = { + "path": path, + "sys_file_id": None, + "style": None, + "category": category, + "is_system_sketch": 0, + "deprecated": 0, + "feature_vector": feature_vector.tolist() + } + + data_to_insert.append(data_item) + 
+ except Exception as e: + logger.error(f"处理缺失向量失败 [{path}]: {e}") + + # 批量插入 + if data_to_insert: + try: + insert_vectors(data_to_insert) + logger.info(f"成功插入 {len(data_to_insert)} 个缺失向量") + except Exception as e: + logger.error(f"插入缺失向量失败: {e}") + + def process_once(self): + """单次轮询任务,供调度器调用""" + try: + records = self.get_new_like_records() + + if records: + logger.info(f"发现 {len(records)} 条新增记录") + self.process_new_records(records) + else: + logger.debug("没有新增记录") + except Exception as e: + logger.error(f"监听轮询异常: {e}", exc_info=True) + + +def start_background_listener(scheduler: BackgroundScheduler): + """将增量监听任务注册到后台调度器""" + # 降低 apscheduler 的日志级别,避免大量刷屏 + logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING) + logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING) + + listener = IncrementalListener() + scheduler.add_job( + listener.process_once, + "interval", + seconds=listener.listen_interval, + max_instances=1, + coalesce=True, + id="recommendation_incremental_listener", + replace_existing=True, + ) + logger.info("增量监听任务已注册到调度器") + + +def start_blocking_listener(): + """以阻塞方式启动调度器(用于独立脚本运行)""" + listener = IncrementalListener() + scheduler = BlockingScheduler() + scheduler.add_job( + listener.process_once, + "interval", + seconds=listener.listen_interval, + max_instances=1, + coalesce=True, + id="recommendation_incremental_listener", + replace_existing=True, + ) + logger.info("增量监听调度器已启动(BlockingScheduler)") + scheduler.start() + + +if __name__ == "__main__": + start_blocking_listener() + diff --git a/app/service/recommendation_system/milvus_client.py b/app/service/recommendation_system/milvus_client.py new file mode 100644 index 0000000..5fefa71 --- /dev/null +++ b/app/service/recommendation_system/milvus_client.py @@ -0,0 +1,332 @@ +""" +Milvus 客户端封装 +""" +import logging +from typing import List, Dict, Optional, Any +import numpy as np +from pymilvus import MilvusClient, FieldSchema, CollectionSchema, DataType, 
connections, Collection + +from app.core.config import settings +from app.service.recommendation_system.config import MILVUS_COLLECTION_SKETCH_VECTORS, RECOMMENDATION_CONFIG + +logger = logging.getLogger(__name__) + +# Milvus 客户端(单例) +_milvus_client = None + + +def get_milvus_client() -> MilvusClient: + """获取 Milvus 客户端(单例模式)""" + global _milvus_client + if _milvus_client is None: + try: + _milvus_client = MilvusClient( + uri=settings.MILVUS_URL, + token=settings.MILVUS_TOKEN, + db_name="", + ) + logger.info("Milvus 客户端连接成功") + except Exception as e: + logger.error(f"Milvus 客户端连接失败: {e}") + raise + return _milvus_client + + +def create_collection(): + """ + 创建 Milvus 集合 sketch_vectors + + 集合结构: + - path (PK, varchar(512)) - 主键,MinIO 逻辑 URL + - sys_file_id (int64, 可为NULL) - 系统文件ID + - style (varchar(50), 可为NULL) - 风格样式 + - category (varchar(100), 可为NULL) - 类别 + - is_system_sketch (int8, 默认 1) - 标记字段:1-系统图,0-用户图 + - deprecated (int8, 默认 0) - 是否废弃 + - feature_vector (FloatVector(2048)) - 2048维特征向量 + """ + client = get_milvus_client() + + # 检查集合是否已存在 + collections = client.list_collections() + if MILVUS_COLLECTION_SKETCH_VECTORS in collections: + logger.info(f"集合 {MILVUS_COLLECTION_SKETCH_VECTORS} 已存在") + return + + try: + # 解析 Milvus URL + # 处理 http://host.docker.internal:19530 格式 + url_clean = settings.MILVUS_URL.replace("http://", "").replace("https://", "") + if ":" in url_clean: + host, port_str = url_clean.split(":", 1) + port = int(port_str) + else: + host = url_clean + port = 19530 + + # 使用传统 API 创建集合(更可靠) + # 连接到 Milvus(如果未连接) + try: + connections.connect( + alias=settings.MILVUS_ALIAS, + host=host, + port=port, + token=settings.MILVUS_TOKEN if settings.MILVUS_TOKEN else None + ) + logger.info(f"已连接到 Milvus: {host}:{port}") + except Exception as conn_e: + # 如果连接已存在,忽略错误 + if "already exists" in str(conn_e).lower() or "Connection already exists" in str(conn_e): + logger.info("Milvus 连接已存在") + else: + logger.warning(f"连接 Milvus 时出现警告: {conn_e}") + + # 定义字段 + 
fields = [ + FieldSchema(name="path", dtype=DataType.VARCHAR, is_primary=True, max_length=512), + FieldSchema(name="sys_file_id", dtype=DataType.INT64), + FieldSchema(name="style", dtype=DataType.VARCHAR, max_length=50), + FieldSchema(name="category", dtype=DataType.VARCHAR, max_length=50), + FieldSchema(name="is_system_sketch", dtype=DataType.INT8), + FieldSchema(name="deprecated", dtype=DataType.INT8), + FieldSchema( + name="feature_vector", + dtype=DataType.FLOAT_VECTOR, + dim=RECOMMENDATION_CONFIG["vector_dim"] + ) + ] + + # 创建 schema + schema = CollectionSchema( + fields=fields, + description="Sketch vectors collection for recommendation system" + ) + + # 创建集合 + collection = Collection( + name=MILVUS_COLLECTION_SKETCH_VECTORS, + schema=schema, + using=settings.MILVUS_ALIAS + ) + + # 创建索引 + # 注意:使用 IP(内积)作为度量类型,与搜索时保持一致 + # 如果向量已归一化,IP 等价于 COSINE + index_params = { + "metric_type": "IP", # 内积(Inner Product) + "index_type": "IVF_FLAT", + "params": {"nlist": 1024} + } + + collection.create_index( + field_name="feature_vector", + index_params=index_params + ) + + logger.info(f"集合 {MILVUS_COLLECTION_SKETCH_VECTORS} 创建成功") + + except Exception as e: + logger.error(f"创建集合失败: {e}", exc_info=True) + raise + + +def insert_vectors(data: List[Dict[str, Any]]): + """ + 批量插入向量到 Milvus + + Args: + data: 数据列表,每个元素包含: + - path: str + - sys_file_id: int (可选) + - style: str (可选) + - category: str (可选) + - is_system_sketch: int (默认 1) + - deprecated: int (默认 0) + - feature_vector: List[float] (2048维) + """ + if not data: + return + + client = get_milvus_client() + + try: + client.insert( + collection_name=MILVUS_COLLECTION_SKETCH_VECTORS, + data=data + ) + logger.info(f"成功插入 {len(data)} 条向量数据") + except Exception as e: + logger.error(f"插入向量失败: {e}", exc_info=True) + raise + + +def query_vectors_by_paths(paths: List[str]) -> Dict[str, Dict]: + """ + 根据 path 列表批量查询向量 + + Args: + paths: path 列表 + + Returns: + {path: {feature_vector: [...], ...}} 字典 + """ + if not paths: + return {} 
+ + client = get_milvus_client() + + try: + # 构建查询表达式 + # 使用 filter 参数而不是 expr(根据 pymilvus MilvusClient API) + # 对于字符串列表,使用单引号包裹每个值 + path_list = ", ".join([f"'{p}'" for p in paths]) + filter_expr = f"path in [{path_list}]" + + results = client.query( + collection_name=MILVUS_COLLECTION_SKETCH_VECTORS, + filter=filter_expr, + output_fields=["path", "feature_vector", "style", "category", "sys_file_id", "is_system_sketch", "deprecated"] + ) + + # 转换为字典 + result_dict = {} + for r in results: + result_dict[r["path"]] = r + + return result_dict + except Exception as e: + logger.error(f"查询向量失败: {e}", exc_info=True) + return {} + + +def search_similar_vectors( + query_vector: np.ndarray, + category: str, + topk: int = 500, + style: Optional[str] = None, + style_boost_ratio: float = 0.2 +) -> List[Dict]: + """ + 向量相似度检索 + + Args: + query_vector: 查询向量(2048维) + category: 类别过滤 + topk: 返回数量 + style: 风格过滤(可选)- 当提供时,会给对应style的结果加分 + style_boost_ratio: 风格加分比例(默认0.1,即10%) + + Returns: + 检索结果列表,每个元素包含 path, score, style, category 等字段 + """ + client = get_milvus_client() + + try: + # 如果没有指定style,使用原始逻辑 + if not style: + filter_expr = f"category == '{category}' && deprecated == 0" + results = client.search( + collection_name=MILVUS_COLLECTION_SKETCH_VECTORS, + data=[query_vector.tolist()], + anns_field="feature_vector", + search_params={"metric_type": "IP", "params": {"nprobe": 10}}, + limit=topk, + filter=filter_expr, + output_fields=["path", "style", "category", "sys_file_id"] + ) + else: + # 有style参数时,使用两阶段搜索策略 + + # 第一阶段:搜索匹配style的向量,使用boosted query vector + filter_expr_style = f"category == '{category}' && deprecated == 0 && style == '{style}'" + boosted_query = query_vector * (1 + style_boost_ratio) + results_style = client.search( + collection_name=MILVUS_COLLECTION_SKETCH_VECTORS, + data=[boosted_query.tolist()], + anns_field="feature_vector", + search_params={"metric_type": "IP", "params": {"nprobe": 10}}, + limit=topk, + filter=filter_expr_style, + output_fields=["path", 
"style", "category", "sys_file_id"] + ) + + # 第二阶段:搜索其他style的向量 + filter_expr_others = f"category == '{category}' && deprecated == 0 && style != '{style}'" + results_others = client.search( + collection_name=MILVUS_COLLECTION_SKETCH_VECTORS, + data=[query_vector.tolist()], + anns_field="feature_vector", + search_params={"metric_type": "IP", "params": {"nprobe": 10}}, + limit=topk, + filter=filter_expr_others, + output_fields=["path", "style", "category", "sys_file_id"] + ) + + # 合并结果 + results = [] + if results_style and len(results_style) > 0: + results.extend(results_style[0]) + if results_others and len(results_others) > 0: + results.extend(results_others[0]) + + # 转换为单个结果列表格式 + results = [results] if results else [] + + # 格式化结果 + formatted_results = [] + if results and len(results) > 0: + for hit in results[0]: + formatted_results.append({ + "path": hit.get("entity", {}).get("path", ""), + "score": hit.get("distance", 0.0), + "style": hit.get("entity", {}).get("style", ""), + "category": hit.get("entity", {}).get("category", ""), + "sys_file_id": hit.get("entity", {}).get("sys_file_id") + }) + + # 按分数排序并返回topk + formatted_results.sort(key=lambda x: x["score"], reverse=True) + return formatted_results[:topk] + + except Exception as e: + logger.error(f"向量检索失败: {e}", exc_info=True) + return [] + + +def query_random_candidates(category: str, style: Optional[str] = None, limit: int = 10) -> List[Dict]: + """ + 随机查询候选(用于探索分支) + + Args: + category: 类别 + style: 风格(可选) + limit: 返回数量 + + Returns: + 候选列表 + """ + client = get_milvus_client() + + try: + # 构建过滤表达式 + filter_expr = f"category == '{category}' && deprecated == 0" + if style: + filter_expr += f" && style == '{style}'" + + # 查询所有符合条件的记录 + results = client.query( + collection_name=MILVUS_COLLECTION_SKETCH_VECTORS, + filter=filter_expr, + output_fields=["path", "style", "category"], + limit=10000 + ) + + # 随机选择 + if len(results) > limit: + import random + results = random.sample(results, limit) + + return results + 
except Exception as e: + logger.error(f"随机查询候选失败: {e}", exc_info=True) + return [] diff --git a/app/service/recommendation_system/precompute.py b/app/service/recommendation_system/precompute.py new file mode 100644 index 0000000..235e80f --- /dev/null +++ b/app/service/recommendation_system/precompute.py @@ -0,0 +1,557 @@ +""" +预计算模块 +包含:数据库表结构优化、Milvus集合创建、系统图向量预计算、初始用户偏好向量生成 +""" +import logging +import math +import pymysql +import numpy as np +from datetime import datetime +from typing import List, Dict, Tuple, Optional +from collections import defaultdict + +from app.service.recommendation_system.config import ( + MYSQL_CONFIG, TABLE_USER_PREFERENCE_LOG, TABLE_SYS_FILE, + RECOMMENDATION_CONFIG, REDIS_KEY_USER_PREF_PREFIX +) +from app.service.recommendation_system.vector_utils import extract_feature_vector, normalize_vector, compute_weighted_average +from app.service.recommendation_system.milvus_client import ( + create_collection, insert_vectors, query_vectors_by_paths +) +from app.service.utils.redis_utils import Redis +import json + +logger = logging.getLogger(__name__) + + +def optimize_database_table(): + """ + 优化 user_preference 表结构 + 添加冗余字段和索引 + """ + conn = None + try: + conn = pymysql.connect(**MYSQL_CONFIG) + cursor = conn.cursor() + + # 1. 
添加冗余字段 + logger.info("添加冗余字段...") + alter_sqls = [ + f"ALTER TABLE {TABLE_USER_PREFERENCE_LOG} ADD COLUMN category VARCHAR(100) COMMENT '类别:lower(level3_type + \"_\" + level2_type)'", + f"ALTER TABLE {TABLE_USER_PREFERENCE_LOG} ADD COLUMN style VARCHAR(50) COMMENT '风格样式'", + f"ALTER TABLE {TABLE_USER_PREFERENCE_LOG} ADD COLUMN is_system_sketch TINYINT(1) DEFAULT 1 COMMENT '是否为系统图(1-是,0-用户图)'", + f"ALTER TABLE {TABLE_USER_PREFERENCE_LOG} ADD COLUMN sys_file_id BIGINT NULL COMMENT '系统文件ID'", + ] + + for sql in alter_sqls: + try: + cursor.execute(sql) + logger.info(f"执行成功: {sql[:50]}...") + except Exception as e: + if "Duplicate column name" in str(e): + logger.info(f"字段已存在,跳过: {sql[:50]}...") + else: + logger.warning(f"执行失败: {sql[:50]}... 错误: {e}") + + # 2. 创建索引(MySQL 不支持 IF NOT EXISTS,需要先检查) + logger.info("创建索引...") + index_definitions = [ + ("idx_account_category_time", ["account_id", "category", "data_time"]), + ("idx_account_path", ["account_id", "path"]), + ] + + for index_name, columns in index_definitions: + try: + # 检查索引是否已存在 + cursor.execute(f""" + SELECT COUNT(*) + FROM information_schema.statistics + WHERE table_schema = DATABASE() + AND table_name = '{TABLE_USER_PREFERENCE_LOG}' + AND index_name = '{index_name}' + """) + exists = cursor.fetchone()[0] > 0 + + if exists: + logger.info(f"索引已存在,跳过: {index_name}") + else: + # 创建索引 + columns_str = ', '.join(columns) + create_sql = f"CREATE INDEX {index_name} ON {TABLE_USER_PREFERENCE_LOG}({columns_str})" + cursor.execute(create_sql) + logger.info(f"索引创建成功: {index_name}") + except Exception as e: + logger.warning(f"索引创建失败: {index_name} 错误: {e}") + + conn.commit() + logger.info("数据库表结构优化完成") + + except Exception as e: + logger.error(f"数据库表结构优化失败: {e}", exc_info=True) + if conn: + conn.rollback() + finally: + if conn: + conn.close() + + +def migrate_historical_data(batch_size: int = 1000): + """ + 历史数据迁移:批量更新冗余字段 + + Args: + batch_size: 每批处理数量 + """ + conn = None + try: + conn = pymysql.connect(**MYSQL_CONFIG) + 
cursor = conn.cursor() + + # 查询需要更新的记录数 + cursor.execute(f""" + SELECT COUNT(*) + FROM {TABLE_USER_PREFERENCE_LOG} u + WHERE u.category IS NULL + """) + total_count = cursor.fetchone()[0] + logger.info(f"需要迁移的记录数: {total_count}") + + if total_count == 0: + logger.info("无需迁移数据") + return + + # 分批处理 + offset = 0 + processed = 0 + + while offset < total_count: + # 查询一批记录 + cursor.execute(f""" + SELECT u.id, u.path + FROM {TABLE_USER_PREFERENCE_LOG} u + WHERE u.category IS NULL + LIMIT {batch_size} OFFSET {offset} + """) + records = cursor.fetchall() + + if not records: + break + + # 批量更新 + for record_id, path in records: + # 查询 t_sys_file 表 + cursor.execute(f""" + SELECT id, url, style, level3_type, level2_type, deprecated + FROM {TABLE_SYS_FILE} + WHERE url = %s + LIMIT 1 + """, (path,)) + + sys_file = cursor.fetchone() + + if sys_file: + # 系统图 + sys_file_id, url, style, level3_type, level2_type, deprecated = sys_file + category = f"{level3_type.lower()}_{level2_type.lower()}" + + cursor.execute(f""" + UPDATE {TABLE_USER_PREFERENCE_LOG} + SET category = %s, + style = %s, + is_system_sketch = 1, + sys_file_id = %s + WHERE id = %s + """, (category, style, sys_file_id, record_id)) + else: + # 用户图 + cursor.execute(f""" + UPDATE {TABLE_USER_PREFERENCE_LOG} + SET is_system_sketch = 0, + category = NULL, + style = NULL, + sys_file_id = NULL + WHERE id = %s + """, (record_id,)) + + conn.commit() + processed += len(records) + offset += batch_size + logger.info(f"已迁移 {processed}/{total_count} 条记录") + + logger.info("历史数据迁移完成") + + except Exception as e: + logger.error(f"历史数据迁移失败: {e}", exc_info=True) + if conn: + conn.rollback() + finally: + if conn: + conn.close() + + +def precompute_system_sketch_vectors(batch_size: int = 1000, retry_times: int = 3): + """ + 系统图向量预计算与导入 + + Args: + batch_size: 每批处理数量 + retry_times: 失败重试次数 + """ + conn = None + try: + conn = pymysql.connect(**MYSQL_CONFIG) + cursor = conn.cursor() + + # 1. 
数据筛选 + logger.info("查询系统图数据...") + cursor.execute(f""" + SELECT id, url, style, level3_type, level2_type, deprecated + FROM {TABLE_SYS_FILE} + WHERE level1_type = 'Images' + AND style IS NOT NULL + AND style != '' + AND deprecated != 1 + """) + records = cursor.fetchall() + logger.info(f"找到 {len(records)} 条系统图记录") + + if not records: + logger.warning("没有找到系统图数据") + return + + # 2. 批量处理 + failed_records = [] + batch_data = [] + + for idx, (sys_file_id, url, style, level3_type, level2_type, deprecated) in enumerate(records, 1): + try: + # 计算 category + category = f"{level3_type.lower()}_{level2_type.lower()}" + + # 提取特征向量 + feature_vector = extract_feature_vector(url) + + # 检查向量是否有效 + if np.all(feature_vector == 0): + logger.warning(f"向量提取失败,跳过: {url}") + failed_records.append((sys_file_id, url)) + continue + + # 准备数据 + data_item = { + "path": url, + "sys_file_id": sys_file_id, + "style": style, + "category": category, + "is_system_sketch": 1, + "deprecated": deprecated if deprecated else 0, + "feature_vector": feature_vector.tolist() + } + + batch_data.append(data_item) + + # 批量写入 + if len(batch_data) >= batch_size: + try: + insert_vectors(batch_data) + batch_data = [] + logger.info(f"已处理 {idx}/{len(records)} 条记录") + except Exception as e: + logger.error(f"批量写入失败: {e}") + failed_records.extend([(item["sys_file_id"], item["path"]) for item in batch_data]) + batch_data = [] + + except Exception as e: + logger.error(f"处理记录失败 [{url}]: {e}") + failed_records.append((sys_file_id, url)) + + # 写入剩余数据 + if batch_data: + try: + insert_vectors(batch_data) + except Exception as e: + logger.error(f"写入剩余数据失败: {e}") + failed_records.extend([(item["sys_file_id"], item["path"]) for item in batch_data]) + + # 3. 
重试失败记录 + if failed_records and retry_times > 0: + logger.info(f"重试 {len(failed_records)} 条失败记录...") + for retry in range(retry_times): + retry_failed = [] + for sys_file_id, url in failed_records: + try: + category = f"{level3_type.lower()}_{level2_type.lower()}" + feature_vector = extract_feature_vector(url) + if not np.all(feature_vector == 0): + data_item = { + "path": url, + "sys_file_id": sys_file_id, + "style": style, + "category": category, + "is_system_sketch": 1, + "deprecated": 0, + "feature_vector": feature_vector.tolist() + } + insert_vectors([data_item]) + else: + retry_failed.append((sys_file_id, url)) + except Exception as e: + logger.error(f"重试失败 [{url}]: {e}") + retry_failed.append((sys_file_id, url)) + + failed_records = retry_failed + if not failed_records: + break + + if failed_records: + logger.warning(f"仍有 {len(failed_records)} 条记录处理失败") + + logger.info("系统图向量预计算完成") + + except Exception as e: + logger.error(f"系统图向量预计算失败: {e}", exc_info=True) + finally: + if conn: + conn.close() + + +def compute_user_preference_vector( + account_id: int, + category: str, + conn: Optional[pymysql.connections.Connection] = None, + max_date: Optional[datetime] = None +) -> Optional[np.ndarray]: + """ + 计算用户偏好向量 + + Args: + account_id: 用户ID + category: 类别 + conn: 数据库连接(可选) + max_date: 最大日期(可选,用于评估时只使用训练集数据) + + Returns: + 用户偏好向量(2048维),失败返回 None + """ + from datetime import datetime + + should_close = False + if conn is None: + conn = pymysql.connect(**MYSQL_CONFIG) + should_close = True + + try: + cursor = conn.cursor() + + # 1. 
获取点赞记录(如果指定了max_date,只查询该日期之前的数据) + if max_date: + cursor.execute(f""" + SELECT path, data_time + FROM {TABLE_USER_PREFERENCE_LOG} + WHERE account_id = %s AND category = %s AND style is not null + AND data_time < %s + ORDER BY data_time DESC + """, (account_id, category, max_date)) + else: + cursor.execute(f""" + SELECT path, data_time + FROM {TABLE_USER_PREFERENCE_LOG} + WHERE account_id = %s AND category = %s AND style is not null + ORDER BY data_time DESC + """, (account_id, category)) + + like_records = cursor.fetchall() + + if not like_records: + return None + + # 2. 批量查询点赞次数(如果指定了max_date,只统计该日期之前的点赞) + paths = [r[0] for r in like_records] + if not paths: + return None + + placeholders = ','.join(['%s'] * len(paths)) + if max_date: + cursor.execute(f""" + SELECT path, COUNT(*) as like_count + FROM {TABLE_USER_PREFERENCE_LOG} + WHERE account_id = %s AND category = %s AND path IN ({placeholders}) + AND data_time < %s + GROUP BY path + """, (account_id, category) + tuple(paths) + (max_date,)) + else: + cursor.execute(f""" + SELECT path, COUNT(*) as like_count + FROM {TABLE_USER_PREFERENCE_LOG} + WHERE account_id = %s AND category = %s AND path IN ({placeholders}) + GROUP BY path + """, (account_id, category) + tuple(paths)) + + like_counts = {row[0]: row[1] for row in cursor.fetchall()} + + # 3. 批量获取向量 + vectors_dict = query_vectors_by_paths(paths) + + # 处理查询不到的 path(用户图或异常情况) + missing_paths = [p for p in paths if p not in vectors_dict] + if missing_paths: + logger.info(f"用户 {account_id} 类别 {category} 有 {len(missing_paths)} 个 path 需要实时计算向量") + # 目前未有非系统图向量,跳过 + # 这里可以实时计算并写入 Milvus,但为了简化,先跳过 + # 实际实现中应该调用 vector_utils.extract_feature_vector 并写入 Milvus + + # 4. 
计算权重并加权平均 + vectors = [] + weights = [] + K_half = RECOMMENDATION_CONFIG["K_half"] + + for k, (path, data_time) in enumerate(like_records, 1): + if path not in vectors_dict: + continue + + vector_data = vectors_dict[path] + feature_vector = np.array(vector_data["feature_vector"]) + + # 时间衰减权重 + d_k = 0.5 ** (k / K_half) + + # 点赞次数权重 + like_count = like_counts.get(path, 1) + p_i = 1 + math.log(1 + like_count) + + # 综合权重 + w_i = d_k * p_i + # w_i = p_i + + vectors.append(feature_vector) + weights.append(w_i) + + if not vectors: + return None + + # 5. 计算加权平均并做 L2 归一化,IP≈cosine + preference_vector = compute_weighted_average(vectors, weights) + preference_vector = normalize_vector(preference_vector) + + return preference_vector + + except Exception as e: + logger.error(f"计算用户偏好向量失败 [user={account_id}, category={category}]: {e}", exc_info=True) + return None + finally: + if should_close and conn: + conn.close() + + +def generate_initial_user_preference_vectors(batch_size: int = 100): + """ + 初始用户偏好向量生成 + + Args: + batch_size: 每批处理用户数 + """ + conn = None + try: + conn = pymysql.connect(**MYSQL_CONFIG) + cursor = conn.cursor() + + # 1. 扫描历史数据 + logger.info("扫描用户和类别组合...") + cursor.execute(f""" + SELECT DISTINCT account_id, category + FROM {TABLE_USER_PREFERENCE_LOG} + WHERE category IS NOT NULL + AND style IS NOT NULL + """) + + user_categories = cursor.fetchall() + logger.info(f"找到 {len(user_categories)} 个用户-类别组合") + + if not user_categories: + logger.warning("没有找到用户-类别组合") + return + + # 2. 
批量处理 + processed = 0 + failed = 0 + + for account_id, category in user_categories: + try: + # 计算偏好向量 + preference_vector = compute_user_preference_vector(account_id, category, conn) + + if preference_vector is not None: + # 写入 Redis + key = f"{REDIS_KEY_USER_PREF_PREFIX}:{account_id}:{category}" + # 序列化向量(使用 JSON) + vector_json = json.dumps(preference_vector.tolist()) + Redis.write( + key=key, + value=vector_json, + expire=RECOMMENDATION_CONFIG["redis_expire_seconds"] + ) + processed += 1 + else: + failed += 1 + + if (processed + failed) % batch_size == 0: + logger.info(f"已处理 {processed + failed}/{len(user_categories)} 个组合,成功: {processed}, 失败: {failed}") + + except Exception as e: + logger.error(f"处理失败 [user={account_id}, category={category}]: {e}") + failed += 1 + + logger.info(f"初始用户偏好向量生成完成,成功: {processed}, 失败: {failed}") + + except Exception as e: + logger.error(f"初始用户偏好向量生成失败: {e}", exc_info=True) + finally: + if conn: + conn.close() + + +def run_precompute(): + """ + 运行所有预计算任务 + """ + logger.info("=" * 50) + logger.info("开始预计算任务") + logger.info("=" * 50) + + # 1. 优化数据库表结构 + # logger.info("\n[1/5] 优化数据库表结构...") + # optimize_database_table() + + # # 2. 创建 Milvus 集合 + # logger.info("\n[2/5] 创建 Milvus 集合...") + # create_collection() + + # 3. 历史数据迁移 + # logger.info("\n[3/5] 历史数据迁移...") + # migrate_historical_data() + + # # 4. 系统图向量预计算 + # logger.info("\n[4/5] 系统图向量预计算...") + # precompute_system_sketch_vectors() + + # 5. 初始用户偏好向量生成 + logger.info("\n[5/5] 初始用户偏好向量生成...") + generate_initial_user_preference_vectors() + + logger.info("=" * 50) + logger.info("预计算任务完成") + logger.info("=" * 50) + + +if __name__ == "__main__": + # # 1. 优化数据库表结构 + # logger.info("\n[1/5] 优化数据库表结构...") + # optimize_database_table() + # + # # 3. 历史数据迁移 + # logger.info("\n[3/5] 历史数据迁移...") + # migrate_historical_data() + + # 5. 
初始用户偏好向量生成 + logger.info("\n[5/5] 初始用户偏好向量生成...") + generate_initial_user_preference_vectors() diff --git a/app/service/recommendation_system/recommendation_api.py b/app/service/recommendation_system/recommendation_api.py new file mode 100644 index 0000000..7a856b8 --- /dev/null +++ b/app/service/recommendation_system/recommendation_api.py @@ -0,0 +1,214 @@ +""" +推荐接口实现 +实现探索/利用分支、向量检索、Softmax抽样等功能 +""" +import logging +import math +import random +import numpy as np +from typing import List, Dict, Optional + +from app.service.recommendation_system.config import RECOMMENDATION_CONFIG, REDIS_KEY_USER_PREF_PREFIX +from app.service.recommendation_system.milvus_client import search_similar_vectors, query_random_candidates +from app.service.recommendation_system.precompute import compute_user_preference_vector +from app.service.recommendation_system.vector_utils import normalize_vector +from app.service.utils.redis_utils import Redis +import json + +logger = logging.getLogger(__name__) + + +def get_user_preference_vector(user_id: int, category: str) -> Optional[np.ndarray]: + """ + 获取用户偏好向量 + + Args: + user_id: 用户ID + category: 类别 + + Returns: + 用户偏好向量(2048维),失败返回 None + """ + # 1. 从 Redis 获取 + key = f"{REDIS_KEY_USER_PREF_PREFIX}:{user_id}:{category}" + vector_json = Redis.read(key) + + if vector_json: + try: + vector_list = json.loads(vector_json) + return np.array(vector_list, dtype=np.float32) + except Exception as e: + logger.warning(f"解析 Redis 向量失败 [user={user_id}, category={category}]: {e}") + + # 2. 
如果不存在,实时计算 + logger.info(f"Redis 中不存在用户偏好向量,实时计算 [user={user_id}, category={category}]") + preference_vector = compute_user_preference_vector(user_id, category) + + if preference_vector is not None: + # 写入 Redis + vector_json = json.dumps(preference_vector.tolist()) + Redis.write( + key=key, + value=vector_json, + expire=RECOMMENDATION_CONFIG["redis_expire_seconds"] + ) + + return preference_vector + + +def explore_branch(category: str, style: Optional[str] = None) -> List[str]: + """ + 探索分支(随机推荐) + + Args: + category: 类别 + style: 风格(可选) + + Returns: + 推荐结果列表,每个元素包含 path, style, category 等字段 + """ + # 查询候选(随机池) + pool_size = 10 # 固定查询10个,然后随机选择 + + candidates = query_random_candidates(category, style, limit=pool_size) + + if not candidates: + logger.warning(f"探索分支:类别 {category} 没有候选数据") + return [] + + # 随机选择 + if len(candidates) > 1: + import random + candidates = random.sample(candidates, 1) + + # 格式化返回结果 + return [candidate.get("path", "") for candidate in candidates[:1]] + + +def exploit_branch( + user_id: int, + category: str, + style: Optional[str] = None +) -> List[str]: + """ + 利用分支(基于向量相似度推荐) + + Args: + user_id: 用户ID + category: 类别 + num_recommendations: 返回数量 + style: 风格(可选,用于加分) + + Returns: + 推荐结果列表,每个元素包含 path, style, category, similarity, sample_score 等字段 + """ + # 1. 获取用户偏好向量 + embedding = get_user_preference_vector(user_id, category) + + if embedding is None: + logger.warning(f"利用分支:无法获取用户偏好向量,回退到探索分支 [user={user_id}, category={category}]") + return explore_branch(category, style) + + # 2. Milvus 相似度检索(内积 IP) + topk = RECOMMENDATION_CONFIG["topk"] + results = search_similar_vectors(embedding, category, topk) + + if not results: + logger.warning(f"利用分支:向量检索无结果,回退到探索分支 [user={user_id}, category={category}]") + return explore_branch(category, style) + + # 3. 
Style 加分(可选,需传入 style 参数) + style_bonus = RECOMMENDATION_CONFIG["style_bonus"] + if style: + for result in results: + similarity = result["score"] + if result.get("style") == style: + # 加分:相似度 * (1 + style_bonus) + similarity = similarity * (1 + style_bonus) + result["final_score"] = similarity + else: + for result in results: + result["final_score"] = result["score"] + + # 4. Softmax 抽样 + scores = [r["final_score"] for r in results] + probabilities = softmax_with_temperature(scores, RECOMMENDATION_CONFIG["softmax_temperature"]) + + # 根据概率抽样 + if not results: + return [] + + selected_index = np.random.choice(len(results), size=1, p=probabilities, replace=False) + selected_results = [results[int(selected_index[0])]] + + # 5. 返回结果 + return [result.get("path", "") for result in selected_results] + + +def softmax_with_temperature(scores: List[float], temperature: float = 1.0) -> List[float]: + """ + Softmax 函数(带温度参数) + + Args: + scores: 分数列表 + temperature: 温度参数 + + Returns: + 概率列表 + """ + if not scores: + return [] + + # 除以温度 + scaled_scores = [s / temperature for s in scores] + + # 减去最大值(数值稳定性) + max_score = max(scaled_scores) + exp_scores = [math.exp(s - max_score) for s in scaled_scores] + + # 归一化 + sum_exp = sum(exp_scores) + if sum_exp == 0: + # 如果所有分数都是负无穷或非常小,返回均匀分布 + return [1.0 / len(scores)] * len(scores) + + probabilities = [exp_s / sum_exp for exp_s in exp_scores] + return probabilities + + +def get_recommendations( + user_id: int, + category: str, + style: Optional[str] = None +) -> List[str]: + """ + 获取推荐结果(主函数) + + Args: + user_id: 用户ID + category: 类别(如 female_skirt) + num_recommendations: 返回推荐数量(默认 1) + style: 风格(可选):若传入,则在利用分支对同 style 的候选进行加分 + + Returns: + 推荐结果列表,每个元素包含 path 等字段 + """ + try: + # 1. 读取配置参数 + explore_ratio = RECOMMENDATION_CONFIG["explore_ratio"] + + # 2. 
探索/利用决策 + r = random.random() # 生成随机数 (0-1) + + if r < explore_ratio: + logger.debug(f"探索分支 [user={user_id}, category={category}]") + return explore_branch(category, style) + + logger.debug(f"利用分支 [user={user_id}, category={category}]") + return exploit_branch(user_id, category, style) + + except Exception as e: + logger.error(f"获取推荐结果失败 [user={user_id}, category={category}]: {e}", exc_info=True) + # 容错:回退到探索分支 + return explore_branch(category, style) + diff --git a/app/service/recommendation_system/vector_utils.py b/app/service/recommendation_system/vector_utils.py new file mode 100644 index 0000000..f6a78ab --- /dev/null +++ b/app/service/recommendation_system/vector_utils.py @@ -0,0 +1,189 @@ +""" +向量计算工具类 +包含 ResNet50 特征提取、向量归一化等功能 +""" +import io +import logging +import numpy as np +import torch +from torchvision import models, transforms +from PIL import Image +from minio import Minio + +from app.core.config import settings +from app.service.recommendation_system.config import RECOMMENDATION_CONFIG + +logger = logging.getLogger(__name__) + +# 图像预处理(与ResNet训练时的预处理一致) +transform = transforms.Compose([ + transforms.Resize((224, 224)), # ResNet 要求 224x224 的输入 + transforms.ToTensor(), # 转换为 Tensor + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), # 标准化 +]) + +# 加载预训练的 ResNet50 模型(去掉最后全连接层) +_resnet_model = None + + +def get_resnet_model(): + """获取 ResNet50 模型(单例模式)""" + global _resnet_model + if _resnet_model is None: + logger.info("加载 ResNet50 模型...") + _resnet_model = models.resnet50(pretrained=True) + modules = list(_resnet_model.children())[:-1] # 移除最后的全连接层 + _resnet_model = torch.nn.Sequential(*modules) + _resnet_model.eval() # 设置为评估模式 + logger.info("ResNet50 模型加载完成") + return _resnet_model + + +# MinIO 客户端(单例) +_minio_client = None + + +def get_minio_client(): + """获取 MinIO 客户端(单例模式)""" + global _minio_client + if _minio_client is None: + _minio_client = Minio( + settings.MINIO_URL, + access_key=settings.MINIO_ACCESS, + 
secret_key=settings.MINIO_SECRET, + secure=settings.MINIO_SECURE + ) + return _minio_client + + +def get_image_from_minio(path: str) -> Image.Image: + """ + 从 MinIO 获取图片 + + Args: + path: MinIO 逻辑 URL,格式如 "bucket_name/object_name" + + Returns: + PIL Image 对象,失败返回 None + """ + try: + # 分割路径,获取桶名和文件路径 + path_parts = path.split('/', 1) + if len(path_parts) != 2: + logger.error(f"路径格式错误: {path}") + return None + + bucket_name, file_name = path_parts + minio_client = get_minio_client() + + # 获取文件 + obj = minio_client.get_object(bucket_name, file_name) + img_data = obj.read() # 读取图像数据 + img = Image.open(io.BytesIO(img_data)) # 将数据转为图像对象 + + return img + except Exception as e: + logger.error(f"从 MinIO 获取图片失败 [{path}]: {e}") + return None + + +def extract_feature_vector(path: str) -> np.ndarray: + """ + 使用 ResNet50 提取图片特征向量(2048维) + + Args: + path: MinIO 逻辑 URL + + Returns: + 2048维特征向量(numpy array),失败返回零向量 + """ + try: + # 从 MinIO 获取图像 + img = get_image_from_minio(path) + if img is None: + logger.warning(f"无法获取图片,返回零向量: {path}") + return np.zeros(RECOMMENDATION_CONFIG["vector_dim"], dtype=np.float32) + + # 预处理 + # 部分 MinIO 图片可能是 RGBA/CMYK,转换成 RGB 以匹配 3 通道标准化参数 + if img.mode != "RGB": + try: + img = img.convert("RGB") + except Exception: + logger.warning(f"无法转换图片为RGB,返回零向量: {path}") + return np.zeros(RECOMMENDATION_CONFIG["vector_dim"], dtype=np.float32) + + img_tensor = transform(img).unsqueeze(0) # 扩展维度以适应批量处理 + + # 提取特征 + resnet_model = get_resnet_model() + with torch.no_grad(): # 在不需要计算梯度的情况下进行推断 + feature_vector = resnet_model(img_tensor) # 获取 ResNet 的输出 + feature_vector = feature_vector.squeeze().cpu().numpy() # 转换为 NumPy 数组并去掉 batch 维度 + + # 确保是 2048 维 + if feature_vector.ndim > 1: + feature_vector = feature_vector.flatten() + + # 确保维度正确 + if len(feature_vector) != RECOMMENDATION_CONFIG["vector_dim"]: + logger.warning(f"向量维度不正确: {len(feature_vector)}, 期望: {RECOMMENDATION_CONFIG['vector_dim']}") + # 如果维度不对,尝试调整 + if len(feature_vector) > 
RECOMMENDATION_CONFIG["vector_dim"]: + feature_vector = feature_vector[:RECOMMENDATION_CONFIG["vector_dim"]] + else: + padded = np.zeros(RECOMMENDATION_CONFIG["vector_dim"], dtype=np.float32) + padded[:len(feature_vector)] = feature_vector + feature_vector = padded + + return feature_vector.astype(np.float32) + except Exception as e: + logger.error(f"提取特征向量失败 [{path}]: {e}", exc_info=True) + return np.zeros(RECOMMENDATION_CONFIG["vector_dim"], dtype=np.float32) + + +def normalize_vector(vector: np.ndarray) -> np.ndarray: + """ + L2 归一化向量 + + Args: + vector: 输入向量 + + Returns: + 归一化后的向量 + """ + norm = np.linalg.norm(vector) + if norm == 0: + return vector + return vector / norm + + +def compute_weighted_average(vectors: list, weights: list) -> np.ndarray: + """ + 计算加权平均向量 + + Args: + vectors: 向量列表 + weights: 权重列表 + + Returns: + 加权平均向量(不做归一化,模长为加权平均后的尺度) + """ + if not vectors or not weights: + return np.zeros(RECOMMENDATION_CONFIG["vector_dim"], dtype=np.float32) + + # 确保所有向量都是 numpy array + vectors = [np.array(v) for v in vectors] + weights = np.array(weights) + + # 计算加权和 + weighted_sum = np.zeros_like(vectors[0]) + for v, w in zip(vectors, weights): + weighted_sum += v * w + + # 返回加权平均(除以权重和,不做 L2 归一化,模长不会随条数线性暴涨) + weight_total = weights.sum() + if weight_total == 0: + return weighted_sum + return weighted_sum / weight_total + diff --git a/app/service/search_image_with_text/service.py b/app/service/search_image_with_text/service.py index 6d4f490..54691a8 100644 --- a/app/service/search_image_with_text/service.py +++ b/app/service/search_image_with_text/service.py @@ -1,12 +1,8 @@ import chromadb -import hashlib - -import pandas as pd from chromadb.config import Settings from chromadb.utils.embedding_functions.ollama_embedding_function import OllamaEmbeddingFunction -from tqdm import tqdm -from app.core.config import OLLAMA_URL, CHROMADB_PATH +from app.core.config import OLLAMA_URL, settings # 读取 csv 文件 # csv_file_path = r'D:/Files/csv/output/output.csv' @@ -15,7 
+11,7 @@ from app.core.config import OLLAMA_URL, CHROMADB_PATH # df = pd.read_csv(csv_file_path, encoding='Windows-1252') # 创建 Chroma 客户端 -client = chromadb.Client(Settings(is_persistent=True, persist_directory=CHROMADB_PATH)) +client = chromadb.Client(Settings(is_persistent=True, persist_directory=settings.CHROMADB_PATH)) # client = chromadb.Client(Settings(is_persistent=True, persist_directory="./service/search_image_with_text/vector_db")) # client = chromadb.Client(Settings(is_persistent=True, persist_directory="D:/workspace/AiDLab/vector_db")) # 创建集合 diff --git a/app/service/super_resolution/service.py b/app/service/super_resolution/service.py index c2cf39d..be90745 100644 --- a/app/service/super_resolution/service.py +++ b/app/service/super_resolution/service.py @@ -5,21 +5,26 @@ import time import cv2 import minio.error import numpy as np +import pika import redis import torch import tritonclient.grpc as grpcclient +from minio import Minio -from app.core.config import * +from app.core.config import settings, SR_TRITON_URL, SR_RABBITMQ_QUEUES, SR_MODEL_NAME +from app.core.rabbit_mq_config import RABBITMQ_PARAMS from app.schemas.super_resolution import SuperResolutionModel -from app.service.utils.oss_client import oss_get_image, oss_upload_image +from app.service.utils.new_oss_client import oss_get_image, oss_upload_image logger = logging.getLogger() +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) + class SuperResolution: def __init__(self, data): self.triton_client = grpcclient.InferenceServerClient(url=SR_TRITON_URL) - self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + self.redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) self.tasks_id = data.sr_tasks_id self.user_id = self.tasks_id[self.tasks_id.rfind('-') + 1:] self.sr_image_url 
= data.sr_image_url @@ -97,8 +102,8 @@ class SuperResolution: image_bytes = cv2.imencode('.jpg', image)[1].tobytes() # res = self.minio_client.put_object(f'{SR_MINIO_BUCKET}', f'{self.user_id}/sr/output/{self.tasks_id}.jpg', io.BytesIO(image_bytes), len(image_bytes), content_type='image/png') object_name = f'{self.user_id}/sr/output/{self.tasks_id}.jpg' - oss_upload_image(bucket=SR_MINIO_BUCKET, object_name=object_name, image_bytes=image_bytes) - image_url = f"{SR_MINIO_BUCKET}/{object_name}" + oss_upload_image(oss_client=minio_client, bucket="aida-users", object_name=object_name, image_bytes=image_bytes) + image_url = f"aida-users/{object_name}" return image_url except Exception as e: logger.warning(f"upload_png_mask runtime exception : {e}") @@ -122,7 +127,7 @@ class SuperResolution: def infer_cancel(tasks_id): - redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, decode_responses=True) + redis_client = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, decode_responses=True) data = {'tasks': tasks_id, 'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'} sr_data = json.dumps({'status': 'REVOKED', 'message': "revoked", 'data': 'revoked'}) redis_client.set(tasks_id, sr_data) diff --git a/app/service/utils/new_oss_client.py b/app/service/utils/new_oss_client.py index 2f292f6..ac91f47 100644 --- a/app/service/utils/new_oss_client.py +++ b/app/service/utils/new_oss_client.py @@ -8,10 +8,10 @@ import urllib3 from PIL import Image, ImageDraw from minio import Minio -from app.core.config import * +from app.core.config import settings from app.service.utils.decorator import RunTime -minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) +minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) # 自定义 Retry 类 @@ -56,17 +56,16 @@ def oss_get_image(oss_client, bucket, 
object_name, data_type): data_bytes = BytesIO(image_data.read()) image_object = Image.open(data_bytes) except Exception as e: - logger.warning(f"{OSS} | 获取图片出现异常 ######: {e}") + logger.warning(f" | 获取图片出现异常 ######: {e}") return image_object -@RunTime def oss_upload_image(oss_client, bucket, object_name, image_bytes): req = None try: req = oss_client.put_object(bucket_name=bucket, object_name=object_name, data=io.BytesIO(image_bytes), length=len(image_bytes), content_type='image/png') except Exception as e: - logger.warning(f"{OSS} | 上传图片出现异常 ######: {e}") + logger.warning(f" | 上传图片出现异常 ######: {e}") return req @@ -82,7 +81,7 @@ if __name__ == '__main__': # url = "aida-users/89/sketchboard/female/Dress/e6724ab7-8d3f-4677-abe0-c3e42ab7af85.jpeg" # url = "aida-users/87/print/956614a2-7e75-4fbe-9ed0-c1831e37a2c9-4-87.png" # url = "aida-users/89/single_logo/123-89.png" - url = "aida-results/result_d2577888-92d7-11f0-9cfd-e0d362103998.png" + url = "aida-results/result_a7adcbd8-ef8d-11f0-8c92-0966ede33ab5.png" # url = "aida-collection-element/12148/Sketchboard/95ea577b-305b-4a62-b30a-39c0dd3ddb3f.png" read_type = "2" diff --git a/app/service/utils/oss_client.py b/app/service/utils/oss_client.py deleted file mode 100644 index 0bd9853..0000000 --- a/app/service/utils/oss_client.py +++ /dev/null @@ -1,90 +0,0 @@ -import io -import logging -from io import BytesIO -import cv2 -import numpy as np -import urllib3 -from PIL import Image -from minio import Minio - -from app.core.config import * - - -# 自定义 Retry 类 -class CustomRetry(urllib3.Retry): - def increment(self, method=None, url=None, response=None, error=None, **kwargs): - # 调用父类的 increment 方法 - new_retry = super(CustomRetry, self).increment(method, url, response, error, **kwargs) - # 打印重试信息 - logger.info(f"重试连接: {method} {url},错误: {error},重试次数: {self.total - new_retry.total}") - return new_retry - - -logger = logging.getLogger() -timeout = urllib3.Timeout(connect=1, read=10.0) # 连接超时 5 秒,读取超时 10 秒 -http_client = 
urllib3.PoolManager( - num_pools=10, # 设置连接池大小 - maxsize=10, - timeout=timeout, - cert_reqs='CERT_REQUIRED', # 需要证书验证 - retries=CustomRetry( - total=5, - backoff_factor=0.2, - status_forcelist=[500, 502, 503, 504], - ), -) - - -# 获取图片 -def oss_get_image(bucket, object_name, data_type): - # cv2 默认全通道读取 - image_object = None - try: - oss_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE, http_client=http_client) - image_data = oss_client.get_object(bucket_name=bucket, object_name=object_name) - if data_type == "cv2": - image_bytes = image_data.read() - image_array = np.frombuffer(image_bytes, np.uint8) # 转成8位无符号整型 - image_object = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED) - if image_object.dtype == np.uint16: - image_object = (image_object / 256).astype('uint8') - else: - data_bytes = BytesIO(image_data.read()) - image_object = Image.open(data_bytes) - except Exception as e: - logger.warning(f"{OSS} | 获取图片出现异常 ######: {e}") - return image_object - - -def oss_upload_image(bucket, object_name, image_bytes): - req = None - try: - oss_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE) - req = oss_client.put_object(bucket_name=bucket, object_name=object_name, data=io.BytesIO(image_bytes), length=len(image_bytes), content_type='image/png') - except Exception as e: - logger.warning(f"{OSS} | 上传图片出现异常 ######: {e}") - return req - - -if __name__ == '__main__': - # url = "aida-results/result_0002186a-e631-11ee-86a6-b48351119060.png" - # url = "aida-collection-element/11523/Moodboard/f60af0d2-94c2-48f9-90ff-74b8e8a481b5.jpg" - # url = "aida-sys-image/images/female/outwear/0628000054.jpg" - # url = "aida-users/89/product_image/string-89.png" - # url = "test/845046c7-4f62-4f54-a4a9-c26d49c6969335b5b3a9-d335-4871-a46c-3cc3caf07da259629dfd1f1f555a2e2a9def7e719366.png" - # url = 'aida-users/89/relight_image/123-89.png' - # url = 'aida-users/89/relight_image/123-89.png' - # url = 
'aida-users/89/relight_image/123-89.png' - # url = "aida-users/89/sketchboard/female/Dress/e6724ab7-8d3f-4677-abe0-c3e42ab7af85.jpeg" - # url = "aida-users/87/print/956614a2-7e75-4fbe-9ed0-c1831e37a2c9-4-87.png" - # url = "aida-users/89/single_logo/123-89.png" - url = "aida-results/result_e2673d92-8d25-11ef-be24-0826ae3ad6b3.png" - # url = "aida-collection-element/12148/Sketchboard/95ea577b-305b-4a62-b30a-39c0dd3ddb3f.png" - read_type = "cv2" - if read_type == "cv2": - img = oss_get_image(bucket=url.split('/')[0], object_name=url[url.find('/') + 1:], data_type=read_type) - cv2.imshow("", img) - cv2.waitKey(0) - else: - img = oss_get_image(bucket=url.split('/')[0], object_name=url[url.find('/') + 1:], data_type=read_type) - img.show() diff --git a/app/service/utils/redis_utils.py b/app/service/utils/redis_utils.py index 012fbe0..8761fde 100644 --- a/app/service/utils/redis_utils.py +++ b/app/service/utils/redis_utils.py @@ -1,6 +1,6 @@ import redis -from app.core.config import REDIS_HOST, REDIS_PORT +from app.core.config import settings class Redis(object): @@ -10,8 +10,8 @@ class Redis(object): @staticmethod def _get_r(): - host = REDIS_HOST - port = REDIS_PORT + host = settings.REDIS_HOST + port = settings.REDIS_PORT db = 0 r = redis.StrictRedis(host, port, db) return r @@ -91,6 +91,21 @@ class Redis(object): r = cls._get_r() r.expire(name, expire_in_seconds) + @classmethod + def scan_keys(cls, pattern="*"): + """ + 扫描匹配模式的key + """ + r = cls._get_r() + keys = [] + cursor = 0 + while True: + cursor, partial_keys = r.scan(cursor, match=pattern, count=1000) + keys.extend(partial_keys) + if cursor == 0: + break + return [key.decode('utf-8') if isinstance(key, bytes) else key for key in keys] + if __name__ == '__main__': redis_client = Redis() diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..a0bd4de --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,25 @@ +services: + aida_server: + build: + context: . 
+ dockerfile: Dockerfile + working_dir: /app + volumes: + - ./app:/app/app + - ./.env_prod:/app/.env + - /etc/localtime:/etc/localtime:ro + - ./seg_cache:/seg_cache + ports: + - "10200:80" + depends_on: + - redis + redis: + image: redis + container_name: aida_redis + restart: always + ports: + - "6400:6379" + volumes: + - ./redis/data:/data + - ./redis/conf/redis.conf:/etc/redis/redis.conf + command: redis-server /etc/redis/redis.conf --appendonly yes \ No newline at end of file diff --git a/logging_env.py b/logging_env.py index 08873b0..2285782 100644 --- a/logging_env.py +++ b/logging_env.py @@ -1,10 +1,15 @@ -from app.core.config import LOGS_PATH +import os + +from app.core.config import settings LOGGER_CONFIG_DICT = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { - 'simple': {'format': '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s'} + 'simple': { + 'format': '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s', + 'datefmt': '%Y-%m-%d %H:%M:%S' # 补充日期格式,日志更易读 + } }, 'handlers': { 'console': { @@ -17,7 +22,7 @@ LOGGER_CONFIG_DICT = { 'class': 'logging.handlers.RotatingFileHandler', 'level': 'INFO', 'formatter': 'simple', - 'filename': f'{LOGS_PATH}info.log', + 'filename': os.path.join(settings.LOGS_PATH, 'info.log'), 'maxBytes': 10485760, 'backupCount': 50, 'encoding': 'utf8', @@ -26,7 +31,7 @@ LOGGER_CONFIG_DICT = { 'class': 'logging.handlers.RotatingFileHandler', 'level': 'ERROR', 'formatter': 'simple', - 'filename': f'{LOGS_PATH}error.log', + 'filename': os.path.join(settings.LOGS_PATH, 'error.log'), 'maxBytes': 10485760, 'backupCount': 20, 'encoding': 'utf8', @@ -35,7 +40,7 @@ LOGGER_CONFIG_DICT = { 'class': 'logging.handlers.RotatingFileHandler', 'level': 'DEBUG', 'formatter': 'simple', - 'filename': f'{LOGS_PATH}debug.log', + 'filename': os.path.join(settings.LOGS_PATH, 'debug.log'), 'maxBytes': 10485760, 'backupCount': 50, 'encoding': 'utf8', @@ -45,7 +50,7 @@ LOGGER_CONFIG_DICT = { 'my_module': 
{'level': 'INFO', 'handlers': ['console'], 'propagate': 'no'} }, 'root': { - 'level': 'INFO', + 'level': 'DEBUG', 'handlers': ['error_file_handler', 'info_file_handler', 'debug_file_handler', 'console'], }, } diff --git a/pyproject.toml b/pyproject.toml index aa10a24..e143cde 100755 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,17 +2,53 @@ name = "trinity-client-aida" version = "0.1.0" description = "Add your description here" -requires-python = ">=3.12" +requires-python = ">=3.12,<3.13" dependencies = [ - "apscheduler>=3.11.0", - "celery>=5.5.3", - "geventhttpclient>=2.3.4", - "google-search-results>=2.4.2", - "moviepy>=2.2.1", - "numpy==1.26.4", - "pandas-stubs==2.2.3.250527", - "pika-stubs==0.1.3", - "python-multipart>=0.0.20", - "tritonclient[all]>=2.58.0", - "types-urllib3==1.26.25.14", + "agentaction", + "aio-pika>=9.5.8", + "apscheduler>=3.11.1", + "bs4>=0.0.2", + "callbacks>=0.3.0", + "celery>=5.6.0", + "celery-types>=0.23.0", + "chromadb>=1.3.7", + "dashscope>=1.25.5", + "dominate>=2.9.1", + "dotenv>=0.9.9", + "fastapi[standard]>=0.125.0", + "image>=1.5.33", + "langchain>=1.2.0", + "langchain-community>=0.4.1", + "load>=1.0.14", + "load-dotenv>=0.1.0", + "loguru>=0.7.3", + "minio>=7.2.20", + "mmcv>=2.2.0", + "moviepy==1.0.3", + "numpy<2", + "ollama>=0.6.1", + "opencv-python>=4.11.0.86", + "pandas>=2.3.3", + "pandas-stubs~=2.3.3", + "pika>=1.3.2", + "pillow>=12.0.0", + "pyasyncore>=1.0.4", + "pydantic>=2.12.5", + "pydantic-core>=2.41.5", + "pydantic-settings>=2.12.0", + "pymilvus>=2.6.5", + "pymysql>=1.1.2", + "python-multipart>=0.0.21", + "pyviz-comms>=3.0.6", + "redis>=7.1.0", + "retry>=0.9.2", + "scikit-image>=0.26.0", + "scipy>=1.16.3", + "scipy-stubs~=1.16.3", + "seaborn>=0.13.2", + "tool>=0.8.0", + "torch>=2.9.1", + "torchvision>=0.24.1", + "tritonclient[all]>=2.63.0", + "uvicorn>=0.38.0", ] diff --git a/requirements.txt b/requirements.txt index b7e6c28..4eefcb7 100644 Binary files a/requirements.txt and b/requirements.txt differ diff --git 
a/uv.lock b/uv.lock index 0c28838..67a7efd 100755 --- a/uv.lock +++ b/uv.lock @@ -1,6 +1,75 @@ version = 1 -revision = 2 -requires-python = ">=3.12" +revision = 3 +requires-python = "==3.12.*" +resolution-markers = [ + "platform_machine == 'arm64' and sys_platform == 'darwin'", + "platform_machine != 'arm64' and sys_platform == 'darwin'", + "platform_machine == 'aarch64' and sys_platform == 'linux'", + "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')", +] + +[[package]] +name = "addict" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/85/ef/fd7649da8af11d93979831e8f1f8097e85e82d5bfeabc8c68b39175d8e75/addict-2.4.0.tar.gz", hash = "sha256:b3b2210e0e067a281f5646c8c5db92e99b7231ea8b0eb5f74dbdf9e259d4e494", size = 9186, upload-time = "2020-11-21T16:21:31.416Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/00/b08f23b7d7e1e14ce01419a467b583edbb93c6cdb8654e54a9cc579cd61f/addict-2.4.0-py3-none-any.whl", hash = "sha256:249bb56bbfd3cdc2a004ea0ff4c2b6ddc84d53bc2194761636eb314d5cfa5dfc", size = 3832, upload-time = "2020-11-21T16:21:29.588Z" }, +] + +[[package]] +name = "agentaction" +version = "0.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "agentmemory" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/17/fb/a3dc77bed76c33d1bb8bf5430803629fd2d35d68374f915e07d643b9dc83/agentaction-0.1.7.tar.gz", hash = "sha256:72bf750a615b6d7cc6a828a7cd200da5e504606f7b4ce6f7a357d3f862a31d14", size = 6584, upload-time = "2023-08-02T01:00:08.957Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/53/c7ee5ccecb42aaf1bc615b8bcf2a5dab2aa06c337b1cc2f5092a245390ed/agentaction-0.1.7-py3-none-any.whl", hash = "sha256:2a96b8e843a86db30f91c1ba0e9e88d74bb100d716dde0d962b052c29637152b", size = 6990, upload-time = "2023-08-02T01:00:07.338Z" }, +] + +[[package]] +name = 
"agentlogger" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyfiglet" }, + { name = "rich" }, + { name = "termcolor" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fa/36/3626ce002c0f7dc0d9a7e94fd744307bd4e96204794006f86e72777c1468/agentlogger-0.1.2.tar.gz", hash = "sha256:5457dde6a7c3d75521cefa27c88967a4fc926ca1ce0e616a2ecbd06d4a874f13", size = 5650, upload-time = "2023-07-22T06:59:00.415Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/1a/a1e80755cc9ef3aedfcec4a624635f5912424769a81b4647c6d59a77a9ef/agentlogger-0.1.2-py3-none-any.whl", hash = "sha256:11603262b11c97a0d61b134cda7aedef9299e0f86e573af1fdce5be88f598515", size = 6150, upload-time = "2023-07-22T06:58:58.859Z" }, +] + +[[package]] +name = "agentmemory" +version = "0.4.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "agentlogger" }, + { name = "chromadb" }, + { name = "psycopg2-binary" }, + { name = "python-dotenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ae/23/7d57eeea63cd5d1a958f371da49ff2eb5cbbbafc6e6b1d8c8e92fd93b647/agentmemory-0.4.8.tar.gz", hash = "sha256:cd0b753d55eb54cf686cceecb6df4f1c5037232fcecbc2dfabb12795342f5e90", size = 26143, upload-time = "2023-10-15T18:01:03.553Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/35/65fae80ce879cd0ebad12ed5610fe89e425fe3a1ada282cdeee8e8cd0ac0/agentmemory-0.4.8-py3-none-any.whl", hash = "sha256:b5fc78a2510f0aef323dabb44759e528bbf6acf75600eb1effe2f1b9af235276", size = 24187, upload-time = "2023-10-15T18:01:02.245Z" }, +] + +[[package]] +name = "aio-pika" +version = "9.5.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiormq" }, + { name = "yarl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c5/73/8d1020683970de5532b3b01732d75c8bf922a6505fcdad1a9c7c6405242a/aio_pika-9.5.8.tar.gz", hash = 
"sha256:7c36874115f522bbe7486c46d8dd711a4dbedd67c4e8a8c47efe593d01862c62", size = 47408, upload-time = "2025-11-12T10:37:10.215Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/91/513971861d845d28160ecb205ae2cfaf618b16918a9cd4e0b832b5360ce7/aio_pika-9.5.8-py3-none-any.whl", hash = "sha256:f4c6cb8a6c5176d00f39fd7431e9702e638449bc6e86d1769ad7548b2a506a8d", size = 54397, upload-time = "2025-11-12T10:37:08.374Z" }, +] [[package]] name = "aiohappyeyeballs" @@ -13,7 +82,7 @@ wheels = [ [[package]] name = "aiohttp" -version = "3.12.13" +version = "3.13.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohappyeyeballs" }, @@ -24,54 +93,51 @@ dependencies = [ { name = "propcache" }, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/42/6e/ab88e7cb2a4058bed2f7870276454f85a7c56cd6da79349eb314fc7bbcaa/aiohttp-3.12.13.tar.gz", hash = "sha256:47e2da578528264a12e4e3dd8dd72a7289e5f812758fe086473fab037a10fcce", size = 7819160, upload-time = "2025-06-14T15:15:41.354Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1c/ce/3b83ebba6b3207a7135e5fcaba49706f8a4b6008153b4e30540c982fae26/aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca", size = 7837994, upload-time = "2025-10-28T20:59:39.937Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b4/6a/ce40e329788013cd190b1d62bbabb2b6a9673ecb6d836298635b939562ef/aiohttp-3.12.13-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0aa580cf80558557285b49452151b9c69f2fa3ad94c5c9e76e684719a8791b73", size = 700491, upload-time = "2025-06-14T15:14:00.048Z" }, - { url = "https://files.pythonhosted.org/packages/28/d9/7150d5cf9163e05081f1c5c64a0cdf3c32d2f56e2ac95db2a28fe90eca69/aiohttp-3.12.13-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b103a7e414b57e6939cc4dece8e282cfb22043efd0c7298044f6594cf83ab347", size = 475104, upload-time = "2025-06-14T15:14:01.691Z" }, - { url = 
"https://files.pythonhosted.org/packages/f8/91/d42ba4aed039ce6e449b3e2db694328756c152a79804e64e3da5bc19dffc/aiohttp-3.12.13-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78f64e748e9e741d2eccff9597d09fb3cd962210e5b5716047cbb646dc8fe06f", size = 467948, upload-time = "2025-06-14T15:14:03.561Z" }, - { url = "https://files.pythonhosted.org/packages/99/3b/06f0a632775946981d7c4e5a865cddb6e8dfdbaed2f56f9ade7bb4a1039b/aiohttp-3.12.13-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29c955989bf4c696d2ededc6b0ccb85a73623ae6e112439398935362bacfaaf6", size = 1714742, upload-time = "2025-06-14T15:14:05.558Z" }, - { url = "https://files.pythonhosted.org/packages/92/a6/2552eebad9ec5e3581a89256276009e6a974dc0793632796af144df8b740/aiohttp-3.12.13-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d640191016763fab76072c87d8854a19e8e65d7a6fcfcbf017926bdbbb30a7e5", size = 1697393, upload-time = "2025-06-14T15:14:07.194Z" }, - { url = "https://files.pythonhosted.org/packages/d8/9f/bd08fdde114b3fec7a021381b537b21920cdd2aa29ad48c5dffd8ee314f1/aiohttp-3.12.13-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4dc507481266b410dede95dd9f26c8d6f5a14315372cc48a6e43eac652237d9b", size = 1752486, upload-time = "2025-06-14T15:14:08.808Z" }, - { url = "https://files.pythonhosted.org/packages/f7/e1/affdea8723aec5bd0959171b5490dccd9a91fcc505c8c26c9f1dca73474d/aiohttp-3.12.13-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8a94daa873465d518db073bd95d75f14302e0208a08e8c942b2f3f1c07288a75", size = 1798643, upload-time = "2025-06-14T15:14:10.767Z" }, - { url = "https://files.pythonhosted.org/packages/f3/9d/666d856cc3af3a62ae86393baa3074cc1d591a47d89dc3bf16f6eb2c8d32/aiohttp-3.12.13-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f52420cde4ce0bb9425a375d95577fe082cb5721ecb61da3049b55189e4e6", size = 1718082, upload-time = 
"2025-06-14T15:14:12.38Z" }, - { url = "https://files.pythonhosted.org/packages/f3/ce/3c185293843d17be063dada45efd2712bb6bf6370b37104b4eda908ffdbd/aiohttp-3.12.13-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f7df1f620ec40f1a7fbcb99ea17d7326ea6996715e78f71a1c9a021e31b96b8", size = 1633884, upload-time = "2025-06-14T15:14:14.415Z" }, - { url = "https://files.pythonhosted.org/packages/3a/5b/f3413f4b238113be35dfd6794e65029250d4b93caa0974ca572217745bdb/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3062d4ad53b36e17796dce1c0d6da0ad27a015c321e663657ba1cc7659cfc710", size = 1694943, upload-time = "2025-06-14T15:14:16.48Z" }, - { url = "https://files.pythonhosted.org/packages/82/c8/0e56e8bf12081faca85d14a6929ad5c1263c146149cd66caa7bc12255b6d/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:8605e22d2a86b8e51ffb5253d9045ea73683d92d47c0b1438e11a359bdb94462", size = 1716398, upload-time = "2025-06-14T15:14:18.589Z" }, - { url = "https://files.pythonhosted.org/packages/ea/f3/33192b4761f7f9b2f7f4281365d925d663629cfaea093a64b658b94fc8e1/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:54fbbe6beafc2820de71ece2198458a711e224e116efefa01b7969f3e2b3ddae", size = 1657051, upload-time = "2025-06-14T15:14:20.223Z" }, - { url = "https://files.pythonhosted.org/packages/5e/0b/26ddd91ca8f84c48452431cb4c5dd9523b13bc0c9766bda468e072ac9e29/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:050bd277dfc3768b606fd4eae79dd58ceda67d8b0b3c565656a89ae34525d15e", size = 1736611, upload-time = "2025-06-14T15:14:21.988Z" }, - { url = "https://files.pythonhosted.org/packages/c3/8d/e04569aae853302648e2c138a680a6a2f02e374c5b6711732b29f1e129cc/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2637a60910b58f50f22379b6797466c3aa6ae28a6ab6404e09175ce4955b4e6a", size = 1764586, upload-time = "2025-06-14T15:14:23.979Z" }, - { url = 
"https://files.pythonhosted.org/packages/ac/98/c193c1d1198571d988454e4ed75adc21c55af247a9fda08236602921c8c8/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e986067357550d1aaa21cfe9897fa19e680110551518a5a7cf44e6c5638cb8b5", size = 1724197, upload-time = "2025-06-14T15:14:25.692Z" }, - { url = "https://files.pythonhosted.org/packages/e7/9e/07bb8aa11eec762c6b1ff61575eeeb2657df11ab3d3abfa528d95f3e9337/aiohttp-3.12.13-cp312-cp312-win32.whl", hash = "sha256:ac941a80aeea2aaae2875c9500861a3ba356f9ff17b9cb2dbfb5cbf91baaf5bf", size = 421771, upload-time = "2025-06-14T15:14:27.364Z" }, - { url = "https://files.pythonhosted.org/packages/52/66/3ce877e56ec0813069cdc9607cd979575859c597b6fb9b4182c6d5f31886/aiohttp-3.12.13-cp312-cp312-win_amd64.whl", hash = "sha256:671f41e6146a749b6c81cb7fd07f5a8356d46febdaaaf07b0e774ff04830461e", size = 447869, upload-time = "2025-06-14T15:14:29.05Z" }, - { url = "https://files.pythonhosted.org/packages/11/0f/db19abdf2d86aa1deec3c1e0e5ea46a587b97c07a16516b6438428b3a3f8/aiohttp-3.12.13-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d4a18e61f271127465bdb0e8ff36e8f02ac4a32a80d8927aa52371e93cd87938", size = 694910, upload-time = "2025-06-14T15:14:30.604Z" }, - { url = "https://files.pythonhosted.org/packages/d5/81/0ab551e1b5d7f1339e2d6eb482456ccbe9025605b28eed2b1c0203aaaade/aiohttp-3.12.13-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:532542cb48691179455fab429cdb0d558b5e5290b033b87478f2aa6af5d20ace", size = 472566, upload-time = "2025-06-14T15:14:32.275Z" }, - { url = "https://files.pythonhosted.org/packages/34/3f/6b7d336663337672d29b1f82d1f252ec1a040fe2d548f709d3f90fa2218a/aiohttp-3.12.13-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d7eea18b52f23c050ae9db5d01f3d264ab08f09e7356d6f68e3f3ac2de9dfabb", size = 464856, upload-time = "2025-06-14T15:14:34.132Z" }, - { url = 
"https://files.pythonhosted.org/packages/26/7f/32ca0f170496aa2ab9b812630fac0c2372c531b797e1deb3deb4cea904bd/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad7c8e5c25f2a26842a7c239de3f7b6bfb92304593ef997c04ac49fb703ff4d7", size = 1703683, upload-time = "2025-06-14T15:14:36.034Z" }, - { url = "https://files.pythonhosted.org/packages/ec/53/d5513624b33a811c0abea8461e30a732294112318276ce3dbf047dbd9d8b/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6af355b483e3fe9d7336d84539fef460120c2f6e50e06c658fe2907c69262d6b", size = 1684946, upload-time = "2025-06-14T15:14:38Z" }, - { url = "https://files.pythonhosted.org/packages/37/72/4c237dd127827b0247dc138d3ebd49c2ded6114c6991bbe969058575f25f/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a95cf9f097498f35c88e3609f55bb47b28a5ef67f6888f4390b3d73e2bac6177", size = 1737017, upload-time = "2025-06-14T15:14:39.951Z" }, - { url = "https://files.pythonhosted.org/packages/0d/67/8a7eb3afa01e9d0acc26e1ef847c1a9111f8b42b82955fcd9faeb84edeb4/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8ed8c38a1c584fe99a475a8f60eefc0b682ea413a84c6ce769bb19a7ff1c5ef", size = 1786390, upload-time = "2025-06-14T15:14:42.151Z" }, - { url = "https://files.pythonhosted.org/packages/48/19/0377df97dd0176ad23cd8cad4fd4232cfeadcec6c1b7f036315305c98e3f/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0b9170d5d800126b5bc89d3053a2363406d6e327afb6afaeda2d19ee8bb103", size = 1708719, upload-time = "2025-06-14T15:14:44.039Z" }, - { url = "https://files.pythonhosted.org/packages/61/97/ade1982a5c642b45f3622255173e40c3eed289c169f89d00eeac29a89906/aiohttp-3.12.13-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:372feeace612ef8eb41f05ae014a92121a512bd5067db8f25101dd88a8db11da", size = 1622424, upload-time = "2025-06-14T15:14:45.945Z" }, - { url = "https://files.pythonhosted.org/packages/99/ab/00ad3eea004e1d07ccc406e44cfe2b8da5acb72f8c66aeeb11a096798868/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a946d3702f7965d81f7af7ea8fb03bb33fe53d311df48a46eeca17e9e0beed2d", size = 1675447, upload-time = "2025-06-14T15:14:47.911Z" }, - { url = "https://files.pythonhosted.org/packages/3f/fe/74e5ce8b2ccaba445fe0087abc201bfd7259431d92ae608f684fcac5d143/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a0c4725fae86555bbb1d4082129e21de7264f4ab14baf735278c974785cd2041", size = 1707110, upload-time = "2025-06-14T15:14:50.334Z" }, - { url = "https://files.pythonhosted.org/packages/ef/c4/39af17807f694f7a267bd8ab1fbacf16ad66740862192a6c8abac2bff813/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9b28ea2f708234f0a5c44eb6c7d9eb63a148ce3252ba0140d050b091b6e842d1", size = 1649706, upload-time = "2025-06-14T15:14:52.378Z" }, - { url = "https://files.pythonhosted.org/packages/38/e8/f5a0a5f44f19f171d8477059aa5f28a158d7d57fe1a46c553e231f698435/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d4f5becd2a5791829f79608c6f3dc745388162376f310eb9c142c985f9441cc1", size = 1725839, upload-time = "2025-06-14T15:14:54.617Z" }, - { url = "https://files.pythonhosted.org/packages/fd/ac/81acc594c7f529ef4419d3866913f628cd4fa9cab17f7bf410a5c3c04c53/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:60f2ce6b944e97649051d5f5cc0f439360690b73909230e107fd45a359d3e911", size = 1759311, upload-time = "2025-06-14T15:14:56.597Z" }, - { url = "https://files.pythonhosted.org/packages/38/0d/aabe636bd25c6ab7b18825e5a97d40024da75152bec39aa6ac8b7a677630/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:69fc1909857401b67bf599c793f2183fbc4804717388b0b888f27f9929aa41f3", size = 1708202, upload-time = 
"2025-06-14T15:14:58.598Z" }, - { url = "https://files.pythonhosted.org/packages/1f/ab/561ef2d8a223261683fb95a6283ad0d36cb66c87503f3a7dde7afe208bb2/aiohttp-3.12.13-cp313-cp313-win32.whl", hash = "sha256:7d7e68787a2046b0e44ba5587aa723ce05d711e3a3665b6b7545328ac8e3c0dd", size = 420794, upload-time = "2025-06-14T15:15:00.939Z" }, - { url = "https://files.pythonhosted.org/packages/9d/47/b11d0089875a23bff0abd3edb5516bcd454db3fefab8604f5e4b07bd6210/aiohttp-3.12.13-cp313-cp313-win_amd64.whl", hash = "sha256:5a178390ca90419bfd41419a809688c368e63c86bd725e1186dd97f6b89c2706", size = 446735, upload-time = "2025-06-14T15:15:02.858Z" }, + { url = "https://files.pythonhosted.org/packages/29/9b/01f00e9856d0a73260e86dd8ed0c2234a466c5c1712ce1c281548df39777/aiohttp-3.13.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b1e56bab2e12b2b9ed300218c351ee2a3d8c8fdab5b1ec6193e11a817767e47b", size = 737623, upload-time = "2025-10-28T20:56:30.797Z" }, + { url = "https://files.pythonhosted.org/packages/5a/1b/4be39c445e2b2bd0aab4ba736deb649fabf14f6757f405f0c9685019b9e9/aiohttp-3.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:364e25edaabd3d37b1db1f0cbcee8c73c9a3727bfa262b83e5e4cf3489a2a9dc", size = 492664, upload-time = "2025-10-28T20:56:32.708Z" }, + { url = "https://files.pythonhosted.org/packages/28/66/d35dcfea8050e131cdd731dff36434390479b4045a8d0b9d7111b0a968f1/aiohttp-3.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c5c94825f744694c4b8db20b71dba9a257cd2ba8e010a803042123f3a25d50d7", size = 491808, upload-time = "2025-10-28T20:56:34.57Z" }, + { url = "https://files.pythonhosted.org/packages/00/29/8e4609b93e10a853b65f8291e64985de66d4f5848c5637cddc70e98f01f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba2715d842ffa787be87cbfce150d5e88c87a98e0b62e0f5aa489169a393dbbb", size = 1738863, upload-time = "2025-10-28T20:56:36.377Z" }, + { url = 
"https://files.pythonhosted.org/packages/9d/fa/4ebdf4adcc0def75ced1a0d2d227577cd7b1b85beb7edad85fcc87693c75/aiohttp-3.13.2-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:585542825c4bc662221fb257889e011a5aa00f1ae4d75d1d246a5225289183e3", size = 1700586, upload-time = "2025-10-28T20:56:38.034Z" }, + { url = "https://files.pythonhosted.org/packages/da/04/73f5f02ff348a3558763ff6abe99c223381b0bace05cd4530a0258e52597/aiohttp-3.13.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:39d02cb6025fe1aabca329c5632f48c9532a3dabccd859e7e2f110668972331f", size = 1768625, upload-time = "2025-10-28T20:56:39.75Z" }, + { url = "https://files.pythonhosted.org/packages/f8/49/a825b79ffec124317265ca7d2344a86bcffeb960743487cb11988ffb3494/aiohttp-3.13.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e67446b19e014d37342f7195f592a2a948141d15a312fe0e700c2fd2f03124f6", size = 1867281, upload-time = "2025-10-28T20:56:41.471Z" }, + { url = "https://files.pythonhosted.org/packages/b9/48/adf56e05f81eac31edcfae45c90928f4ad50ef2e3ea72cb8376162a368f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4356474ad6333e41ccefd39eae869ba15a6c5299c9c01dfdcfdd5c107be4363e", size = 1752431, upload-time = "2025-10-28T20:56:43.162Z" }, + { url = "https://files.pythonhosted.org/packages/30/ab/593855356eead019a74e862f21523db09c27f12fd24af72dbc3555b9bfd9/aiohttp-3.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eeacf451c99b4525f700f078becff32c32ec327b10dcf31306a8a52d78166de7", size = 1562846, upload-time = "2025-10-28T20:56:44.85Z" }, + { url = "https://files.pythonhosted.org/packages/39/0f/9f3d32271aa8dc35036e9668e31870a9d3b9542dd6b3e2c8a30931cb27ae/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:d8a9b889aeabd7a4e9af0b7f4ab5ad94d42e7ff679aaec6d0db21e3b639ad58d", size = 1699606, upload-time = "2025-10-28T20:56:46.519Z" }, + { url = "https://files.pythonhosted.org/packages/2c/3c/52d2658c5699b6ef7692a3f7128b2d2d4d9775f2a68093f74bca06cf01e1/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa89cb11bc71a63b69568d5b8a25c3ca25b6d54c15f907ca1c130d72f320b76b", size = 1720663, upload-time = "2025-10-28T20:56:48.528Z" }, + { url = "https://files.pythonhosted.org/packages/9b/d4/8f8f3ff1fb7fb9e3f04fcad4e89d8a1cd8fc7d05de67e3de5b15b33008ff/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8aa7c807df234f693fed0ecd507192fc97692e61fee5702cdc11155d2e5cadc8", size = 1737939, upload-time = "2025-10-28T20:56:50.77Z" }, + { url = "https://files.pythonhosted.org/packages/03/d3/ddd348f8a27a634daae39a1b8e291ff19c77867af438af844bf8b7e3231b/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9eb3e33fdbe43f88c3c75fa608c25e7c47bbd80f48d012763cb67c47f39a7e16", size = 1555132, upload-time = "2025-10-28T20:56:52.568Z" }, + { url = "https://files.pythonhosted.org/packages/39/b8/46790692dc46218406f94374903ba47552f2f9f90dad554eed61bfb7b64c/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9434bc0d80076138ea986833156c5a48c9c7a8abb0c96039ddbb4afc93184169", size = 1764802, upload-time = "2025-10-28T20:56:54.292Z" }, + { url = "https://files.pythonhosted.org/packages/ba/e4/19ce547b58ab2a385e5f0b8aa3db38674785085abcf79b6e0edd1632b12f/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff15c147b2ad66da1f2cbb0622313f2242d8e6e8f9b79b5206c84523a4473248", size = 1719512, upload-time = "2025-10-28T20:56:56.428Z" }, + { url = "https://files.pythonhosted.org/packages/70/30/6355a737fed29dcb6dfdd48682d5790cb5eab050f7b4e01f49b121d3acad/aiohttp-3.13.2-cp312-cp312-win32.whl", hash = "sha256:27e569eb9d9e95dbd55c0fc3ec3a9335defbf1d8bc1d20171a49f3c4c607b93e", size = 426690, upload-time = "2025-10-28T20:56:58.736Z" 
}, + { url = "https://files.pythonhosted.org/packages/0a/0d/b10ac09069973d112de6ef980c1f6bb31cb7dcd0bc363acbdad58f927873/aiohttp-3.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:8709a0f05d59a71f33fd05c17fc11fcb8c30140506e13c2f5e8ee1b8964e1b45", size = 453465, upload-time = "2025-10-28T20:57:00.795Z" }, +] + +[[package]] +name = "aiormq" +version = "6.9.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pamqp" }, + { name = "yarl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8e/f6/01bc850db6d9b46ae825e3c373f610b0544e725a1159745a6de99ad0d9f1/aiormq-6.9.2.tar.gz", hash = "sha256:d051d46086079934d3a7157f4d8dcb856b77683c2a94aee9faa165efa6a785d3", size = 30554, upload-time = "2025-10-20T10:49:59.763Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/ec/763b13f148f3760c1562cedb593feaffbae177eeece61af5d0ace7b72a3e/aiormq-6.9.2-py3-none-any.whl", hash = "sha256:ab0f4e88e70f874b0ea344b3c41634d2484b5dc8b17cb6ae0ae7892a172ad003", size = 31829, upload-time = "2025-10-20T10:49:58.547Z" }, ] [[package]] name = "aiosignal" -version = "1.3.2" +version = "1.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "frozenlist" }, + { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ba/b5/6d55e80f6d8a08ce22b982eafa278d823b541c925f11ee774b0b9c43473d/aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54", size = 19424, upload-time = "2024-12-13T17:10:40.86Z" } +sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/ec/6a/bc7e17a3e87a2985d3e8f4da4cd0f481060eb78fb08596c42be62c90a4d9/aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5", size = 7597, upload-time = "2024-12-13T17:10:38.469Z" }, + { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" }, ] [[package]] @@ -86,77 +152,240 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/26/99/fc813cd978842c26c82534010ea849eee9ab3a13ea2b74e95cb9c99e747b/amqp-5.3.1-py3-none-any.whl", hash = "sha256:43b3319e1b4e7d1251833a93d672b4af1e40f3d632d479b98661a95f117880a2", size = 50944, upload-time = "2024-11-12T19:55:41.782Z" }, ] +[[package]] +name = "annotated-doc" +version = "0.0.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/57/ba/046ceea27344560984e26a590f90bc7f4a75b06701f653222458922b558c/annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4", size = 7288, upload-time = "2025-11-10T22:07:42.062Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/d3/26bf1008eb3d2daa8ef4cacc7f3bfdc11818d111f7e2d0201bc6e3b49d45/annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320", size = 5303, upload-time = "2025-11-10T22:07:40.673Z" }, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = 
"2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" }, +] + [[package]] name = "apscheduler" -version = "3.11.0" +version = "3.11.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "tzlocal" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/4e/00/6d6814ddc19be2df62c8c898c4df6b5b1914f3bd024b780028caa392d186/apscheduler-3.11.0.tar.gz", hash = "sha256:4c622d250b0955a65d5d0eb91c33e6d43fd879834bf541e0a18661ae60460133", size = 107347, upload-time = "2024-11-24T19:39:26.463Z" } +sdist = { url = "https://files.pythonhosted.org/packages/07/12/3e4389e5920b4c1763390c6d371162f3784f86f85cd6d6c1bfe68eef14e2/apscheduler-3.11.2.tar.gz", hash = "sha256:2a9966b052ec805f020c8c4c3ae6e6a06e24b1bf19f2e11d91d8cca0473eef41", size = 108683, upload-time = "2025-12-22T00:39:34.884Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/d0/ae/9a053dd9229c0fde6b1f1f33f609ccff1ee79ddda364c756a924c6d8563b/APScheduler-3.11.0-py3-none-any.whl", hash = "sha256:fc134ca32e50f5eadcc4938e3a4545ab19131435e851abb40b34d63d5141c6da", size = 64004, upload-time = "2024-11-24T19:39:24.442Z" }, + { url = "https://files.pythonhosted.org/packages/9f/64/2e54428beba8d9992aa478bb8f6de9e4ecaa5f8f513bcfd567ed7fb0262d/apscheduler-3.11.2-py3-none-any.whl", hash = "sha256:ce005177f741409db4e4dd40a7431b76feb856b9dd69d57e0da49d6715bfd26d", size = 64439, upload-time = "2025-12-22T00:39:33.303Z" }, +] + +[[package]] +name = "argh" +version = "0.31.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4f/34/bc0b3577a818b4b70c6e318d23fe3c81fc3bb25f978ca8a3965cd8ee3af9/argh-0.31.3.tar.gz", hash = "sha256:f30023d8be14ca5ee6b1b3eeab829151d7bbda464ae07dc4dd5347919c5892f9", size = 57570, upload-time = "2024-07-13T17:54:59.729Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/52/fcd83710b6f8786df80e5d335882d1b24d1f610f397703e94a6ffb0d6f66/argh-0.31.3-py3-none-any.whl", hash = "sha256:2edac856ff50126f6e47d884751328c9f466bacbbb6cbfdac322053d94705494", size = 44844, upload-time = "2024-07-13T17:54:57.706Z" }, +] + +[[package]] +name = "argon2-cffi" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "argon2-cffi-bindings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0e/89/ce5af8a7d472a67cc819d5d998aa8c82c5d860608c4db9f46f1162d7dab9/argon2_cffi-25.1.0.tar.gz", hash = "sha256:694ae5cc8a42f4c4e2bf2ca0e64e51e23a040c6a517a85074683d3959e1346c1", size = 45706, upload-time = "2025-06-03T06:55:32.073Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4f/d3/a8b22fa575b297cd6e3e3b0155c7e25db170edf1c74783d6a31a2490b8d9/argon2_cffi-25.1.0-py3-none-any.whl", hash = "sha256:fdc8b074db390fccb6eb4a3604ae7231f219aa669a2652e0f20e16ba513d5741", size = 14657, 
upload-time = "2025-06-03T06:55:30.804Z" }, +] + +[[package]] +name = "argon2-cffi-bindings" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5c/2d/db8af0df73c1cf454f71b2bbe5e356b8c1f8041c979f505b3d3186e520a9/argon2_cffi_bindings-25.1.0.tar.gz", hash = "sha256:b957f3e6ea4d55d820e40ff76f450952807013d361a65d7f28acc0acbf29229d", size = 1783441, upload-time = "2025-07-30T10:02:05.147Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/57/96b8b9f93166147826da5f90376e784a10582dd39a393c99bb62cfcf52f0/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:aecba1723ae35330a008418a91ea6cfcedf6d31e5fbaa056a166462ff066d500", size = 54121, upload-time = "2025-07-30T10:01:50.815Z" }, + { url = "https://files.pythonhosted.org/packages/0a/08/a9bebdb2e0e602dde230bdde8021b29f71f7841bd54801bcfd514acb5dcf/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2630b6240b495dfab90aebe159ff784d08ea999aa4b0d17efa734055a07d2f44", size = 29177, upload-time = "2025-07-30T10:01:51.681Z" }, + { url = "https://files.pythonhosted.org/packages/b6/02/d297943bcacf05e4f2a94ab6f462831dc20158614e5d067c35d4e63b9acb/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:7aef0c91e2c0fbca6fc68e7555aa60ef7008a739cbe045541e438373bc54d2b0", size = 31090, upload-time = "2025-07-30T10:01:53.184Z" }, + { url = "https://files.pythonhosted.org/packages/c1/93/44365f3d75053e53893ec6d733e4a5e3147502663554b4d864587c7828a7/argon2_cffi_bindings-25.1.0-cp39-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1e021e87faa76ae0d413b619fe2b65ab9a037f24c60a1e6cc43457ae20de6dc6", size = 81246, upload-time = "2025-07-30T10:01:54.145Z" }, + { url = 
"https://files.pythonhosted.org/packages/09/52/94108adfdd6e2ddf58be64f959a0b9c7d4ef2fa71086c38356d22dc501ea/argon2_cffi_bindings-25.1.0-cp39-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d3e924cfc503018a714f94a49a149fdc0b644eaead5d1f089330399134fa028a", size = 87126, upload-time = "2025-07-30T10:01:55.074Z" }, + { url = "https://files.pythonhosted.org/packages/72/70/7a2993a12b0ffa2a9271259b79cc616e2389ed1a4d93842fac5a1f923ffd/argon2_cffi_bindings-25.1.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c87b72589133f0346a1cb8d5ecca4b933e3c9b64656c9d175270a000e73b288d", size = 80343, upload-time = "2025-07-30T10:01:56.007Z" }, + { url = "https://files.pythonhosted.org/packages/78/9a/4e5157d893ffc712b74dbd868c7f62365618266982b64accab26bab01edc/argon2_cffi_bindings-25.1.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1db89609c06afa1a214a69a462ea741cf735b29a57530478c06eb81dd403de99", size = 86777, upload-time = "2025-07-30T10:01:56.943Z" }, + { url = "https://files.pythonhosted.org/packages/74/cd/15777dfde1c29d96de7f18edf4cc94c385646852e7c7b0320aa91ccca583/argon2_cffi_bindings-25.1.0-cp39-abi3-win32.whl", hash = "sha256:473bcb5f82924b1becbb637b63303ec8d10e84c8d241119419897a26116515d2", size = 27180, upload-time = "2025-07-30T10:01:57.759Z" }, + { url = "https://files.pythonhosted.org/packages/e2/c6/a759ece8f1829d1f162261226fbfd2c6832b3ff7657384045286d2afa384/argon2_cffi_bindings-25.1.0-cp39-abi3-win_amd64.whl", hash = "sha256:a98cd7d17e9f7ce244c0803cad3c23a7d379c301ba618a5fa76a67d116618b98", size = 31715, upload-time = "2025-07-30T10:01:58.56Z" }, + { url = "https://files.pythonhosted.org/packages/42/b9/f8d6fa329ab25128b7e98fd83a3cb34d9db5b059a9847eddb840a0af45dd/argon2_cffi_bindings-25.1.0-cp39-abi3-win_arm64.whl", hash = "sha256:b0fdbcf513833809c882823f98dc2f931cf659d9a1429616ac3adebb49f5db94", size = 27149, upload-time = "2025-07-30T10:01:59.329Z" }, +] + +[[package]] +name = "asgiref" +version = "3.11.0" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/76/b9/4db2509eabd14b4a8c71d1b24c8d5734c52b8560a7b1e1a8b56c8d25568b/asgiref-3.11.0.tar.gz", hash = "sha256:13acff32519542a1736223fb79a715acdebe24286d98e8b164a73085f40da2c4", size = 37969, upload-time = "2025-11-19T15:32:20.106Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/be/317c2c55b8bbec407257d45f5c8d1b6867abc76d12043f2d3d58c538a4ea/asgiref-3.11.0-py3-none-any.whl", hash = "sha256:1db9021efadb0d9512ce8ffaf72fcef601c7b73a8807a1bb2ef143dc6b14846d", size = 24096, upload-time = "2025-11-19T15:32:19.004Z" }, ] [[package]] name = "attrs" -version = "25.3.0" +version = "25.4.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/685e6633917e101e5dcb62b9dd76946cbb57c26e133bae9e0cd36033c0a9/attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", size = 934251, upload-time = "2025-10-06T13:54:44.725Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, + { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, +] + +[[package]] +name = "backoff" +version = "2.2.1" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/47/d7/5bbeb12c44d7c4f2fb5b56abce497eb5ed9f34d85701de869acedd602619/backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba", size = 17001, upload-time = "2022-10-05T19:19:32.061Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/73/b6e24bd22e6720ca8ee9a85a0c4a2971af8497d8f3193fa05390cbd46e09/backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8", size = 15148, upload-time = "2022-10-05T19:19:30.546Z" }, +] + +[[package]] +name = "bcrypt" +version = "5.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d4/36/3329e2518d70ad8e2e5817d5a4cac6bba05a47767ec416c7d020a965f408/bcrypt-5.0.0.tar.gz", hash = "sha256:f748f7c2d6fd375cc93d3fba7ef4a9e3a092421b8dbf34d8d4dc06be9492dfdd", size = 25386, upload-time = "2025-09-25T19:50:47.829Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/29/6237f151fbfe295fe3e074ecc6d44228faa1e842a81f6d34a02937ee1736/bcrypt-5.0.0-cp38-abi3-macosx_10_12_universal2.whl", hash = "sha256:fc746432b951e92b58317af8e0ca746efe93e66555f1b40888865ef5bf56446b", size = 494553, upload-time = "2025-09-25T19:49:49.006Z" }, + { url = "https://files.pythonhosted.org/packages/45/b6/4c1205dde5e464ea3bd88e8742e19f899c16fa8916fb8510a851fae985b5/bcrypt-5.0.0-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c2388ca94ffee269b6038d48747f4ce8df0ffbea43f31abfa18ac72f0218effb", size = 275009, upload-time = "2025-09-25T19:49:50.581Z" }, + { url = "https://files.pythonhosted.org/packages/3b/71/427945e6ead72ccffe77894b2655b695ccf14ae1866cd977e185d606dd2f/bcrypt-5.0.0-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:560ddb6ec730386e7b3b26b8b4c88197aaed924430e7b74666a586ac997249ef", size = 278029, upload-time = "2025-09-25T19:49:52.533Z" }, + 
{ url = "https://files.pythonhosted.org/packages/17/72/c344825e3b83c5389a369c8a8e58ffe1480b8a699f46c127c34580c4666b/bcrypt-5.0.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d79e5c65dcc9af213594d6f7f1fa2c98ad3fc10431e7aa53c176b441943efbdd", size = 275907, upload-time = "2025-09-25T19:49:54.709Z" }, + { url = "https://files.pythonhosted.org/packages/0b/7e/d4e47d2df1641a36d1212e5c0514f5291e1a956a7749f1e595c07a972038/bcrypt-5.0.0-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2b732e7d388fa22d48920baa267ba5d97cca38070b69c0e2d37087b381c681fd", size = 296500, upload-time = "2025-09-25T19:49:56.013Z" }, + { url = "https://files.pythonhosted.org/packages/0f/c3/0ae57a68be2039287ec28bc463b82e4b8dc23f9d12c0be331f4782e19108/bcrypt-5.0.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0c8e093ea2532601a6f686edbc2c6b2ec24131ff5c52f7610dd64fa4553b5464", size = 278412, upload-time = "2025-09-25T19:49:57.356Z" }, + { url = "https://files.pythonhosted.org/packages/45/2b/77424511adb11e6a99e3a00dcc7745034bee89036ad7d7e255a7e47be7d8/bcrypt-5.0.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:5b1589f4839a0899c146e8892efe320c0fa096568abd9b95593efac50a87cb75", size = 275486, upload-time = "2025-09-25T19:49:59.116Z" }, + { url = "https://files.pythonhosted.org/packages/43/0a/405c753f6158e0f3f14b00b462d8bca31296f7ecfc8fc8bc7919c0c7d73a/bcrypt-5.0.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:89042e61b5e808b67daf24a434d89bab164d4de1746b37a8d173b6b14f3db9ff", size = 277940, upload-time = "2025-09-25T19:50:00.869Z" }, + { url = "https://files.pythonhosted.org/packages/62/83/b3efc285d4aadc1fa83db385ec64dcfa1707e890eb42f03b127d66ac1b7b/bcrypt-5.0.0-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:e3cf5b2560c7b5a142286f69bde914494b6d8f901aaa71e453078388a50881c4", size = 310776, upload-time = "2025-09-25T19:50:02.393Z" }, + { url = 
"https://files.pythonhosted.org/packages/95/7d/47ee337dacecde6d234890fe929936cb03ebc4c3a7460854bbd9c97780b8/bcrypt-5.0.0-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f632fd56fc4e61564f78b46a2269153122db34988e78b6be8b32d28507b7eaeb", size = 312922, upload-time = "2025-09-25T19:50:04.232Z" }, + { url = "https://files.pythonhosted.org/packages/d6/3a/43d494dfb728f55f4e1cf8fd435d50c16a2d75493225b54c8d06122523c6/bcrypt-5.0.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:801cad5ccb6b87d1b430f183269b94c24f248dddbbc5c1f78b6ed231743e001c", size = 341367, upload-time = "2025-09-25T19:50:05.559Z" }, + { url = "https://files.pythonhosted.org/packages/55/ab/a0727a4547e383e2e22a630e0f908113db37904f58719dc48d4622139b5c/bcrypt-5.0.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3cf67a804fc66fc217e6914a5635000259fbbbb12e78a99488e4d5ba445a71eb", size = 359187, upload-time = "2025-09-25T19:50:06.916Z" }, + { url = "https://files.pythonhosted.org/packages/1b/bb/461f352fdca663524b4643d8b09e8435b4990f17fbf4fea6bc2a90aa0cc7/bcrypt-5.0.0-cp38-abi3-win32.whl", hash = "sha256:3abeb543874b2c0524ff40c57a4e14e5d3a66ff33fb423529c88f180fd756538", size = 153752, upload-time = "2025-09-25T19:50:08.515Z" }, + { url = "https://files.pythonhosted.org/packages/41/aa/4190e60921927b7056820291f56fc57d00d04757c8b316b2d3c0d1d6da2c/bcrypt-5.0.0-cp38-abi3-win_amd64.whl", hash = "sha256:35a77ec55b541e5e583eb3436ffbbf53b0ffa1fa16ca6782279daf95d146dcd9", size = 150881, upload-time = "2025-09-25T19:50:09.742Z" }, + { url = "https://files.pythonhosted.org/packages/54/12/cd77221719d0b39ac0b55dbd39358db1cd1246e0282e104366ebbfb8266a/bcrypt-5.0.0-cp38-abi3-win_arm64.whl", hash = "sha256:cde08734f12c6a4e28dc6755cd11d3bdfea608d93d958fffbe95a7026ebe4980", size = 144931, upload-time = "2025-09-25T19:50:11.016Z" }, + { url = "https://files.pythonhosted.org/packages/5d/ba/2af136406e1c3839aea9ecadc2f6be2bcd1eff255bd451dd39bcf302c47a/bcrypt-5.0.0-cp39-abi3-macosx_10_12_universal2.whl", hash = 
"sha256:0c418ca99fd47e9c59a301744d63328f17798b5947b0f791e9af3c1c499c2d0a", size = 495313, upload-time = "2025-09-25T19:50:12.309Z" }, + { url = "https://files.pythonhosted.org/packages/ac/ee/2f4985dbad090ace5ad1f7dd8ff94477fe089b5fab2040bd784a3d5f187b/bcrypt-5.0.0-cp39-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddb4e1500f6efdd402218ffe34d040a1196c072e07929b9820f363a1fd1f4191", size = 275290, upload-time = "2025-09-25T19:50:13.673Z" }, + { url = "https://files.pythonhosted.org/packages/e4/6e/b77ade812672d15cf50842e167eead80ac3514f3beacac8902915417f8b7/bcrypt-5.0.0-cp39-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7aeef54b60ceddb6f30ee3db090351ecf0d40ec6e2abf41430997407a46d2254", size = 278253, upload-time = "2025-09-25T19:50:15.089Z" }, + { url = "https://files.pythonhosted.org/packages/36/c4/ed00ed32f1040f7990dac7115f82273e3c03da1e1a1587a778d8cea496d8/bcrypt-5.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f0ce778135f60799d89c9693b9b398819d15f1921ba15fe719acb3178215a7db", size = 276084, upload-time = "2025-09-25T19:50:16.699Z" }, + { url = "https://files.pythonhosted.org/packages/e7/c4/fa6e16145e145e87f1fa351bbd54b429354fd72145cd3d4e0c5157cf4c70/bcrypt-5.0.0-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a71f70ee269671460b37a449f5ff26982a6f2ba493b3eabdd687b4bf35f875ac", size = 297185, upload-time = "2025-09-25T19:50:18.525Z" }, + { url = "https://files.pythonhosted.org/packages/24/b4/11f8a31d8b67cca3371e046db49baa7c0594d71eb40ac8121e2fc0888db0/bcrypt-5.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f8429e1c410b4073944f03bd778a9e066e7fad723564a52ff91841d278dfc822", size = 278656, upload-time = "2025-09-25T19:50:19.809Z" }, + { url = "https://files.pythonhosted.org/packages/ac/31/79f11865f8078e192847d2cb526e3fa27c200933c982c5b2869720fa5fce/bcrypt-5.0.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = 
"sha256:edfcdcedd0d0f05850c52ba3127b1fce70b9f89e0fe5ff16517df7e81fa3cbb8", size = 275662, upload-time = "2025-09-25T19:50:21.567Z" }, + { url = "https://files.pythonhosted.org/packages/d4/8d/5e43d9584b3b3591a6f9b68f755a4da879a59712981ef5ad2a0ac1379f7a/bcrypt-5.0.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:611f0a17aa4a25a69362dcc299fda5c8a3d4f160e2abb3831041feb77393a14a", size = 278240, upload-time = "2025-09-25T19:50:23.305Z" }, + { url = "https://files.pythonhosted.org/packages/89/48/44590e3fc158620f680a978aafe8f87a4c4320da81ed11552f0323aa9a57/bcrypt-5.0.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:db99dca3b1fdc3db87d7c57eac0c82281242d1eabf19dcb8a6b10eb29a2e72d1", size = 311152, upload-time = "2025-09-25T19:50:24.597Z" }, + { url = "https://files.pythonhosted.org/packages/5f/85/e4fbfc46f14f47b0d20493669a625da5827d07e8a88ee460af6cd9768b44/bcrypt-5.0.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:5feebf85a9cefda32966d8171f5db7e3ba964b77fdfe31919622256f80f9cf42", size = 313284, upload-time = "2025-09-25T19:50:26.268Z" }, + { url = "https://files.pythonhosted.org/packages/25/ae/479f81d3f4594456a01ea2f05b132a519eff9ab5768a70430fa1132384b1/bcrypt-5.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3ca8a166b1140436e058298a34d88032ab62f15aae1c598580333dc21d27ef10", size = 341643, upload-time = "2025-09-25T19:50:28.02Z" }, + { url = "https://files.pythonhosted.org/packages/df/d2/36a086dee1473b14276cd6ea7f61aef3b2648710b5d7f1c9e032c29b859f/bcrypt-5.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:61afc381250c3182d9078551e3ac3a41da14154fbff647ddf52a769f588c4172", size = 359698, upload-time = "2025-09-25T19:50:31.347Z" }, + { url = "https://files.pythonhosted.org/packages/c0/f6/688d2cd64bfd0b14d805ddb8a565e11ca1fb0fd6817175d58b10052b6d88/bcrypt-5.0.0-cp39-abi3-win32.whl", hash = "sha256:64d7ce196203e468c457c37ec22390f1a61c85c6f0b8160fd752940ccfb3a683", size = 153725, upload-time = "2025-09-25T19:50:34.384Z" }, + { url = 
"https://files.pythonhosted.org/packages/9f/b9/9d9a641194a730bda138b3dfe53f584d61c58cd5230e37566e83ec2ffa0d/bcrypt-5.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:64ee8434b0da054d830fa8e89e1c8bf30061d539044a39524ff7dec90481e5c2", size = 150912, upload-time = "2025-09-25T19:50:35.69Z" }, + { url = "https://files.pythonhosted.org/packages/27/44/d2ef5e87509158ad2187f4dd0852df80695bb1ee0cfe0a684727b01a69e0/bcrypt-5.0.0-cp39-abi3-win_arm64.whl", hash = "sha256:f2347d3534e76bf50bca5500989d6c1d05ed64b440408057a37673282c654927", size = 144953, upload-time = "2025-09-25T19:50:37.32Z" }, +] + +[[package]] +name = "beautifulsoup4" +version = "4.14.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c3/b0/1c6a16426d389813b48d95e26898aff79abbde42ad353958ad95cc8c9b21/beautifulsoup4-4.14.3.tar.gz", hash = "sha256:6292b1c5186d356bba669ef9f7f051757099565ad9ada5dd630bd9de5fa7fb86", size = 627737, upload-time = "2025-11-30T15:08:26.084Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1a/39/47f9197bdd44df24d67ac8893641e16f386c984a0619ef2ee4c51fbbc019/beautifulsoup4-4.14.3-py3-none-any.whl", hash = "sha256:0918bfe44902e6ad8d57732ba310582e98da931428d231a5ecb9e7c703a735bb", size = 107721, upload-time = "2025-11-30T15:08:24.087Z" }, ] [[package]] name = "billiard" -version = "4.2.1" +version = "4.2.4" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7c/58/1546c970afcd2a2428b1bfafecf2371d8951cc34b46701bea73f4280989e/billiard-4.2.1.tar.gz", hash = "sha256:12b641b0c539073fc8d3f5b8b7be998956665c4233c7c1fcd66a7e677c4fb36f", size = 155031, upload-time = "2024-09-21T13:40:22.491Z" } +sdist = { url = "https://files.pythonhosted.org/packages/58/23/b12ac0bcdfb7360d664f40a00b1bda139cbbbced012c34e375506dbd0143/billiard-4.2.4.tar.gz", hash = 
"sha256:55f542c371209e03cd5862299b74e52e4fbcba8250ba611ad94276b369b6a85f", size = 156537, upload-time = "2025-11-30T13:28:48.52Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/30/da/43b15f28fe5f9e027b41c539abc5469052e9d48fd75f8ff094ba2a0ae767/billiard-4.2.1-py3-none-any.whl", hash = "sha256:40b59a4ac8806ba2c2369ea98d876bc6108b051c227baffd928c644d15d8f3cb", size = 86766, upload-time = "2024-09-21T13:40:20.188Z" }, + { url = "https://files.pythonhosted.org/packages/cb/87/8bab77b323f16d67be364031220069f79159117dd5e43eeb4be2fef1ac9b/billiard-4.2.4-py3-none-any.whl", hash = "sha256:525b42bdec68d2b983347ac312f892db930858495db601b5836ac24e6477cde5", size = 87070, upload-time = "2025-11-30T13:28:47.016Z" }, ] [[package]] name = "brotli" -version = "1.1.0" +version = "1.2.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2f/c2/f9e977608bdf958650638c3f1e28f85a1b075f075ebbe77db8555463787b/Brotli-1.1.0.tar.gz", hash = "sha256:81de08ac11bcb85841e440c13611c00b67d3bf82698314928d0b676362546724", size = 7372270, upload-time = "2023-09-07T14:05:41.643Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f7/16/c92ca344d646e71a43b8bb353f0a6490d7f6e06210f8554c8f874e454285/brotli-1.2.0.tar.gz", hash = "sha256:e310f77e41941c13340a95976fe66a8a95b01e783d430eeaf7a2f87e0a57dd0a", size = 7388632, upload-time = "2025-11-05T18:39:42.86Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5c/d0/5373ae13b93fe00095a58efcbce837fd470ca39f703a235d2a999baadfbc/Brotli-1.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:32d95b80260d79926f5fab3c41701dbb818fde1c9da590e77e571eefd14abe28", size = 815693, upload-time = "2024-10-18T12:32:23.824Z" }, - { url = "https://files.pythonhosted.org/packages/8e/48/f6e1cdf86751300c288c1459724bfa6917a80e30dbfc326f92cea5d3683a/Brotli-1.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b760c65308ff1e462f65d69c12e4ae085cff3b332d894637f6273a12a482d09f", 
size = 422489, upload-time = "2024-10-18T12:32:25.641Z" }, - { url = "https://files.pythonhosted.org/packages/06/88/564958cedce636d0f1bed313381dfc4b4e3d3f6015a63dae6146e1b8c65c/Brotli-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:316cc9b17edf613ac76b1f1f305d2a748f1b976b033b049a6ecdfd5612c70409", size = 873081, upload-time = "2023-09-07T14:03:57.967Z" }, - { url = "https://files.pythonhosted.org/packages/58/79/b7026a8bb65da9a6bb7d14329fd2bd48d2b7f86d7329d5cc8ddc6a90526f/Brotli-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:caf9ee9a5775f3111642d33b86237b05808dafcd6268faa492250e9b78046eb2", size = 446244, upload-time = "2023-09-07T14:03:59.319Z" }, - { url = "https://files.pythonhosted.org/packages/e5/18/c18c32ecea41b6c0004e15606e274006366fe19436b6adccc1ae7b2e50c2/Brotli-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70051525001750221daa10907c77830bc889cb6d865cc0b813d9db7fefc21451", size = 2906505, upload-time = "2023-09-07T14:04:01.327Z" }, - { url = "https://files.pythonhosted.org/packages/08/c8/69ec0496b1ada7569b62d85893d928e865df29b90736558d6c98c2031208/Brotli-1.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f4bf76817c14aa98cc6697ac02f3972cb8c3da93e9ef16b9c66573a68014f91", size = 2944152, upload-time = "2023-09-07T14:04:03.033Z" }, - { url = "https://files.pythonhosted.org/packages/ab/fb/0517cea182219d6768113a38167ef6d4eb157a033178cc938033a552ed6d/Brotli-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0c5516f0aed654134a2fc936325cc2e642f8a0e096d075209672eb321cff408", size = 2919252, upload-time = "2023-09-07T14:04:04.675Z" }, - { url = "https://files.pythonhosted.org/packages/c7/53/73a3431662e33ae61a5c80b1b9d2d18f58dfa910ae8dd696e57d39f1a2f5/Brotli-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c3020404e0b5eefd7c9485ccf8393cfb75ec38ce75586e046573c9dc29967a0", size = 
2845955, upload-time = "2023-09-07T14:04:06.585Z" }, - { url = "https://files.pythonhosted.org/packages/55/ac/bd280708d9c5ebdbf9de01459e625a3e3803cce0784f47d633562cf40e83/Brotli-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ed11165dd45ce798d99a136808a794a748d5dc38511303239d4e2363c0695dc", size = 2914304, upload-time = "2023-09-07T14:04:08.668Z" }, - { url = "https://files.pythonhosted.org/packages/76/58/5c391b41ecfc4527d2cc3350719b02e87cb424ef8ba2023fb662f9bf743c/Brotli-1.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4093c631e96fdd49e0377a9c167bfd75b6d0bad2ace734c6eb20b348bc3ea180", size = 2814452, upload-time = "2023-09-07T14:04:10.736Z" }, - { url = "https://files.pythonhosted.org/packages/c7/4e/91b8256dfe99c407f174924b65a01f5305e303f486cc7a2e8a5d43c8bec3/Brotli-1.1.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e4c4629ddad63006efa0ef968c8e4751c5868ff0b1c5c40f76524e894c50248", size = 2938751, upload-time = "2023-09-07T14:04:12.875Z" }, - { url = "https://files.pythonhosted.org/packages/5a/a6/e2a39a5d3b412938362bbbeba5af904092bf3f95b867b4a3eb856104074e/Brotli-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:861bf317735688269936f755fa136a99d1ed526883859f86e41a5d43c61d8966", size = 2933757, upload-time = "2023-09-07T14:04:14.551Z" }, - { url = "https://files.pythonhosted.org/packages/13/f0/358354786280a509482e0e77c1a5459e439766597d280f28cb097642fc26/Brotli-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:87a3044c3a35055527ac75e419dfa9f4f3667a1e887ee80360589eb8c90aabb9", size = 2936146, upload-time = "2024-10-18T12:32:27.257Z" }, - { url = "https://files.pythonhosted.org/packages/80/f7/daf538c1060d3a88266b80ecc1d1c98b79553b3f117a485653f17070ea2a/Brotli-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c5529b34c1c9d937168297f2c1fde7ebe9ebdd5e121297ff9c043bdb2ae3d6fb", size = 2848055, upload-time = "2024-10-18T12:32:29.376Z" }, - { url = 
"https://files.pythonhosted.org/packages/ad/cf/0eaa0585c4077d3c2d1edf322d8e97aabf317941d3a72d7b3ad8bce004b0/Brotli-1.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ca63e1890ede90b2e4454f9a65135a4d387a4585ff8282bb72964fab893f2111", size = 3035102, upload-time = "2024-10-18T12:32:31.371Z" }, - { url = "https://files.pythonhosted.org/packages/d8/63/1c1585b2aa554fe6dbce30f0c18bdbc877fa9a1bf5ff17677d9cca0ac122/Brotli-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e79e6520141d792237c70bcd7a3b122d00f2613769ae0cb61c52e89fd3443839", size = 2930029, upload-time = "2024-10-18T12:32:33.293Z" }, - { url = "https://files.pythonhosted.org/packages/5f/3b/4e3fd1893eb3bbfef8e5a80d4508bec17a57bb92d586c85c12d28666bb13/Brotli-1.1.0-cp312-cp312-win32.whl", hash = "sha256:5f4d5ea15c9382135076d2fb28dde923352fe02951e66935a9efaac8f10e81b0", size = 333276, upload-time = "2023-09-07T14:04:16.49Z" }, - { url = "https://files.pythonhosted.org/packages/3d/d5/942051b45a9e883b5b6e98c041698b1eb2012d25e5948c58d6bf85b1bb43/Brotli-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:906bc3a79de8c4ae5b86d3d75a8b77e44404b0f4261714306e3ad248d8ab0951", size = 357255, upload-time = "2023-09-07T14:04:17.83Z" }, - { url = "https://files.pythonhosted.org/packages/0a/9f/fb37bb8ffc52a8da37b1c03c459a8cd55df7a57bdccd8831d500e994a0ca/Brotli-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8bf32b98b75c13ec7cf774164172683d6e7891088f6316e54425fde1efc276d5", size = 815681, upload-time = "2024-10-18T12:32:34.942Z" }, - { url = "https://files.pythonhosted.org/packages/06/b3/dbd332a988586fefb0aa49c779f59f47cae76855c2d00f450364bb574cac/Brotli-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7bc37c4d6b87fb1017ea28c9508b36bbcb0c3d18b4260fcdf08b200c74a6aee8", size = 422475, upload-time = "2024-10-18T12:32:36.485Z" }, - { url = 
"https://files.pythonhosted.org/packages/bb/80/6aaddc2f63dbcf2d93c2d204e49c11a9ec93a8c7c63261e2b4bd35198283/Brotli-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c0ef38c7a7014ffac184db9e04debe495d317cc9c6fb10071f7fefd93100a4f", size = 2906173, upload-time = "2024-10-18T12:32:37.978Z" }, - { url = "https://files.pythonhosted.org/packages/ea/1d/e6ca79c96ff5b641df6097d299347507d39a9604bde8915e76bf026d6c77/Brotli-1.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91d7cc2a76b5567591d12c01f019dd7afce6ba8cba6571187e21e2fc418ae648", size = 2943803, upload-time = "2024-10-18T12:32:39.606Z" }, - { url = "https://files.pythonhosted.org/packages/ac/a3/d98d2472e0130b7dd3acdbb7f390d478123dbf62b7d32bda5c830a96116d/Brotli-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a93dde851926f4f2678e704fadeb39e16c35d8baebd5252c9fd94ce8ce68c4a0", size = 2918946, upload-time = "2024-10-18T12:32:41.679Z" }, - { url = "https://files.pythonhosted.org/packages/c4/a5/c69e6d272aee3e1423ed005d8915a7eaa0384c7de503da987f2d224d0721/Brotli-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0db75f47be8b8abc8d9e31bc7aad0547ca26f24a54e6fd10231d623f183d089", size = 2845707, upload-time = "2024-10-18T12:32:43.478Z" }, - { url = "https://files.pythonhosted.org/packages/58/9f/4149d38b52725afa39067350696c09526de0125ebfbaab5acc5af28b42ea/Brotli-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6967ced6730aed543b8673008b5a391c3b1076d834ca438bbd70635c73775368", size = 2936231, upload-time = "2024-10-18T12:32:45.224Z" }, - { url = "https://files.pythonhosted.org/packages/5a/5a/145de884285611838a16bebfdb060c231c52b8f84dfbe52b852a15780386/Brotli-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7eedaa5d036d9336c95915035fb57422054014ebdeb6f3b42eac809928e40d0c", size = 2848157, upload-time = "2024-10-18T12:32:46.894Z" }, - { url = 
"https://files.pythonhosted.org/packages/50/ae/408b6bfb8525dadebd3b3dd5b19d631da4f7d46420321db44cd99dcf2f2c/Brotli-1.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d487f5432bf35b60ed625d7e1b448e2dc855422e87469e3f450aa5552b0eb284", size = 3035122, upload-time = "2024-10-18T12:32:48.844Z" }, - { url = "https://files.pythonhosted.org/packages/af/85/a94e5cfaa0ca449d8f91c3d6f78313ebf919a0dbd55a100c711c6e9655bc/Brotli-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:832436e59afb93e1836081a20f324cb185836c617659b07b129141a8426973c7", size = 2930206, upload-time = "2024-10-18T12:32:51.198Z" }, - { url = "https://files.pythonhosted.org/packages/c2/f0/a61d9262cd01351df22e57ad7c34f66794709acab13f34be2675f45bf89d/Brotli-1.1.0-cp313-cp313-win32.whl", hash = "sha256:43395e90523f9c23a3d5bdf004733246fba087f2948f87ab28015f12359ca6a0", size = 333804, upload-time = "2024-10-18T12:32:52.661Z" }, - { url = "https://files.pythonhosted.org/packages/7e/c1/ec214e9c94000d1c1974ec67ced1c970c148aa6b8d8373066123fc3dbf06/Brotli-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:9011560a466d2eb3f5a6e4929cf4a09be405c64154e12df0dd72713f6500e32b", size = 358517, upload-time = "2024-10-18T12:32:54.066Z" }, + { url = "https://files.pythonhosted.org/packages/11/ee/b0a11ab2315c69bb9b45a2aaed022499c9c24a205c3a49c3513b541a7967/brotli-1.2.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:35d382625778834a7f3061b15423919aa03e4f5da34ac8e02c074e4b75ab4f84", size = 861543, upload-time = "2025-11-05T18:38:24.183Z" }, + { url = "https://files.pythonhosted.org/packages/e1/2f/29c1459513cd35828e25531ebfcbf3e92a5e49f560b1777a9af7203eb46e/brotli-1.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7a61c06b334bd99bc5ae84f1eeb36bfe01400264b3c352f968c6e30a10f9d08b", size = 444288, upload-time = "2025-11-05T18:38:25.139Z" }, + { url = 
"https://files.pythonhosted.org/packages/3d/6f/feba03130d5fceadfa3a1bb102cb14650798c848b1df2a808356f939bb16/brotli-1.2.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:acec55bb7c90f1dfc476126f9711a8e81c9af7fb617409a9ee2953115343f08d", size = 1528071, upload-time = "2025-11-05T18:38:26.081Z" }, + { url = "https://files.pythonhosted.org/packages/2b/38/f3abb554eee089bd15471057ba85f47e53a44a462cfce265d9bf7088eb09/brotli-1.2.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:260d3692396e1895c5034f204f0db022c056f9e2ac841593a4cf9426e2a3faca", size = 1626913, upload-time = "2025-11-05T18:38:27.284Z" }, + { url = "https://files.pythonhosted.org/packages/03/a7/03aa61fbc3c5cbf99b44d158665f9b0dd3d8059be16c460208d9e385c837/brotli-1.2.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:072e7624b1fc4d601036ab3f4f27942ef772887e876beff0301d261210bca97f", size = 1419762, upload-time = "2025-11-05T18:38:28.295Z" }, + { url = "https://files.pythonhosted.org/packages/21/1b/0374a89ee27d152a5069c356c96b93afd1b94eae83f1e004b57eb6ce2f10/brotli-1.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:adedc4a67e15327dfdd04884873c6d5a01d3e3b6f61406f99b1ed4865a2f6d28", size = 1484494, upload-time = "2025-11-05T18:38:29.29Z" }, + { url = "https://files.pythonhosted.org/packages/cf/57/69d4fe84a67aef4f524dcd075c6eee868d7850e85bf01d778a857d8dbe0a/brotli-1.2.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7a47ce5c2288702e09dc22a44d0ee6152f2c7eda97b3c8482d826a1f3cfc7da7", size = 1593302, upload-time = "2025-11-05T18:38:30.639Z" }, + { url = "https://files.pythonhosted.org/packages/d5/3b/39e13ce78a8e9a621c5df3aeb5fd181fcc8caba8c48a194cd629771f6828/brotli-1.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:af43b8711a8264bb4e7d6d9a6d004c3a2019c04c01127a868709ec29962b6036", size = 1487913, upload-time = "2025-11-05T18:38:31.618Z" }, + { url = 
"https://files.pythonhosted.org/packages/62/28/4d00cb9bd76a6357a66fcd54b4b6d70288385584063f4b07884c1e7286ac/brotli-1.2.0-cp312-cp312-win32.whl", hash = "sha256:e99befa0b48f3cd293dafeacdd0d191804d105d279e0b387a32054c1180f3161", size = 334362, upload-time = "2025-11-05T18:38:32.939Z" }, + { url = "https://files.pythonhosted.org/packages/1c/4e/bc1dcac9498859d5e353c9b153627a3752868a9d5f05ce8dedd81a2354ab/brotli-1.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:b35c13ce241abdd44cb8ca70683f20c0c079728a36a996297adb5334adfc1c44", size = 369115, upload-time = "2025-11-05T18:38:33.765Z" }, ] +[[package]] +name = "bs4" +version = "0.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "beautifulsoup4" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/aa/4acaf814ff901145da37332e05bb510452ebed97bc9602695059dd46ef39/bs4-0.0.2.tar.gz", hash = "sha256:a48685c58f50fe127722417bae83fe6badf500d54b55f7e39ffe43b798653925", size = 698, upload-time = "2024-01-17T18:15:47.371Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/bb/bf7aab772a159614954d84aa832c129624ba6c32faa559dfb200a534e50b/bs4-0.0.2-py2.py3-none-any.whl", hash = "sha256:abf8742c0805ef7f662dce4b51cca104cffe52b835238afc169142ab9b3fbccc", size = 1189, upload-time = "2024-01-17T18:15:48.613Z" }, +] + +[[package]] +name = "build" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "(os_name == 'nt' and platform_machine != 'aarch64' and sys_platform == 'linux') or (os_name == 'nt' and sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "packaging" }, + { name = "pyproject-hooks" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/25/1c/23e33405a7c9eac261dff640926b8b5adaed6a6eb3e1767d441ed611d0c0/build-1.3.0.tar.gz", hash = "sha256:698edd0ea270bde950f53aed21f3a0135672206f3911e0176261a31e0e07b397", size = 48544, upload-time = "2025-08-01T21:27:09.268Z" } +wheels = [ + { 
url = "https://files.pythonhosted.org/packages/cb/8c/2b30c12155ad8de0cf641d76a8b396a16d2c36bc6d50b621a62b7c4567c1/build-1.3.0-py3-none-any.whl", hash = "sha256:7145f0b5061ba90a1500d60bd1b13ca0a8a4cebdd0cc16ed8adf1c0e739f43b4", size = 23382, upload-time = "2025-08-01T21:27:07.844Z" }, +] + +[[package]] +name = "cachetools" +version = "6.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/1d/ede8680603f6016887c062a2cf4fc8fdba905866a3ab8831aa8aa651320c/cachetools-6.2.4.tar.gz", hash = "sha256:82c5c05585e70b6ba2d3ae09ea60b79548872185d2f24ae1f2709d37299fd607", size = 31731, upload-time = "2025-12-15T18:24:53.744Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/fc/1d7b80d0eb7b714984ce40efc78859c022cd930e402f599d8ca9e39c78a4/cachetools-6.2.4-py3-none-any.whl", hash = "sha256:69a7a52634fed8b8bf6e24a050fb60bff1c9bd8f6d24572b99c32d4e71e62a51", size = 11551, upload-time = "2025-12-15T18:24:52.332Z" }, +] + +[[package]] +name = "callbacks" +version = "0.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/15/c71150189ea1cfd04a5853a40e2e074ec777755bd3924980fe51e5c6dd44/callbacks-0.3.0.tar.gz", hash = "sha256:16248b2570394e06cc9c78f586f2582c36a677163e80f8735625d3904b3a5b65", size = 9385, upload-time = "2018-10-09T13:08:30.4Z" } + [[package]] name = "celery" -version = "5.5.3" +version = "5.6.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "billiard" }, @@ -166,82 +395,135 @@ dependencies = [ { name = "click-repl" }, { name = "kombu" }, { name = "python-dateutil" }, + { name = "tzlocal" }, { name = "vine" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/bb/7d/6c289f407d219ba36d8b384b42489ebdd0c84ce9c413875a8aae0c85f35b/celery-5.5.3.tar.gz", hash = "sha256:6c972ae7968c2b5281227f01c3a3f984037d21c5129d07bf3550cc2afc6b10a5", size = 1667144, upload-time = "2025-06-01T11:08:12.563Z" } +sdist = 
{ url = "https://files.pythonhosted.org/packages/e2/1b/b9bbe49b1f799d0ee3de91c66e6b61d095139721f5a2ae25585f49d7c7a9/celery-5.6.1.tar.gz", hash = "sha256:bdc9e02b1480dd137f2df392358c3e94bb623d4f47ae1bc0a7dc5821c90089c7", size = 1716388, upload-time = "2025-12-29T21:48:50.805Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c9/af/0dcccc7fdcdf170f9a1585e5e96b6fb0ba1749ef6be8c89a6202284759bd/celery-5.5.3-py3-none-any.whl", hash = "sha256:0b5761a07057acee94694464ca482416b959568904c9dfa41ce8413a7d65d525", size = 438775, upload-time = "2025-06-01T11:08:09.94Z" }, + { url = "https://files.pythonhosted.org/packages/87/b1/7b7d1e0bc2a3f7ee01576008e3c943f3f23a56809b63f4140ddc96f201c1/celery-5.6.1-py3-none-any.whl", hash = "sha256:ee87aa14d344c655fe83bfc44b2c93bbb7cba39ae11e58b88279523506159d44", size = 445358, upload-time = "2025-12-29T21:48:48.894Z" }, +] + +[[package]] +name = "celery-types" +version = "0.24.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/25/2276a1f00f8ab9fc88128c939333933a24db7df1d75aa57ecc27b7dd3a22/celery_types-0.24.0.tar.gz", hash = "sha256:c93fbcd0b04a9e9c2f55d5540aca4aa1ea4cc06a870c0c8dee5062fdd59663fe", size = 33148, upload-time = "2025-12-23T17:16:30.847Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/7e/3252cba5f5c9a65a3f52a69734d8e51e023db8981022b503e8183cf0225e/celery_types-0.24.0-py3-none-any.whl", hash = "sha256:a21e04681e68719a208335e556a79909da4be9c5e0d6d2fd0dd4c5615954b3fd", size = 60473, upload-time = "2025-12-23T17:16:29.89Z" }, ] [[package]] name = "certifi" -version = "2025.6.15" +version = "2025.11.12" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/73/f7/f14b46d4bcd21092d7d3ccef689615220d8a08fb25e564b65d20738e672e/certifi-2025.6.15.tar.gz", hash = 
"sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b", size = 158753, upload-time = "2025-06-15T02:45:51.329Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/84/ae/320161bd181fc06471eed047ecce67b693fd7515b16d495d8932db763426/certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057", size = 157650, upload-time = "2025-06-15T02:45:49.977Z" }, + { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, ] [[package]] name = "cffi" -version = "1.17.1" +version = "2.0.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pycparser" }, + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621, upload-time = "2024-09-04T20:45:21.852Z" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", 
hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448, upload-time = "2024-09-04T20:44:26.208Z" }, - { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976, upload-time = "2024-09-04T20:44:27.578Z" }, - { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475, upload-time = "2024-09-04T20:44:43.733Z" }, - { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009, upload-time = "2024-09-04T20:44:45.309Z" }, + { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" }, + { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" }, + { url = "https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = 
"2025-09-08T23:22:47.349Z" }, + { url = "https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" }, + { url = "https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" }, + { url = "https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = "2025-09-08T23:22:51.364Z" }, + { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" }, + { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" }, + { url = "https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" }, + { url = 
"https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" }, + { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" }, + { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" }, ] [[package]] name = "charset-normalizer" -version = "3.4.2" +version = "3.4.4" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367, upload-time = "2025-05-02T08:34:42.01Z" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d7/a4/37f4d6035c89cac7930395a35cc0f1b872e652eaafb76a6075943754f095/charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7", size = 199936, upload-time = "2025-05-02T08:32:33.712Z" }, - { url = 
"https://files.pythonhosted.org/packages/ee/8a/1a5e33b73e0d9287274f899d967907cd0bf9c343e651755d9307e0dbf2b3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3", size = 143790, upload-time = "2025-05-02T08:32:35.768Z" }, - { url = "https://files.pythonhosted.org/packages/66/52/59521f1d8e6ab1482164fa21409c5ef44da3e9f653c13ba71becdd98dec3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a", size = 153924, upload-time = "2025-05-02T08:32:37.284Z" }, - { url = "https://files.pythonhosted.org/packages/86/2d/fb55fdf41964ec782febbf33cb64be480a6b8f16ded2dbe8db27a405c09f/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214", size = 146626, upload-time = "2025-05-02T08:32:38.803Z" }, - { url = "https://files.pythonhosted.org/packages/8c/73/6ede2ec59bce19b3edf4209d70004253ec5f4e319f9a2e3f2f15601ed5f7/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a", size = 148567, upload-time = "2025-05-02T08:32:40.251Z" }, - { url = "https://files.pythonhosted.org/packages/09/14/957d03c6dc343c04904530b6bef4e5efae5ec7d7990a7cbb868e4595ee30/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd", size = 150957, upload-time = "2025-05-02T08:32:41.705Z" }, - { url = "https://files.pythonhosted.org/packages/0d/c8/8174d0e5c10ccebdcb1b53cc959591c4c722a3ad92461a273e86b9f5a302/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981", size = 145408, upload-time = "2025-05-02T08:32:43.709Z" }, - { url = "https://files.pythonhosted.org/packages/58/aa/8904b84bc8084ac19dc52feb4f5952c6df03ffb460a887b42615ee1382e8/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c", size = 153399, upload-time = "2025-05-02T08:32:46.197Z" }, - { url = "https://files.pythonhosted.org/packages/c2/26/89ee1f0e264d201cb65cf054aca6038c03b1a0c6b4ae998070392a3ce605/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b", size = 156815, upload-time = "2025-05-02T08:32:48.105Z" }, - { url = "https://files.pythonhosted.org/packages/fd/07/68e95b4b345bad3dbbd3a8681737b4338ff2c9df29856a6d6d23ac4c73cb/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d", size = 154537, upload-time = "2025-05-02T08:32:49.719Z" }, - { url = "https://files.pythonhosted.org/packages/77/1a/5eefc0ce04affb98af07bc05f3bac9094513c0e23b0562d64af46a06aae4/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f", size = 149565, upload-time = "2025-05-02T08:32:51.404Z" }, - { url = "https://files.pythonhosted.org/packages/37/a0/2410e5e6032a174c95e0806b1a6585eb21e12f445ebe239fac441995226a/charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c", size = 98357, upload-time = "2025-05-02T08:32:53.079Z" }, - { url = "https://files.pythonhosted.org/packages/6c/4f/c02d5c493967af3eda9c771ad4d2bbc8df6f99ddbeb37ceea6e8716a32bc/charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e", size = 105776, 
upload-time = "2025-05-02T08:32:54.573Z" }, - { url = "https://files.pythonhosted.org/packages/ea/12/a93df3366ed32db1d907d7593a94f1fe6293903e3e92967bebd6950ed12c/charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0", size = 199622, upload-time = "2025-05-02T08:32:56.363Z" }, - { url = "https://files.pythonhosted.org/packages/04/93/bf204e6f344c39d9937d3c13c8cd5bbfc266472e51fc8c07cb7f64fcd2de/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf", size = 143435, upload-time = "2025-05-02T08:32:58.551Z" }, - { url = "https://files.pythonhosted.org/packages/22/2a/ea8a2095b0bafa6c5b5a55ffdc2f924455233ee7b91c69b7edfcc9e02284/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e", size = 153653, upload-time = "2025-05-02T08:33:00.342Z" }, - { url = "https://files.pythonhosted.org/packages/b6/57/1b090ff183d13cef485dfbe272e2fe57622a76694061353c59da52c9a659/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1", size = 146231, upload-time = "2025-05-02T08:33:02.081Z" }, - { url = "https://files.pythonhosted.org/packages/e2/28/ffc026b26f441fc67bd21ab7f03b313ab3fe46714a14b516f931abe1a2d8/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c", size = 148243, upload-time = "2025-05-02T08:33:04.063Z" }, - { url = "https://files.pythonhosted.org/packages/c0/0f/9abe9bd191629c33e69e47c6ef45ef99773320e9ad8e9cb08b8ab4a8d4cb/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691", size = 150442, upload-time = "2025-05-02T08:33:06.418Z" }, - { url = "https://files.pythonhosted.org/packages/67/7c/a123bbcedca91d5916c056407f89a7f5e8fdfce12ba825d7d6b9954a1a3c/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0", size = 145147, upload-time = "2025-05-02T08:33:08.183Z" }, - { url = "https://files.pythonhosted.org/packages/ec/fe/1ac556fa4899d967b83e9893788e86b6af4d83e4726511eaaad035e36595/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b", size = 153057, upload-time = "2025-05-02T08:33:09.986Z" }, - { url = "https://files.pythonhosted.org/packages/2b/ff/acfc0b0a70b19e3e54febdd5301a98b72fa07635e56f24f60502e954c461/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff", size = 156454, upload-time = "2025-05-02T08:33:11.814Z" }, - { url = "https://files.pythonhosted.org/packages/92/08/95b458ce9c740d0645feb0e96cea1f5ec946ea9c580a94adfe0b617f3573/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b", size = 154174, upload-time = "2025-05-02T08:33:13.707Z" }, - { url = "https://files.pythonhosted.org/packages/78/be/8392efc43487ac051eee6c36d5fbd63032d78f7728cb37aebcc98191f1ff/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148", size = 149166, upload-time = "2025-05-02T08:33:15.458Z" }, - { url = "https://files.pythonhosted.org/packages/44/96/392abd49b094d30b91d9fbda6a69519e95802250b777841cf3bda8fe136c/charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7", 
size = 98064, upload-time = "2025-05-02T08:33:17.06Z" }, - { url = "https://files.pythonhosted.org/packages/e9/b0/0200da600134e001d91851ddc797809e2fe0ea72de90e09bec5a2fbdaccb/charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980", size = 105641, upload-time = "2025-05-02T08:33:18.753Z" }, - { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626, upload-time = "2025-05-02T08:34:40.053Z" }, + { url = "https://files.pythonhosted.org/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" }, + { url = "https://files.pythonhosted.org/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" }, + { url = "https://files.pythonhosted.org/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" }, + { url = "https://files.pythonhosted.org/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" }, + { url = "https://files.pythonhosted.org/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" }, + { url = "https://files.pythonhosted.org/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, upload-time = "2025-10-14T04:40:59.468Z" }, + { url = "https://files.pythonhosted.org/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = "2025-10-14T04:41:00.623Z" }, + { url = "https://files.pythonhosted.org/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" }, + { url = "https://files.pythonhosted.org/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" }, + { url = 
"https://files.pythonhosted.org/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" }, + { url = "https://files.pythonhosted.org/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" }, + { url = "https://files.pythonhosted.org/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" }, + { url = "https://files.pythonhosted.org/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" }, + { url = "https://files.pythonhosted.org/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" }, + { url = "https://files.pythonhosted.org/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 107131, upload-time = "2025-10-14T04:41:10.467Z" }, + { url = 
"https://files.pythonhosted.org/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "chromadb" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bcrypt" }, + { name = "build" }, + { name = "grpcio" }, + { name = "httpx" }, + { name = "importlib-resources" }, + { name = "jsonschema" }, + { name = "kubernetes" }, + { name = "mmh3" }, + { name = "numpy" }, + { name = "onnxruntime" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-grpc" }, + { name = "opentelemetry-sdk" }, + { name = "orjson" }, + { name = "overrides" }, + { name = "posthog" }, + { name = "pybase64" }, + { name = "pydantic" }, + { name = "pypika" }, + { name = "pyyaml" }, + { name = "rich" }, + { name = "tenacity" }, + { name = "tokenizers" }, + { name = "tqdm" }, + { name = "typer" }, + { name = "typing-extensions" }, + { name = "uvicorn", extra = ["standard"] }, +] +sdist = { url = "https://files.pythonhosted.org/packages/43/54/2bc73eac5d8fd7ffc41f8e6e4dd13ad0fd916f8973f85b1411011ba1e05b/chromadb-1.4.0.tar.gz", hash = "sha256:5b4e6d1ede4faaaf12ec772c3c603ea19f39b255ef0795855b40dd79f00a4183", size = 2001752, upload-time = "2025-12-24T02:58:18.326Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a6/d5/7ce34021304bdf1a5eefaaf434d2be078828dd71aa3871d89eeeecedfb19/chromadb-1.4.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = 
"sha256:ab4ad96c21d0038f6d8d84b9cac2010ce1f448926e9a2ee35251552f2e85da07", size = 20882057, upload-time = "2025-12-24T02:58:15.916Z" }, + { url = "https://files.pythonhosted.org/packages/76/6d/9fbf794f3672bfaf227b0e8642b1af6e1ef7d5f5b20f7505ac684ff0b155/chromadb-1.4.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:4d3c8abd762f092f73482e3eb1dae560a8a1c2674575d11eaac0dddf35e9cc6d", size = 20148106, upload-time = "2025-12-24T02:58:12.915Z" }, + { url = "https://files.pythonhosted.org/packages/1f/cc/d33e24258027c6a14a49a5abf94c75dd6f82e5ab5ed44fe622c0de303420/chromadb-1.4.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29fe47563c460a6cadbdc481b503c520ab4e424730c97d6a85d488a13009b6ce", size = 20759866, upload-time = "2025-12-24T02:58:06.987Z" }, + { url = "https://files.pythonhosted.org/packages/96/da/048ea86c7cb04a873aaab912be62d90b403a8b15a98ae7781ea777371373/chromadb-1.4.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1942e1ee074c7d1e421ea04391a1fccfd18a4b3b94a8e61e853d88dc6924abfa", size = 21666411, upload-time = "2025-12-24T02:58:10.044Z" }, + { url = "https://files.pythonhosted.org/packages/a0/49/933091cf12ee4ce4527a8e99b778f768f63df67e7d3ed9c20eecc0385169/chromadb-1.4.0-cp39-abi3-win_amd64.whl", hash = "sha256:2ec0485e715357a41078c20ebed65d5d5b941bf2fff418c6f1c64176dc36f837", size = 21930010, upload-time = "2025-12-24T02:58:20.138Z" }, ] [[package]] name = "click" -version = "8.2.1" +version = "8.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" }, + { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" }, ] [[package]] @@ -258,14 +540,14 @@ wheels = [ [[package]] name = "click-plugins" -version = "1.1.1" +version = "1.1.1.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5f/1d/45434f64ed749540af821fd7e42b8e4d23ac04b1eda7c26613288d6cd8a8/click-plugins-1.1.1.tar.gz", hash = "sha256:46ab999744a9d831159c3411bb0c79346d94a444df9a3a3742e9ed63645f264b", size = 8164, upload-time = "2019-04-04T04:27:04.82Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c3/a4/34847b59150da33690a36da3681d6bbc2ec14ee9a846bc30a6746e5984e4/click_plugins-1.1.1.2.tar.gz", hash = "sha256:d7af3984a99d243c131aa1a828331e7630f4a88a9741fd05c927b204bcf92261", size = 8343, upload-time = "2025-06-25T00:47:37.555Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e9/da/824b92d9942f4e472702488857914bdd50f73021efea15b4cad9aca8ecef/click_plugins-1.1.1-py2.py3-none-any.whl", hash = "sha256:5d262006d3222f5057fd81e1623d4443e41dcda5dc815c06b442aa3c02889fc8", size = 7497, upload-time = "2019-04-04T04:27:03.36Z" }, + { url = 
"https://files.pythonhosted.org/packages/3d/9a/2abecb28ae875e39c8cad711eb1186d8d14eab564705325e77e4e6ab9ae5/click_plugins-1.1.1.2-py2.py3-none-any.whl", hash = "sha256:008d65743833ffc1f5417bf0e78e8d2c23aab04d9745ba817bd3e71b0feb6aa6", size = 11051, upload-time = "2025-06-25T00:47:36.731Z" }, ] [[package]] @@ -291,104 +573,402 @@ wheels = [ ] [[package]] -name = "cuda-bindings" -version = "12.9.0" +name = "coloredlogs" +version = "15.0.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "humanfriendly" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cc/c7/eed8f27100517e8c0e6b923d5f0845d0cb99763da6fdee00478f91db7325/coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0", size = 278520, upload-time = "2021-06-11T10:22:45.202Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/06/3d6badcf13db419e25b07041d9c7b4a2c331d3f4e7134445ec5df57714cd/coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934", size = 46018, upload-time = "2021-06-11T10:22:42.561Z" }, +] + +[[package]] +name = "contourpy" +version = "1.3.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/58/01/1253e6698a07380cd31a736d248a3f2a50a7c88779a1813da27503cadc2a/contourpy-1.3.3.tar.gz", hash = "sha256:083e12155b210502d0bca491432bb04d56dc3432f95a979b429f2848c3dbe880", size = 13466174, upload-time = "2025-07-26T12:03:12.549Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/45/adfee365d9ea3d853550b2e735f9d66366701c65db7855cd07621732ccfc/contourpy-1.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b08a32ea2f8e42cf1d4be3169a98dd4be32bafe4f22b6c4cb4ba810fa9e5d2cb", size = 293419, upload-time = "2025-07-26T12:01:21.16Z" }, + { url = 
"https://files.pythonhosted.org/packages/53/3e/405b59cfa13021a56bba395a6b3aca8cec012b45bf177b0eaf7a202cde2c/contourpy-1.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:556dba8fb6f5d8742f2923fe9457dbdd51e1049c4a43fd3986a0b14a1d815fc6", size = 273979, upload-time = "2025-07-26T12:01:22.448Z" }, + { url = "https://files.pythonhosted.org/packages/d4/1c/a12359b9b2ca3a845e8f7f9ac08bdf776114eb931392fcad91743e2ea17b/contourpy-1.3.3-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92d9abc807cf7d0e047b95ca5d957cf4792fcd04e920ca70d48add15c1a90ea7", size = 332653, upload-time = "2025-07-26T12:01:24.155Z" }, + { url = "https://files.pythonhosted.org/packages/63/12/897aeebfb475b7748ea67b61e045accdfcf0d971f8a588b67108ed7f5512/contourpy-1.3.3-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b2e8faa0ed68cb29af51edd8e24798bb661eac3bd9f65420c1887b6ca89987c8", size = 379536, upload-time = "2025-07-26T12:01:25.91Z" }, + { url = "https://files.pythonhosted.org/packages/43/8a/a8c584b82deb248930ce069e71576fc09bd7174bbd35183b7943fb1064fd/contourpy-1.3.3-cp312-cp312-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:626d60935cf668e70a5ce6ff184fd713e9683fb458898e4249b63be9e28286ea", size = 384397, upload-time = "2025-07-26T12:01:27.152Z" }, + { url = "https://files.pythonhosted.org/packages/cc/8f/ec6289987824b29529d0dfda0d74a07cec60e54b9c92f3c9da4c0ac732de/contourpy-1.3.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4d00e655fcef08aba35ec9610536bfe90267d7ab5ba944f7032549c55a146da1", size = 362601, upload-time = "2025-07-26T12:01:28.808Z" }, + { url = "https://files.pythonhosted.org/packages/05/0a/a3fe3be3ee2dceb3e615ebb4df97ae6f3828aa915d3e10549ce016302bd1/contourpy-1.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:451e71b5a7d597379ef572de31eeb909a87246974d960049a9848c3bc6c41bf7", size = 1331288, upload-time = "2025-07-26T12:01:31.198Z" }, + { url = 
"https://files.pythonhosted.org/packages/33/1d/acad9bd4e97f13f3e2b18a3977fe1b4a37ecf3d38d815333980c6c72e963/contourpy-1.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:459c1f020cd59fcfe6650180678a9993932d80d44ccde1fa1868977438f0b411", size = 1403386, upload-time = "2025-07-26T12:01:33.947Z" }, + { url = "https://files.pythonhosted.org/packages/cf/8f/5847f44a7fddf859704217a99a23a4f6417b10e5ab1256a179264561540e/contourpy-1.3.3-cp312-cp312-win32.whl", hash = "sha256:023b44101dfe49d7d53932be418477dba359649246075c996866106da069af69", size = 185018, upload-time = "2025-07-26T12:01:35.64Z" }, + { url = "https://files.pythonhosted.org/packages/19/e8/6026ed58a64563186a9ee3f29f41261fd1828f527dd93d33b60feca63352/contourpy-1.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:8153b8bfc11e1e4d75bcb0bff1db232f9e10b274e0929de9d608027e0d34ff8b", size = 226567, upload-time = "2025-07-26T12:01:36.804Z" }, + { url = "https://files.pythonhosted.org/packages/d1/e2/f05240d2c39a1ed228d8328a78b6f44cd695f7ef47beb3e684cf93604f86/contourpy-1.3.3-cp312-cp312-win_arm64.whl", hash = "sha256:07ce5ed73ecdc4a03ffe3e1b3e3c1166db35ae7584be76f65dbbe28a7791b0cc", size = 193655, upload-time = "2025-07-26T12:01:37.999Z" }, +] + +[[package]] +name = "cryptography" +version = "46.0.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/33/c00162f49c0e2fe8064a62cb92b93e50c74a72bc370ab92f86112b33ff62/cryptography-46.0.3.tar.gz", hash = "sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1", size = 749258, upload-time = "2025-10-15T23:18:31.74Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/42/9c391dd801d6cf0d561b5890549d4b27bafcc53b39c31a817e69d87c625b/cryptography-46.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a", size = 7225004, 
upload-time = "2025-10-15T23:16:52.239Z" }, + { url = "https://files.pythonhosted.org/packages/1c/67/38769ca6b65f07461eb200e85fc1639b438bdc667be02cf7f2cd6a64601c/cryptography-46.0.3-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc", size = 4296667, upload-time = "2025-10-15T23:16:54.369Z" }, + { url = "https://files.pythonhosted.org/packages/5c/49/498c86566a1d80e978b42f0d702795f69887005548c041636df6ae1ca64c/cryptography-46.0.3-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d", size = 4450807, upload-time = "2025-10-15T23:16:56.414Z" }, + { url = "https://files.pythonhosted.org/packages/4b/0a/863a3604112174c8624a2ac3c038662d9e59970c7f926acdcfaed8d61142/cryptography-46.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb", size = 4299615, upload-time = "2025-10-15T23:16:58.442Z" }, + { url = "https://files.pythonhosted.org/packages/64/02/b73a533f6b64a69f3cd3872acb6ebc12aef924d8d103133bb3ea750dc703/cryptography-46.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849", size = 4016800, upload-time = "2025-10-15T23:17:00.378Z" }, + { url = "https://files.pythonhosted.org/packages/25/d5/16e41afbfa450cde85a3b7ec599bebefaef16b5c6ba4ec49a3532336ed72/cryptography-46.0.3-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8", size = 4984707, upload-time = "2025-10-15T23:17:01.98Z" }, + { url = "https://files.pythonhosted.org/packages/c9/56/e7e69b427c3878352c2fb9b450bd0e19ed552753491d39d7d0a2f5226d41/cryptography-46.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec", size = 4482541, upload-time = 
"2025-10-15T23:17:04.078Z" }, + { url = "https://files.pythonhosted.org/packages/78/f6/50736d40d97e8483172f1bb6e698895b92a223dba513b0ca6f06b2365339/cryptography-46.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91", size = 4299464, upload-time = "2025-10-15T23:17:05.483Z" }, + { url = "https://files.pythonhosted.org/packages/00/de/d8e26b1a855f19d9994a19c702fa2e93b0456beccbcfe437eda00e0701f2/cryptography-46.0.3-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e", size = 4950838, upload-time = "2025-10-15T23:17:07.425Z" }, + { url = "https://files.pythonhosted.org/packages/8f/29/798fc4ec461a1c9e9f735f2fc58741b0daae30688f41b2497dcbc9ed1355/cryptography-46.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926", size = 4481596, upload-time = "2025-10-15T23:17:09.343Z" }, + { url = "https://files.pythonhosted.org/packages/15/8d/03cd48b20a573adfff7652b76271078e3045b9f49387920e7f1f631d125e/cryptography-46.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71", size = 4426782, upload-time = "2025-10-15T23:17:11.22Z" }, + { url = "https://files.pythonhosted.org/packages/fa/b1/ebacbfe53317d55cf33165bda24c86523497a6881f339f9aae5c2e13e57b/cryptography-46.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac", size = 4698381, upload-time = "2025-10-15T23:17:12.829Z" }, + { url = "https://files.pythonhosted.org/packages/96/92/8a6a9525893325fc057a01f654d7efc2c64b9de90413adcf605a85744ff4/cryptography-46.0.3-cp311-abi3-win32.whl", hash = "sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018", size = 3055988, upload-time = "2025-10-15T23:17:14.65Z" }, + { url = 
"https://files.pythonhosted.org/packages/7e/bf/80fbf45253ea585a1e492a6a17efcb93467701fa79e71550a430c5e60df0/cryptography-46.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb", size = 3514451, upload-time = "2025-10-15T23:17:16.142Z" }, + { url = "https://files.pythonhosted.org/packages/2e/af/9b302da4c87b0beb9db4e756386a7c6c5b8003cd0e742277888d352ae91d/cryptography-46.0.3-cp311-abi3-win_arm64.whl", hash = "sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c", size = 2928007, upload-time = "2025-10-15T23:17:18.04Z" }, + { url = "https://files.pythonhosted.org/packages/fd/23/45fe7f376a7df8daf6da3556603b36f53475a99ce4faacb6ba2cf3d82021/cryptography-46.0.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936", size = 7218248, upload-time = "2025-10-15T23:17:46.294Z" }, + { url = "https://files.pythonhosted.org/packages/27/32/b68d27471372737054cbd34c84981f9edbc24fe67ca225d389799614e27f/cryptography-46.0.3-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683", size = 4294089, upload-time = "2025-10-15T23:17:48.269Z" }, + { url = "https://files.pythonhosted.org/packages/26/42/fa8389d4478368743e24e61eea78846a0006caffaf72ea24a15159215a14/cryptography-46.0.3-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d", size = 4440029, upload-time = "2025-10-15T23:17:49.837Z" }, + { url = "https://files.pythonhosted.org/packages/5f/eb/f483db0ec5ac040824f269e93dd2bd8a21ecd1027e77ad7bdf6914f2fd80/cryptography-46.0.3-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0", size = 4297222, upload-time = "2025-10-15T23:17:51.357Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/cf/da9502c4e1912cb1da3807ea3618a6829bee8207456fbbeebc361ec38ba3/cryptography-46.0.3-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc", size = 4012280, upload-time = "2025-10-15T23:17:52.964Z" }, + { url = "https://files.pythonhosted.org/packages/6b/8f/9adb86b93330e0df8b3dcf03eae67c33ba89958fc2e03862ef1ac2b42465/cryptography-46.0.3-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3", size = 4978958, upload-time = "2025-10-15T23:17:54.965Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a0/5fa77988289c34bdb9f913f5606ecc9ada1adb5ae870bd0d1054a7021cc4/cryptography-46.0.3-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971", size = 4473714, upload-time = "2025-10-15T23:17:56.754Z" }, + { url = "https://files.pythonhosted.org/packages/14/e5/fc82d72a58d41c393697aa18c9abe5ae1214ff6f2a5c18ac470f92777895/cryptography-46.0.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac", size = 4296970, upload-time = "2025-10-15T23:17:58.588Z" }, + { url = "https://files.pythonhosted.org/packages/78/06/5663ed35438d0b09056973994f1aec467492b33bd31da36e468b01ec1097/cryptography-46.0.3-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04", size = 4940236, upload-time = "2025-10-15T23:18:00.897Z" }, + { url = "https://files.pythonhosted.org/packages/fc/59/873633f3f2dcd8a053b8dd1d38f783043b5fce589c0f6988bf55ef57e43e/cryptography-46.0.3-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506", size = 4472642, upload-time = "2025-10-15T23:18:02.749Z" }, + { url = 
"https://files.pythonhosted.org/packages/3d/39/8e71f3930e40f6877737d6f69248cf74d4e34b886a3967d32f919cc50d3b/cryptography-46.0.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963", size = 4423126, upload-time = "2025-10-15T23:18:04.85Z" }, + { url = "https://files.pythonhosted.org/packages/cd/c7/f65027c2810e14c3e7268353b1681932b87e5a48e65505d8cc17c99e36ae/cryptography-46.0.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4", size = 4686573, upload-time = "2025-10-15T23:18:06.908Z" }, + { url = "https://files.pythonhosted.org/packages/0a/6e/1c8331ddf91ca4730ab3086a0f1be19c65510a33b5a441cb334e7a2d2560/cryptography-46.0.3-cp38-abi3-win32.whl", hash = "sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df", size = 3036695, upload-time = "2025-10-15T23:18:08.672Z" }, + { url = "https://files.pythonhosted.org/packages/90/45/b0d691df20633eff80955a0fc7695ff9051ffce8b69741444bd9ed7bd0db/cryptography-46.0.3-cp38-abi3-win_amd64.whl", hash = "sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f", size = 3501720, upload-time = "2025-10-15T23:18:10.632Z" }, + { url = "https://files.pythonhosted.org/packages/e8/cb/2da4cc83f5edb9c3257d09e1e7ab7b23f049c7962cae8d842bbef0a9cec9/cryptography-46.0.3-cp38-abi3-win_arm64.whl", hash = "sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372", size = 2918740, upload-time = "2025-10-15T23:18:12.277Z" }, +] + +[[package]] +name = "cuda-bindings" +version = "13.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cuda-pathfinder" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/5f/bf/23a583b8453f580bb1c7749c7abf57017176e0053197384ce81e73977ab3/cuda_bindings-12.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34318c3a1b442854f072f5bb410aea6834172fd1ee7a5ecf49f1d125ea7498a0", 
size = 11820737, upload-time = "2025-05-06T19:10:38.601Z" }, - { url = "https://files.pythonhosted.org/packages/e3/03/40fc1488727a8d72ecc35f58f9df4939277892a837614339c3366d520426/cuda_bindings-12.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff0e28d1e34758654b9c961e1f55e4786e49aee6a4dbceaf3cc24c46c672df7e", size = 12154006, upload-time = "2025-05-06T19:10:41.642Z" }, - { url = "https://files.pythonhosted.org/packages/2c/6a/2808871d0b519364db2b460dc1b17d4fff3e340d5875144a303254f996e5/cuda_bindings-12.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:57bdaa778116ee50fdcdd31810e0f345c23549ffb045452dc88d5c63601d35d4", size = 12223544, upload-time = "2025-05-06T19:10:43.928Z" }, - { url = "https://files.pythonhosted.org/packages/a0/29/7b9e64e3078e31516dad683d6a23f5e5a0d5c2b642c58fb23786ec4bfac6/cuda_bindings-12.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b9d039981412575c1713915a889934ec750b8c2ed3dbfaa739292e0478a3f6f", size = 11810588, upload-time = "2025-05-06T19:10:46.653Z" }, - { url = "https://files.pythonhosted.org/packages/01/fd/1c30778265488c6797c6c17a69c09ba5636df6dc6b0ebfc96d950be2f9e7/cuda_bindings-12.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6d7314b2e5db025bb88ddba4df6db2127cc39610ccf4f74c0e1ead05241da29", size = 12149149, upload-time = "2025-05-06T19:10:49.334Z" }, - { url = "https://files.pythonhosted.org/packages/d0/86/fdf309b334db8c6555f303c0f6a1538db53135103d13a78d8445b4981f15/cuda_bindings-12.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:44eae9c854a55c7f464f08fa895a1fe0846e36097697d8c255051789d59bf55b", size = 12188603, upload-time = "2025-05-06T19:10:52.066Z" }, + { url = "https://files.pythonhosted.org/packages/53/3d/c8ed9d169843091f3f0d6b8218e826fd59520a37e0434c204feada597988/cuda_bindings-13.1.1-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1e75ad0cb863330df784236d289612d71ca855c013d19ae00e5693574abd6915", 
size = 15530160, upload-time = "2025-12-09T22:05:55.386Z" }, + { url = "https://files.pythonhosted.org/packages/4a/8e/368295623ee43fba622909d780fbb6863efc1638dff55f67a0f04eac6470/cuda_bindings-13.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:25785d1a3cdcd98f151240fd5efd025609319a6720a217dee2a929241749d488", size = 16110386, upload-time = "2025-12-09T22:05:57.71Z" }, + { url = "https://files.pythonhosted.org/packages/60/1f/ecc4701ade3e85f091c625a920574527b9daf7fb354189fbfbc5516af6cd/cuda_bindings-13.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:ccde9c95c0e953b31fe7731bb08da9d0a34b1770498df9a3c156fdfdbe3951ad", size = 15250028, upload-time = "2025-12-09T22:06:00.346Z" }, +] + +[[package]] +name = "cuda-pathfinder" +version = "1.3.3" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0b/02/4dbe7568a42e46582248942f54dc64ad094769532adbe21e525e4edf7bc4/cuda_pathfinder-1.3.3-py3-none-any.whl", hash = "sha256:9984b664e404f7c134954a771be8775dfd6180ea1e1aef4a5a37d4be05d9bbb1", size = 27154, upload-time = "2025-12-04T22:35:08.996Z" }, ] [[package]] name = "cuda-python" -version = "12.9.0" +version = "13.1.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cuda-bindings" }, + { name = "cuda-pathfinder" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/24/3c/4475aebeaab9651f2e61000fbe76f91a476d371dbfbf0a1cf46e689af253/cuda_python-12.9.0-py3-none-any.whl", hash = "sha256:926acba49b2c0a0374c61b7c98f337c085199cf51cdfe4d6423c4129c20547a7", size = 7532, upload-time = "2025-05-06T19:14:07.771Z" }, + { url = "https://files.pythonhosted.org/packages/cd/08/b5e3b9822662d72d540d830531e3ab6a7cabbda3dd56175696aabccfeb76/cuda_python-13.1.1-py3-none-any.whl", hash = "sha256:944cc4fe6482673d28dd545797a28840945a1668739328fa2ad1e9be4f7050d9", size = 8038, upload-time = "2025-12-09T22:13:10.719Z" }, +] + +[[package]] +name = "cycler" +version = "0.12.1" 
+source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a9/95/a3dbbb5028f35eafb79008e7522a75244477d2838f38cbb722248dabc2a8/cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c", size = 7615, upload-time = "2023-10-07T05:32:18.335Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30", size = 8321, upload-time = "2023-10-07T05:32:16.783Z" }, +] + +[[package]] +name = "dashscope" +version = "1.25.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "certifi" }, + { name = "cryptography" }, + { name = "requests" }, + { name = "websocket-client" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/c5/91/60f5353c8752d8ce489f4baeb252999d4cfb1a784c0beda34b5287135d65/dashscope-1.25.5-py3-none-any.whl", hash = "sha256:1be9eebaf1e7327317a22db9233770f4252463b926c84071ffd8805ae06cf998", size = 1323186, upload-time = "2025-12-18T02:15:26.462Z" }, +] + +[[package]] +name = "dataclasses-json" +version = "0.6.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "marshmallow" }, + { name = "typing-inspect" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/64/a4/f71d9cf3a5ac257c993b5ca3f93df5f7fb395c725e7f1e6479d2514173c3/dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0", size = 32227, upload-time = "2024-06-09T16:20:19.103Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c3/be/d0d44e092656fe7a06b55e6103cbce807cdbdee17884a5367c68c9860853/dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a", size = 28686, upload-time = 
"2024-06-09T16:20:16.715Z" }, ] [[package]] name = "decorator" -version = "5.2.1" +version = "4.4.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711, upload-time = "2025-02-24T04:41:34.073Z" } +sdist = { url = "https://files.pythonhosted.org/packages/da/93/84fa12f2dc341f8cf5f022ee09e109961055749df2d0c75c5f98746cfe6c/decorator-4.4.2.tar.gz", hash = "sha256:e3a62f0520172440ca0dcc823749319382e377f37f140a0b99ef45fecb84bfe7", size = 33629, upload-time = "2020-02-29T05:24:43.312Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, + { url = "https://files.pythonhosted.org/packages/ed/1b/72a1821152d07cf1d8b6fce298aeb06a7eb90f4d6d41acec9861e7cc6df0/decorator-4.4.2-py2.py3-none-any.whl", hash = "sha256:41fa54c2a0cc4ba648be4fd43cff00aedf5b9465c9bf18d64325bc225f08f760", size = 9239, upload-time = "2020-02-29T05:24:45.993Z" }, +] + +[[package]] +name = "distro" +version = "1.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722, upload-time = "2023-12-24T09:54:32.31Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = 
"2023-12-24T09:54:30.421Z" }, +] + +[[package]] +name = "django" +version = "6.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asgiref" }, + { name = "sqlparse" }, + { name = "tzdata", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/15/75/19762bfc4ea556c303d9af8e36f0cd910ab17dff6c8774644314427a2120/django-6.0.tar.gz", hash = "sha256:7b0c1f50c0759bbe6331c6a39c89ae022a84672674aeda908784617ef47d8e26", size = 10932418, upload-time = "2025-12-03T16:26:21.878Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d7/ae/f19e24789a5ad852670d6885f5480f5e5895576945fcc01817dfd9bc002a/django-6.0-py3-none-any.whl", hash = "sha256:1cc2c7344303bbfb7ba5070487c17f7fc0b7174bbb0a38cebf03c675f5f19b6d", size = 8339181, upload-time = "2025-12-03T16:26:16.231Z" }, +] + +[[package]] +name = "dnspython" +version = "2.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8c/8b/57666417c0f90f08bcafa776861060426765fdb422eb10212086fb811d26/dnspython-2.8.0.tar.gz", hash = "sha256:181d3c6996452cb1189c4046c61599b84a5a86e099562ffde77d26984ff26d0f", size = 368251, upload-time = "2025-09-07T18:58:00.022Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ba/5a/18ad964b0086c6e62e2e7500f7edc89e3faa45033c71c1893d34eed2b2de/dnspython-2.8.0-py3-none-any.whl", hash = "sha256:01d9bbc4a2d76bf0db7c1f729812ded6d912bd318d3b1cf81d30c0f845dbf3af", size = 331094, upload-time = "2025-09-07T18:57:58.071Z" }, +] + +[[package]] +name = "dominate" +version = "2.9.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/f3/1c8088ff19a0fcd9c3234802a0ee47006ea64bd8852f1019194f0e3583ff/dominate-2.9.1.tar.gz", hash = "sha256:558284687d9b8aae1904e3d6051ad132dd4a8c0cf551b37ea4e7e42a31d19dc4", size = 37715, upload-time = "2023-12-24T20:45:19.192Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/58/19/0380af745f151a1648657bbcef0fb49ac28bf09083d94498163ffd9b32dc/dominate-2.9.1-py2.py3-none-any.whl", hash = "sha256:cb7b6b79d33b15ae0a6e87856b984879927c7c2ebb29522df4c75b28ffd9b989", size = 29976, upload-time = "2023-12-24T20:45:17.154Z" }, +] + +[[package]] +name = "dotenv" +version = "0.9.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "python-dotenv" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/b7/545d2c10c1fc15e48653c91efde329a790f2eecfbbf2bd16003b5db2bab0/dotenv-0.9.9-py2.py3-none-any.whl", hash = "sha256:29cf74a087b31dafdb5a446b6d7e11cbce8ed2741540e2339c69fbef92c94ce9", size = 1892, upload-time = "2025-02-19T22:15:01.647Z" }, +] + +[[package]] +name = "durationpy" +version = "0.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9d/a4/e44218c2b394e31a6dd0d6b095c4e1f32d0be54c2a4b250032d717647bab/durationpy-0.10.tar.gz", hash = "sha256:1fa6893409a6e739c9c72334fc65cca1f355dbdd93405d30f726deb5bde42fba", size = 3335, upload-time = "2025-05-17T13:52:37.26Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b0/0d/9feae160378a3553fa9a339b0e9c1a048e147a4127210e286ef18b730f03/durationpy-0.10-py3-none-any.whl", hash = "sha256:3b41e1b601234296b4fb368338fdcd3e13e0b4fb5b67345948f4f2bf9868b286", size = 3922, upload-time = "2025-05-17T13:52:36.463Z" }, +] + +[[package]] +name = "email-validator" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dnspython" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f5/22/900cb125c76b7aaa450ce02fd727f452243f2e91a61af068b40adba60ea9/email_validator-2.3.0.tar.gz", hash = "sha256:9fc05c37f2f6cf439ff414f8fc46d917929974a82244c20eb10231ba60c54426", size = 51238, upload-time = "2025-08-26T13:09:06.831Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/de/15/545e2b6cf2e3be84bc1ed85613edd75b8aea69807a71c26f4ca6a9258e82/email_validator-2.3.0-py3-none-any.whl", hash = "sha256:80f13f623413e6b197ae73bb10bf4eb0908faf509ad8362c5edeb0be7fd450b4", size = 35604, upload-time = "2025-08-26T13:09:05.858Z" }, +] + +[[package]] +name = "fastapi" +version = "0.128.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-doc" }, + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/52/08/8c8508db6c7b9aae8f7175046af41baad690771c9bcde676419965e338c7/fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a", size = 365682, upload-time = "2025-12-27T15:21:13.714Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/05/5cbb59154b093548acd0f4c7c474a118eda06da25aa75c616b72d8fcd92a/fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d", size = 103094, upload-time = "2025-12-27T15:21:12.154Z" }, +] + +[package.optional-dependencies] +standard = [ + { name = "email-validator" }, + { name = "fastapi-cli", extra = ["standard"] }, + { name = "httpx" }, + { name = "jinja2" }, + { name = "pydantic-extra-types" }, + { name = "pydantic-settings" }, + { name = "python-multipart" }, + { name = "uvicorn", extra = ["standard"] }, +] + +[[package]] +name = "fastapi-cli" +version = "0.0.20" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "rich-toolkit" }, + { name = "typer" }, + { name = "uvicorn", extra = ["standard"] }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d3/ca/d90fb3bfbcbd6e56c77afd9d114dd6ce8955d8bb90094399d1c70e659e40/fastapi_cli-0.0.20.tar.gz", hash = "sha256:d17c2634f7b96b6b560bc16b0035ed047d523c912011395f49f00a421692bc3a", size = 19786, upload-time = "2025-12-22T17:13:33.794Z" } +wheels = [ + { url 
= "https://files.pythonhosted.org/packages/08/89/5c4eef60524d0fd704eb0706885b82cd5623a43396b94e4a5b17d3a3f516/fastapi_cli-0.0.20-py3-none-any.whl", hash = "sha256:e58b6a0038c0b1532b7a0af690656093dee666201b6b19d3c87175b358e9f783", size = 12390, upload-time = "2025-12-22T17:13:31.708Z" }, +] + +[package.optional-dependencies] +standard = [ + { name = "fastapi-cloud-cli" }, + { name = "uvicorn", extra = ["standard"] }, +] + +[[package]] +name = "fastapi-cloud-cli" +version = "0.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "fastar" }, + { name = "httpx" }, + { name = "pydantic", extra = ["email"] }, + { name = "rich-toolkit" }, + { name = "rignore" }, + { name = "sentry-sdk" }, + { name = "typer" }, + { name = "uvicorn", extra = ["standard"] }, +] +sdist = { url = "https://files.pythonhosted.org/packages/51/5d/3b33438de35521fab4968b232caa9a4bd568a5078f2b2dfb7bb8a4528603/fastapi_cloud_cli-0.8.0.tar.gz", hash = "sha256:cf07c502528bfd9e6b184776659f05d9212811d76bbec9fbb6bf34bed4c7456f", size = 30257, upload-time = "2025-12-23T12:08:33.904Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dd/8e/abb95ef59e91bb5adaa2d18fbf9ea70fd524010bb03f406a2dd2a4775ef9/fastapi_cloud_cli-0.8.0-py3-none-any.whl", hash = "sha256:e9f40bee671d985fd25d7a5409b56d4f103777bf8a0c6d746ea5fbf97a8186d9", size = 22306, upload-time = "2025-12-23T12:08:32.68Z" }, +] + +[[package]] +name = "fastar" +version = "0.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/69/e7/f89d54fb04104114dd0552836dc2b47914f416cc0e200b409dd04a33de5e/fastar-0.8.0.tar.gz", hash = "sha256:f4d4d68dbf1c4c2808f0e730fac5843493fc849f70fe3ad3af60dfbaf68b9a12", size = 68524, upload-time = "2025-11-26T02:36:00.72Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/58/f1/5b2ff898abac7f1a418284aad285e3a4f68d189c572ab2db0f6c9079dd16/fastar-0.8.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:0f10d2adfe40f47ff228f4efaa32d409d732ded98580e03ed37c9535b5fc923d", size = 706369, upload-time = "2025-11-26T02:34:37.783Z" }, + { url = "https://files.pythonhosted.org/packages/23/60/8046a386dca39154f80c927cbbeeb4b1c1267a3271bffe61552eb9995757/fastar-0.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b930da9d598e3bc69513d131f397e6d6be4643926ef3de5d33d1e826631eb036", size = 629097, upload-time = "2025-11-26T02:34:21.888Z" }, + { url = "https://files.pythonhosted.org/packages/22/7e/1ae005addc789924a9268da2394d3bb5c6f96836f7e37b7e3d23c2362675/fastar-0.8.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9d210da2de733ca801de83e931012349d209f38b92d9630ccaa94bd445bdc9b8", size = 868938, upload-time = "2025-11-26T02:33:51.119Z" }, + { url = "https://files.pythonhosted.org/packages/a6/77/290a892b073b84bf82e6b2259708dfe79c54f356e252c2dd40180b16fe07/fastar-0.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa02270721517078a5bd61a38719070ac2537a4aa6b6c48cf369cf2abc59174a", size = 765204, upload-time = "2025-11-26T02:32:47.02Z" }, + { url = "https://files.pythonhosted.org/packages/d0/00/c3155171b976003af3281f5258189f1935b15d1221bfc7467b478c631216/fastar-0.8.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:83c391e5b789a720e4d0029b9559f5d6dee3226693c5b39c0eab8eaece997e0f", size = 764717, upload-time = "2025-11-26T02:33:02.453Z" }, + { url = "https://files.pythonhosted.org/packages/b7/43/405b7ad76207b2c11b7b59335b70eac19e4a2653977f5588a1ac8fed54f4/fastar-0.8.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3258d7a78a72793cdd081545da61cabe85b1f37634a1d0b97ffee0ff11d105ef", size = 931502, upload-time = "2025-11-26T02:33:18.619Z" }, + { url = "https://files.pythonhosted.org/packages/da/8a/a3dde6d37cc3da4453f2845cdf16675b5686b73b164f37e2cc579b057c2c/fastar-0.8.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:e6eab95dd985cdb6a50666cbeb9e4814676e59cfe52039c880b69d67cfd44767", size = 821454, upload-time = "2025-11-26T02:33:33.427Z" }, + { url = "https://files.pythonhosted.org/packages/da/c1/904fe2468609c8990dce9fe654df3fbc7324a8d8e80d8240ae2c89757064/fastar-0.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:829b1854166141860887273c116c94e31357213fa8e9fe8baeb18bd6c38aa8d9", size = 821647, upload-time = "2025-11-26T02:34:07Z" }, + { url = "https://files.pythonhosted.org/packages/c8/73/a0642ab7a400bc07528091785e868ace598fde06fcd139b8f865ec1b6f3c/fastar-0.8.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b1667eae13f9457a3c737f4376d68e8c3e548353538b28f7e4273a30cb3965cd", size = 986342, upload-time = "2025-11-26T02:34:53.371Z" }, + { url = "https://files.pythonhosted.org/packages/af/af/60c1bfa6edab72366461a95f053d0f5f7ab1825fe65ca2ca367432cd8629/fastar-0.8.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:b864a95229a7db0814cd9ef7987cb713fd43dce1b0d809dd17d9cd6f02fdde3e", size = 1040207, upload-time = "2025-11-26T02:35:10.65Z" }, + { url = "https://files.pythonhosted.org/packages/f6/a0/0d624290dec622e7fa084b6881f456809f68777d54a314f5dde932714506/fastar-0.8.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c05fbc5618ce17675a42576fa49858d79734627f0a0c74c0875ab45ee8de340c", size = 1045031, upload-time = "2025-11-26T02:35:28.108Z" }, + { url = "https://files.pythonhosted.org/packages/a7/74/cf663af53c4706ba88e6b4af44a6b0c3bd7d7ca09f079dc40647a8f06585/fastar-0.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7f41c51ee96f338662ee3c3df4840511ba3f9969606840f1b10b7cb633a3c716", size = 994877, upload-time = "2025-11-26T02:35:45.797Z" }, + { url = "https://files.pythonhosted.org/packages/52/17/444c8be6e77206050e350da7c338102b6cab384be937fa0b1d6d1f9ede73/fastar-0.8.0-cp312-cp312-win32.whl", hash = "sha256:d949a1a2ea7968b734632c009df0571c94636a5e1622c87a6e2bf712a7334f47", size = 455996, upload-time = "2025-11-26T02:36:26.938Z" 
}, + { url = "https://files.pythonhosted.org/packages/dc/34/fc3b5e56d71a17b1904800003d9251716e8fd65f662e1b10a26881698a74/fastar-0.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:fc645994d5b927d769121094e8a649b09923b3c13a8b0b98696d8f853f23c532", size = 490429, upload-time = "2025-11-26T02:36:12.707Z" }, + { url = "https://files.pythonhosted.org/packages/35/a8/5608cc837417107c594e2e7be850b9365bcb05e99645966a5d6a156285fe/fastar-0.8.0-cp312-cp312-win_arm64.whl", hash = "sha256:d81ee82e8dc78a0adb81728383bd39611177d642a8fa2d601d4ad5ad59e5f3bd", size = 461297, upload-time = "2025-11-26T02:36:03.546Z" }, +] + +[[package]] +name = "filelock" +version = "3.20.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a7/23/ce7a1126827cedeb958fc043d61745754464eb56c5937c35bbf2b8e26f34/filelock-3.20.1.tar.gz", hash = "sha256:b8360948b351b80f420878d8516519a2204b07aefcdcfd24912a5d33127f188c", size = 19476, upload-time = "2025-12-15T23:54:28.027Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/7f/a1a97644e39e7316d850784c642093c99df1290a460df4ede27659056834/filelock-3.20.1-py3-none-any.whl", hash = "sha256:15d9e9a67306188a44baa72f569d2bfd803076269365fdea0934385da4dc361a", size = 16666, upload-time = "2025-12-15T23:54:26.874Z" }, +] + +[[package]] +name = "flatbuffers" +version = "25.12.19" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e8/2d/d2a548598be01649e2d46231d151a6c56d10b964d94043a335ae56ea2d92/flatbuffers-25.12.19-py2.py3-none-any.whl", hash = "sha256:7634f50c427838bb021c2d66a3d1168e9d199b0607e6329399f04846d42e20b4", size = 26661, upload-time = "2025-12-19T23:16:13.622Z" }, +] + +[[package]] +name = "fonttools" +version = "4.61.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ec/ca/cf17b88a8df95691275a3d77dc0a5ad9907f328ae53acbe6795da1b2f5ed/fonttools-4.61.1.tar.gz", hash = 
"sha256:6675329885c44657f826ef01d9e4fb33b9158e9d93c537d84ad8399539bc6f69", size = 3565756, upload-time = "2025-12-12T17:31:24.246Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6f/16/7decaa24a1bd3a70c607b2e29f0adc6159f36a7e40eaba59846414765fd4/fonttools-4.61.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f3cb4a569029b9f291f88aafc927dd53683757e640081ca8c412781ea144565e", size = 2851593, upload-time = "2025-12-12T17:30:04.225Z" }, + { url = "https://files.pythonhosted.org/packages/94/98/3c4cb97c64713a8cf499b3245c3bf9a2b8fd16a3e375feff2aed78f96259/fonttools-4.61.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41a7170d042e8c0024703ed13b71893519a1a6d6e18e933e3ec7507a2c26a4b2", size = 2400231, upload-time = "2025-12-12T17:30:06.47Z" }, + { url = "https://files.pythonhosted.org/packages/b7/37/82dbef0f6342eb01f54bca073ac1498433d6ce71e50c3c3282b655733b31/fonttools-4.61.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10d88e55330e092940584774ee5e8a6971b01fc2f4d3466a1d6c158230880796", size = 4954103, upload-time = "2025-12-12T17:30:08.432Z" }, + { url = "https://files.pythonhosted.org/packages/6c/44/f3aeac0fa98e7ad527f479e161aca6c3a1e47bb6996b053d45226fe37bf2/fonttools-4.61.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:15acc09befd16a0fb8a8f62bc147e1a82817542d72184acca9ce6e0aeda9fa6d", size = 5004295, upload-time = "2025-12-12T17:30:10.56Z" }, + { url = "https://files.pythonhosted.org/packages/14/e8/7424ced75473983b964d09f6747fa09f054a6d656f60e9ac9324cf40c743/fonttools-4.61.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e6bcdf33aec38d16508ce61fd81838f24c83c90a1d1b8c68982857038673d6b8", size = 4944109, upload-time = "2025-12-12T17:30:12.874Z" }, + { url = 
"https://files.pythonhosted.org/packages/c8/8b/6391b257fa3d0b553d73e778f953a2f0154292a7a7a085e2374b111e5410/fonttools-4.61.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5fade934607a523614726119164ff621e8c30e8fa1ffffbbd358662056ba69f0", size = 5093598, upload-time = "2025-12-12T17:30:15.79Z" }, + { url = "https://files.pythonhosted.org/packages/d9/71/fd2ea96cdc512d92da5678a1c98c267ddd4d8c5130b76d0f7a80f9a9fde8/fonttools-4.61.1-cp312-cp312-win32.whl", hash = "sha256:75da8f28eff26defba42c52986de97b22106cb8f26515b7c22443ebc9c2d3261", size = 2269060, upload-time = "2025-12-12T17:30:18.058Z" }, + { url = "https://files.pythonhosted.org/packages/80/3b/a3e81b71aed5a688e89dfe0e2694b26b78c7d7f39a5ffd8a7d75f54a12a8/fonttools-4.61.1-cp312-cp312-win_amd64.whl", hash = "sha256:497c31ce314219888c0e2fce5ad9178ca83fe5230b01a5006726cdf3ac9f24d9", size = 2319078, upload-time = "2025-12-12T17:30:22.862Z" }, + { url = "https://files.pythonhosted.org/packages/c7/4e/ce75a57ff3aebf6fc1f4e9d508b8e5810618a33d900ad6c19eb30b290b97/fonttools-4.61.1-py3-none-any.whl", hash = "sha256:17d2bf5d541add43822bcf0c43d7d847b160c9bb01d15d5007d84e2217aaa371", size = 1148996, upload-time = "2025-12-12T17:31:21.03Z" }, ] [[package]] name = "frozenlist" -version = "1.7.0" +version = "1.8.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/79/b1/b64018016eeb087db503b038296fd782586432b9c077fc5c7839e9cb6ef6/frozenlist-1.7.0.tar.gz", hash = "sha256:2e310d81923c2437ea8670467121cc3e9b0f76d3043cc1d2331d56c7fb7a3a8f", size = 45078, upload-time = "2025-06-09T23:02:35.538Z" } +sdist = { url = "https://files.pythonhosted.org/packages/2d/f5/c831fac6cc817d26fd54c7eaccd04ef7e0288806943f7cc5bbf69f3ac1f0/frozenlist-1.8.0.tar.gz", hash = "sha256:3ede829ed8d842f6cd48fc7081d7a41001a56f1f38603f9d49bf3020d59a31ad", size = 45875, upload-time = "2025-10-06T05:38:17.865Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/ef/a2/c8131383f1e66adad5f6ecfcce383d584ca94055a34d683bbb24ac5f2f1c/frozenlist-1.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3dbf9952c4bb0e90e98aec1bd992b3318685005702656bc6f67c1a32b76787f2", size = 81424, upload-time = "2025-06-09T23:00:42.24Z" }, - { url = "https://files.pythonhosted.org/packages/4c/9d/02754159955088cb52567337d1113f945b9e444c4960771ea90eb73de8db/frozenlist-1.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1f5906d3359300b8a9bb194239491122e6cf1444c2efb88865426f170c262cdb", size = 47952, upload-time = "2025-06-09T23:00:43.481Z" }, - { url = "https://files.pythonhosted.org/packages/01/7a/0046ef1bd6699b40acd2067ed6d6670b4db2f425c56980fa21c982c2a9db/frozenlist-1.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3dabd5a8f84573c8d10d8859a50ea2dec01eea372031929871368c09fa103478", size = 46688, upload-time = "2025-06-09T23:00:44.793Z" }, - { url = "https://files.pythonhosted.org/packages/d6/a2/a910bafe29c86997363fb4c02069df4ff0b5bc39d33c5198b4e9dd42d8f8/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa57daa5917f1738064f302bf2626281a1cb01920c32f711fbc7bc36111058a8", size = 243084, upload-time = "2025-06-09T23:00:46.125Z" }, - { url = "https://files.pythonhosted.org/packages/64/3e/5036af9d5031374c64c387469bfcc3af537fc0f5b1187d83a1cf6fab1639/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c193dda2b6d49f4c4398962810fa7d7c78f032bf45572b3e04dd5249dff27e08", size = 233524, upload-time = "2025-06-09T23:00:47.73Z" }, - { url = "https://files.pythonhosted.org/packages/06/39/6a17b7c107a2887e781a48ecf20ad20f1c39d94b2a548c83615b5b879f28/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe2b675cf0aaa6d61bf8fbffd3c274b3c9b7b1623beb3809df8a81399a4a9c4", size = 248493, upload-time = "2025-06-09T23:00:49.742Z" }, - { url = 
"https://files.pythonhosted.org/packages/be/00/711d1337c7327d88c44d91dd0f556a1c47fb99afc060ae0ef66b4d24793d/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fc5d5cda37f62b262405cf9652cf0856839c4be8ee41be0afe8858f17f4c94b", size = 244116, upload-time = "2025-06-09T23:00:51.352Z" }, - { url = "https://files.pythonhosted.org/packages/24/fe/74e6ec0639c115df13d5850e75722750adabdc7de24e37e05a40527ca539/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0d5ce521d1dd7d620198829b87ea002956e4319002ef0bc8d3e6d045cb4646e", size = 224557, upload-time = "2025-06-09T23:00:52.855Z" }, - { url = "https://files.pythonhosted.org/packages/8d/db/48421f62a6f77c553575201e89048e97198046b793f4a089c79a6e3268bd/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:488d0a7d6a0008ca0db273c542098a0fa9e7dfaa7e57f70acef43f32b3f69dca", size = 241820, upload-time = "2025-06-09T23:00:54.43Z" }, - { url = "https://files.pythonhosted.org/packages/1d/fa/cb4a76bea23047c8462976ea7b7a2bf53997a0ca171302deae9d6dd12096/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:15a7eaba63983d22c54d255b854e8108e7e5f3e89f647fc854bd77a237e767df", size = 236542, upload-time = "2025-06-09T23:00:56.409Z" }, - { url = "https://files.pythonhosted.org/packages/5d/32/476a4b5cfaa0ec94d3f808f193301debff2ea42288a099afe60757ef6282/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1eaa7e9c6d15df825bf255649e05bd8a74b04a4d2baa1ae46d9c2d00b2ca2cb5", size = 249350, upload-time = "2025-06-09T23:00:58.468Z" }, - { url = "https://files.pythonhosted.org/packages/8d/ba/9a28042f84a6bf8ea5dbc81cfff8eaef18d78b2a1ad9d51c7bc5b029ad16/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4389e06714cfa9d47ab87f784a7c5be91d3934cd6e9a7b85beef808297cc025", size = 225093, upload-time = 
"2025-06-09T23:01:00.015Z" }, - { url = "https://files.pythonhosted.org/packages/bc/29/3a32959e68f9cf000b04e79ba574527c17e8842e38c91d68214a37455786/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:73bd45e1488c40b63fe5a7df892baf9e2a4d4bb6409a2b3b78ac1c6236178e01", size = 245482, upload-time = "2025-06-09T23:01:01.474Z" }, - { url = "https://files.pythonhosted.org/packages/80/e8/edf2f9e00da553f07f5fa165325cfc302dead715cab6ac8336a5f3d0adc2/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:99886d98e1643269760e5fe0df31e5ae7050788dd288947f7f007209b8c33f08", size = 249590, upload-time = "2025-06-09T23:01:02.961Z" }, - { url = "https://files.pythonhosted.org/packages/1c/80/9a0eb48b944050f94cc51ee1c413eb14a39543cc4f760ed12657a5a3c45a/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:290a172aae5a4c278c6da8a96222e6337744cd9c77313efe33d5670b9f65fc43", size = 237785, upload-time = "2025-06-09T23:01:05.095Z" }, - { url = "https://files.pythonhosted.org/packages/f3/74/87601e0fb0369b7a2baf404ea921769c53b7ae00dee7dcfe5162c8c6dbf0/frozenlist-1.7.0-cp312-cp312-win32.whl", hash = "sha256:426c7bc70e07cfebc178bc4c2bf2d861d720c4fff172181eeb4a4c41d4ca2ad3", size = 39487, upload-time = "2025-06-09T23:01:06.54Z" }, - { url = "https://files.pythonhosted.org/packages/0b/15/c026e9a9fc17585a9d461f65d8593d281fedf55fbf7eb53f16c6df2392f9/frozenlist-1.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:563b72efe5da92e02eb68c59cb37205457c977aa7a449ed1b37e6939e5c47c6a", size = 43874, upload-time = "2025-06-09T23:01:07.752Z" }, - { url = "https://files.pythonhosted.org/packages/24/90/6b2cebdabdbd50367273c20ff6b57a3dfa89bd0762de02c3a1eb42cb6462/frozenlist-1.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee80eeda5e2a4e660651370ebffd1286542b67e268aa1ac8d6dbe973120ef7ee", size = 79791, upload-time = "2025-06-09T23:01:09.368Z" }, - { url = 
"https://files.pythonhosted.org/packages/83/2e/5b70b6a3325363293fe5fc3ae74cdcbc3e996c2a11dde2fd9f1fb0776d19/frozenlist-1.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d1a81c85417b914139e3a9b995d4a1c84559afc839a93cf2cb7f15e6e5f6ed2d", size = 47165, upload-time = "2025-06-09T23:01:10.653Z" }, - { url = "https://files.pythonhosted.org/packages/f4/25/a0895c99270ca6966110f4ad98e87e5662eab416a17e7fd53c364bf8b954/frozenlist-1.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cbb65198a9132ebc334f237d7b0df163e4de83fb4f2bdfe46c1e654bdb0c5d43", size = 45881, upload-time = "2025-06-09T23:01:12.296Z" }, - { url = "https://files.pythonhosted.org/packages/19/7c/71bb0bbe0832793c601fff68cd0cf6143753d0c667f9aec93d3c323f4b55/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dab46c723eeb2c255a64f9dc05b8dd601fde66d6b19cdb82b2e09cc6ff8d8b5d", size = 232409, upload-time = "2025-06-09T23:01:13.641Z" }, - { url = "https://files.pythonhosted.org/packages/c0/45/ed2798718910fe6eb3ba574082aaceff4528e6323f9a8570be0f7028d8e9/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6aeac207a759d0dedd2e40745575ae32ab30926ff4fa49b1635def65806fddee", size = 225132, upload-time = "2025-06-09T23:01:15.264Z" }, - { url = "https://files.pythonhosted.org/packages/ba/e2/8417ae0f8eacb1d071d4950f32f229aa6bf68ab69aab797b72a07ea68d4f/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bd8c4e58ad14b4fa7802b8be49d47993182fdd4023393899632c88fd8cd994eb", size = 237638, upload-time = "2025-06-09T23:01:16.752Z" }, - { url = "https://files.pythonhosted.org/packages/f8/b7/2ace5450ce85f2af05a871b8c8719b341294775a0a6c5585d5e6170f2ce7/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04fb24d104f425da3540ed83cbfc31388a586a7696142004c577fa61c6298c3f", size = 233539, upload-time = "2025-06-09T23:01:18.202Z" }, - { url = 
"https://files.pythonhosted.org/packages/46/b9/6989292c5539553dba63f3c83dc4598186ab2888f67c0dc1d917e6887db6/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a5c505156368e4ea6b53b5ac23c92d7edc864537ff911d2fb24c140bb175e60", size = 215646, upload-time = "2025-06-09T23:01:19.649Z" }, - { url = "https://files.pythonhosted.org/packages/72/31/bc8c5c99c7818293458fe745dab4fd5730ff49697ccc82b554eb69f16a24/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bd7eb96a675f18aa5c553eb7ddc24a43c8c18f22e1f9925528128c052cdbe00", size = 232233, upload-time = "2025-06-09T23:01:21.175Z" }, - { url = "https://files.pythonhosted.org/packages/59/52/460db4d7ba0811b9ccb85af996019f5d70831f2f5f255f7cc61f86199795/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:05579bf020096fe05a764f1f84cd104a12f78eaab68842d036772dc6d4870b4b", size = 227996, upload-time = "2025-06-09T23:01:23.098Z" }, - { url = "https://files.pythonhosted.org/packages/ba/c9/f4b39e904c03927b7ecf891804fd3b4df3db29b9e487c6418e37988d6e9d/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:376b6222d114e97eeec13d46c486facd41d4f43bab626b7c3f6a8b4e81a5192c", size = 242280, upload-time = "2025-06-09T23:01:24.808Z" }, - { url = "https://files.pythonhosted.org/packages/b8/33/3f8d6ced42f162d743e3517781566b8481322be321b486d9d262adf70bfb/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0aa7e176ebe115379b5b1c95b4096fb1c17cce0847402e227e712c27bdb5a949", size = 217717, upload-time = "2025-06-09T23:01:26.28Z" }, - { url = "https://files.pythonhosted.org/packages/3e/e8/ad683e75da6ccef50d0ab0c2b2324b32f84fc88ceee778ed79b8e2d2fe2e/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3fbba20e662b9c2130dc771e332a99eff5da078b2b2648153a40669a6d0e36ca", size = 236644, upload-time = "2025-06-09T23:01:27.887Z" }, - { 
url = "https://files.pythonhosted.org/packages/b2/14/8d19ccdd3799310722195a72ac94ddc677541fb4bef4091d8e7775752360/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f3f4410a0a601d349dd406b5713fec59b4cee7e71678d5b17edda7f4655a940b", size = 238879, upload-time = "2025-06-09T23:01:29.524Z" }, - { url = "https://files.pythonhosted.org/packages/ce/13/c12bf657494c2fd1079a48b2db49fa4196325909249a52d8f09bc9123fd7/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e2cdfaaec6a2f9327bf43c933c0319a7c429058e8537c508964a133dffee412e", size = 232502, upload-time = "2025-06-09T23:01:31.287Z" }, - { url = "https://files.pythonhosted.org/packages/d7/8b/e7f9dfde869825489382bc0d512c15e96d3964180c9499efcec72e85db7e/frozenlist-1.7.0-cp313-cp313-win32.whl", hash = "sha256:5fc4df05a6591c7768459caba1b342d9ec23fa16195e744939ba5914596ae3e1", size = 39169, upload-time = "2025-06-09T23:01:35.503Z" }, - { url = "https://files.pythonhosted.org/packages/35/89/a487a98d94205d85745080a37860ff5744b9820a2c9acbcdd9440bfddf98/frozenlist-1.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:52109052b9791a3e6b5d1b65f4b909703984b770694d3eb64fad124c835d7cba", size = 43219, upload-time = "2025-06-09T23:01:36.784Z" }, - { url = "https://files.pythonhosted.org/packages/56/d5/5c4cf2319a49eddd9dd7145e66c4866bdc6f3dbc67ca3d59685149c11e0d/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:a6f86e4193bb0e235ef6ce3dde5cbabed887e0b11f516ce8a0f4d3b33078ec2d", size = 84345, upload-time = "2025-06-09T23:01:38.295Z" }, - { url = "https://files.pythonhosted.org/packages/a4/7d/ec2c1e1dc16b85bc9d526009961953df9cec8481b6886debb36ec9107799/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:82d664628865abeb32d90ae497fb93df398a69bb3434463d172b80fc25b0dd7d", size = 48880, upload-time = "2025-06-09T23:01:39.887Z" }, - { url = 
"https://files.pythonhosted.org/packages/69/86/f9596807b03de126e11e7d42ac91e3d0b19a6599c714a1989a4e85eeefc4/frozenlist-1.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:912a7e8375a1c9a68325a902f3953191b7b292aa3c3fb0d71a216221deca460b", size = 48498, upload-time = "2025-06-09T23:01:41.318Z" }, - { url = "https://files.pythonhosted.org/packages/5e/cb/df6de220f5036001005f2d726b789b2c0b65f2363b104bbc16f5be8084f8/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9537c2777167488d539bc5de2ad262efc44388230e5118868e172dd4a552b146", size = 292296, upload-time = "2025-06-09T23:01:42.685Z" }, - { url = "https://files.pythonhosted.org/packages/83/1f/de84c642f17c8f851a2905cee2dae401e5e0daca9b5ef121e120e19aa825/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f34560fb1b4c3e30ba35fa9a13894ba39e5acfc5f60f57d8accde65f46cc5e74", size = 273103, upload-time = "2025-06-09T23:01:44.166Z" }, - { url = "https://files.pythonhosted.org/packages/88/3c/c840bfa474ba3fa13c772b93070893c6e9d5c0350885760376cbe3b6c1b3/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:acd03d224b0175f5a850edc104ac19040d35419eddad04e7cf2d5986d98427f1", size = 292869, upload-time = "2025-06-09T23:01:45.681Z" }, - { url = "https://files.pythonhosted.org/packages/a6/1c/3efa6e7d5a39a1d5ef0abeb51c48fb657765794a46cf124e5aca2c7a592c/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2038310bc582f3d6a09b3816ab01737d60bf7b1ec70f5356b09e84fb7408ab1", size = 291467, upload-time = "2025-06-09T23:01:47.234Z" }, - { url = "https://files.pythonhosted.org/packages/4f/00/d5c5e09d4922c395e2f2f6b79b9a20dab4b67daaf78ab92e7729341f61f6/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8c05e4c8e5f36e5e088caa1bf78a687528f83c043706640a92cb76cd6999384", size = 
266028, upload-time = "2025-06-09T23:01:48.819Z" }, - { url = "https://files.pythonhosted.org/packages/4e/27/72765be905619dfde25a7f33813ac0341eb6b076abede17a2e3fbfade0cb/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:765bb588c86e47d0b68f23c1bee323d4b703218037765dcf3f25c838c6fecceb", size = 284294, upload-time = "2025-06-09T23:01:50.394Z" }, - { url = "https://files.pythonhosted.org/packages/88/67/c94103a23001b17808eb7dd1200c156bb69fb68e63fcf0693dde4cd6228c/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:32dc2e08c67d86d0969714dd484fd60ff08ff81d1a1e40a77dd34a387e6ebc0c", size = 281898, upload-time = "2025-06-09T23:01:52.234Z" }, - { url = "https://files.pythonhosted.org/packages/42/34/a3e2c00c00f9e2a9db5653bca3fec306349e71aff14ae45ecc6d0951dd24/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:c0303e597eb5a5321b4de9c68e9845ac8f290d2ab3f3e2c864437d3c5a30cd65", size = 290465, upload-time = "2025-06-09T23:01:53.788Z" }, - { url = "https://files.pythonhosted.org/packages/bb/73/f89b7fbce8b0b0c095d82b008afd0590f71ccb3dee6eee41791cf8cd25fd/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:a47f2abb4e29b3a8d0b530f7c3598badc6b134562b1a5caee867f7c62fee51e3", size = 266385, upload-time = "2025-06-09T23:01:55.769Z" }, - { url = "https://files.pythonhosted.org/packages/cd/45/e365fdb554159462ca12df54bc59bfa7a9a273ecc21e99e72e597564d1ae/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:3d688126c242a6fabbd92e02633414d40f50bb6002fa4cf995a1d18051525657", size = 288771, upload-time = "2025-06-09T23:01:57.4Z" }, - { url = "https://files.pythonhosted.org/packages/00/11/47b6117002a0e904f004d70ec5194fe9144f117c33c851e3d51c765962d0/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:4e7e9652b3d367c7bd449a727dc79d5043f48b88d0cbfd4f9f1060cf2b414104", size = 288206, upload-time = 
"2025-06-09T23:01:58.936Z" }, - { url = "https://files.pythonhosted.org/packages/40/37/5f9f3c3fd7f7746082ec67bcdc204db72dad081f4f83a503d33220a92973/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1a85e345b4c43db8b842cab1feb41be5cc0b10a1830e6295b69d7310f99becaf", size = 282620, upload-time = "2025-06-09T23:02:00.493Z" }, - { url = "https://files.pythonhosted.org/packages/0b/31/8fbc5af2d183bff20f21aa743b4088eac4445d2bb1cdece449ae80e4e2d1/frozenlist-1.7.0-cp313-cp313t-win32.whl", hash = "sha256:3a14027124ddb70dfcee5148979998066897e79f89f64b13328595c4bdf77c81", size = 43059, upload-time = "2025-06-09T23:02:02.072Z" }, - { url = "https://files.pythonhosted.org/packages/bb/ed/41956f52105b8dbc26e457c5705340c67c8cc2b79f394b79bffc09d0e938/frozenlist-1.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3bf8010d71d4507775f658e9823210b7427be36625b387221642725b515dcf3e", size = 47516, upload-time = "2025-06-09T23:02:03.779Z" }, - { url = "https://files.pythonhosted.org/packages/ee/45/b82e3c16be2182bff01179db177fe144d58b5dc787a7d4492c6ed8b9317f/frozenlist-1.7.0-py3-none-any.whl", hash = "sha256:9a5af342e34f7e97caf8c995864c7a396418ae2859cc6fdf1b1073020d516a7e", size = 13106, upload-time = "2025-06-09T23:02:34.204Z" }, + { url = "https://files.pythonhosted.org/packages/69/29/948b9aa87e75820a38650af445d2ef2b6b8a6fab1a23b6bb9e4ef0be2d59/frozenlist-1.8.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:78f7b9e5d6f2fdb88cdde9440dc147259b62b9d3b019924def9f6478be254ac1", size = 87782, upload-time = "2025-10-06T05:36:06.649Z" }, + { url = "https://files.pythonhosted.org/packages/64/80/4f6e318ee2a7c0750ed724fa33a4bdf1eacdc5a39a7a24e818a773cd91af/frozenlist-1.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:229bf37d2e4acdaf808fd3f06e854a4a7a3661e871b10dc1f8f1896a3b05f18b", size = 50594, upload-time = "2025-10-06T05:36:07.69Z" }, + { url = 
"https://files.pythonhosted.org/packages/2b/94/5c8a2b50a496b11dd519f4a24cb5496cf125681dd99e94c604ccdea9419a/frozenlist-1.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f833670942247a14eafbb675458b4e61c82e002a148f49e68257b79296e865c4", size = 50448, upload-time = "2025-10-06T05:36:08.78Z" }, + { url = "https://files.pythonhosted.org/packages/6a/bd/d91c5e39f490a49df14320f4e8c80161cfcce09f1e2cde1edd16a551abb3/frozenlist-1.8.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:494a5952b1c597ba44e0e78113a7266e656b9794eec897b19ead706bd7074383", size = 242411, upload-time = "2025-10-06T05:36:09.801Z" }, + { url = "https://files.pythonhosted.org/packages/8f/83/f61505a05109ef3293dfb1ff594d13d64a2324ac3482be2cedc2be818256/frozenlist-1.8.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:96f423a119f4777a4a056b66ce11527366a8bb92f54e541ade21f2374433f6d4", size = 243014, upload-time = "2025-10-06T05:36:11.394Z" }, + { url = "https://files.pythonhosted.org/packages/d8/cb/cb6c7b0f7d4023ddda30cf56b8b17494eb3a79e3fda666bf735f63118b35/frozenlist-1.8.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3462dd9475af2025c31cc61be6652dfa25cbfb56cbbf52f4ccfe029f38decaf8", size = 234909, upload-time = "2025-10-06T05:36:12.598Z" }, + { url = "https://files.pythonhosted.org/packages/31/c5/cd7a1f3b8b34af009fb17d4123c5a778b44ae2804e3ad6b86204255f9ec5/frozenlist-1.8.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c4c800524c9cd9bac5166cd6f55285957fcfc907db323e193f2afcd4d9abd69b", size = 250049, upload-time = "2025-10-06T05:36:14.065Z" }, + { url = "https://files.pythonhosted.org/packages/c0/01/2f95d3b416c584a1e7f0e1d6d31998c4a795f7544069ee2e0962a4b60740/frozenlist-1.8.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:d6a5df73acd3399d893dafc71663ad22534b5aa4f94e8a2fabfe856c3c1b6a52", size = 256485, upload-time = "2025-10-06T05:36:15.39Z" }, + { url = "https://files.pythonhosted.org/packages/ce/03/024bf7720b3abaebcff6d0793d73c154237b85bdf67b7ed55e5e9596dc9a/frozenlist-1.8.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:405e8fe955c2280ce66428b3ca55e12b3c4e9c336fb2103a4937e891c69a4a29", size = 237619, upload-time = "2025-10-06T05:36:16.558Z" }, + { url = "https://files.pythonhosted.org/packages/69/fa/f8abdfe7d76b731f5d8bd217827cf6764d4f1d9763407e42717b4bed50a0/frozenlist-1.8.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:908bd3f6439f2fef9e85031b59fd4f1297af54415fb60e4254a95f75b3cab3f3", size = 250320, upload-time = "2025-10-06T05:36:17.821Z" }, + { url = "https://files.pythonhosted.org/packages/f5/3c/b051329f718b463b22613e269ad72138cc256c540f78a6de89452803a47d/frozenlist-1.8.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:294e487f9ec720bd8ffcebc99d575f7eff3568a08a253d1ee1a0378754b74143", size = 246820, upload-time = "2025-10-06T05:36:19.046Z" }, + { url = "https://files.pythonhosted.org/packages/0f/ae/58282e8f98e444b3f4dd42448ff36fa38bef29e40d40f330b22e7108f565/frozenlist-1.8.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:74c51543498289c0c43656701be6b077f4b265868fa7f8a8859c197006efb608", size = 250518, upload-time = "2025-10-06T05:36:20.763Z" }, + { url = "https://files.pythonhosted.org/packages/8f/96/007e5944694d66123183845a106547a15944fbbb7154788cbf7272789536/frozenlist-1.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:776f352e8329135506a1d6bf16ac3f87bc25b28e765949282dcc627af36123aa", size = 239096, upload-time = "2025-10-06T05:36:22.129Z" }, + { url = "https://files.pythonhosted.org/packages/66/bb/852b9d6db2fa40be96f29c0d1205c306288f0684df8fd26ca1951d461a56/frozenlist-1.8.0-cp312-cp312-win32.whl", hash = "sha256:433403ae80709741ce34038da08511d4a77062aa924baf411ef73d1146e74faf", size = 39985, upload-time = 
"2025-10-06T05:36:23.661Z" }, + { url = "https://files.pythonhosted.org/packages/b8/af/38e51a553dd66eb064cdf193841f16f077585d4d28394c2fa6235cb41765/frozenlist-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:34187385b08f866104f0c0617404c8eb08165ab1272e884abc89c112e9c00746", size = 44591, upload-time = "2025-10-06T05:36:24.958Z" }, + { url = "https://files.pythonhosted.org/packages/a7/06/1dc65480ab147339fecc70797e9c2f69d9cea9cf38934ce08df070fdb9cb/frozenlist-1.8.0-cp312-cp312-win_arm64.whl", hash = "sha256:fe3c58d2f5db5fbd18c2987cba06d51b0529f52bc3a6cdc33d3f4eab725104bd", size = 40102, upload-time = "2025-10-06T05:36:26.333Z" }, + { url = "https://files.pythonhosted.org/packages/9a/9a/e35b4a917281c0b8419d4207f4334c8e8c5dbf4f3f5f9ada73958d937dcc/frozenlist-1.8.0-py3-none-any.whl", hash = "sha256:0c18a16eab41e82c295618a77502e17b195883241c563b00f0aa5106fc4eaa0d", size = 13409, upload-time = "2025-10-06T05:38:16.721Z" }, +] + +[[package]] +name = "fsspec" +version = "2025.12.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b6/27/954057b0d1f53f086f681755207dda6de6c660ce133c829158e8e8fe7895/fsspec-2025.12.0.tar.gz", hash = "sha256:c505de011584597b1060ff778bb664c1bc022e87921b0e4f10cc9c44f9635973", size = 309748, upload-time = "2025-12-03T15:23:42.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/c7/b64cae5dba3a1b138d7123ec36bb5ccd39d39939f18454407e5468f4763f/fsspec-2025.12.0-py3-none-any.whl", hash = "sha256:8bf1fe301b7d8acfa6e8571e3b1c3d158f909666642431cc78a1b7b4dbc5ec5b", size = 201422, upload-time = "2025-12-03T15:23:41.434Z" }, +] + +[[package]] +name = "future" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a7/b2/4140c69c6a66432916b26158687e821ba631a4c9273c474343badf84d3ba/future-1.0.0.tar.gz", hash = "sha256:bd2968309307861edae1458a4f8a4f3598c03be43b97521076aebf5d94c07b05", size = 1228490, upload-time 
= "2024-02-21T11:52:38.461Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/da/71/ae30dadffc90b9006d77af76b393cb9dfbfc9629f339fc1574a1c52e6806/future-1.0.0-py3-none-any.whl", hash = "sha256:929292d34f5872e70396626ef385ec22355a1fae8ad29e1a734c3e43f9fbc216", size = 491326, upload-time = "2024-02-21T11:52:35.956Z" }, ] [[package]] name = "gevent" -version = "25.5.1" +version = "25.9.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cffi", marker = "platform_python_implementation == 'CPython' and sys_platform == 'win32'" }, @@ -396,30 +976,21 @@ dependencies = [ { name = "zope-event" }, { name = "zope-interface" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f1/58/267e8160aea00ab00acd2de97197eecfe307064a376fb5c892870a8a6159/gevent-25.5.1.tar.gz", hash = "sha256:582c948fa9a23188b890d0bc130734a506d039a2e5ad87dae276a456cc683e61", size = 6388207, upload-time = "2025-05-12T12:57:59.833Z" } +sdist = { url = "https://files.pythonhosted.org/packages/9e/48/b3ef2673ffb940f980966694e40d6d32560f3ffa284ecaeb5ea3a90a6d3f/gevent-25.9.1.tar.gz", hash = "sha256:adf9cd552de44a4e6754c51ff2e78d9193b7fa6eab123db9578a210e657235dd", size = 5059025, upload-time = "2025-09-17T16:15:34.528Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/58/c5/cf71423666a0b83db3d7e3f85788bc47d573fca5fe62b798fe2c4273de7c/gevent-25.5.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d87c0a1bd809d8f70f96b9b229779ec6647339830b8888a192beed33ac8d129f", size = 2909333, upload-time = "2025-05-12T11:11:34.883Z" }, - { url = "https://files.pythonhosted.org/packages/26/7e/d2f174ee8bec6eb85d961ca203bc599d059c857b8412e367b8fa206603a5/gevent-25.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b87a4b66edb3808d4d07bbdb0deed5a710cf3d3c531e082759afd283758bb649", size = 1788420, upload-time = "2025-05-12T11:52:30.306Z" }, - { url = 
"https://files.pythonhosted.org/packages/fe/f3/3aba8c147b9108e62ba348c726fe38ae69735a233db425565227336e8ce6/gevent-25.5.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f076779050029a82feb0cb1462021d3404d22f80fa76a181b1a7889cd4d6b519", size = 1868854, upload-time = "2025-05-12T11:54:21.564Z" }, - { url = "https://files.pythonhosted.org/packages/c6/b1/11a5453f8fcebe90a456471fad48bd154c6a62fcb96e3475a5e408d05fc8/gevent-25.5.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bb673eb291c19370f69295f7a881a536451408481e2e3deec3f41dedb7c281ec", size = 1833946, upload-time = "2025-05-12T12:00:05.514Z" }, - { url = "https://files.pythonhosted.org/packages/70/1c/37d4a62303f86e6af67660a8df38c1171b7290df61b358e618c6fea79567/gevent-25.5.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1325ed44225c8309c0dd188bdbbbee79e1df8c11ceccac226b861c7d52e4837", size = 2070583, upload-time = "2025-05-12T11:33:02.803Z" }, - { url = "https://files.pythonhosted.org/packages/4b/8f/3b14929ff28263aba1d268ea97bcf104be1a86ba6f6bb4633838e7a1905e/gevent-25.5.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fcd5bcad3102bde686d0adcc341fade6245186050ce14386d547ccab4bd54310", size = 1808341, upload-time = "2025-05-12T11:59:59.154Z" }, - { url = "https://files.pythonhosted.org/packages/2f/fc/674ec819fb8a96e482e4d21f8baa43d34602dba09dfce7bbdc8700899d1b/gevent-25.5.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1a93062609e8fa67ec97cd5fb9206886774b2a09b24887f40148c9c37e6fb71c", size = 2137974, upload-time = "2025-05-12T11:40:54.78Z" }, - { url = "https://files.pythonhosted.org/packages/05/9a/048b7f5e28c54e4595ad4a8ad3c338fa89560e558db2bbe8273f44f030de/gevent-25.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:2534c23dc32bed62b659ed4fd9e198906179e68b26c9276a897e04163bdde806", size = 1638344, upload-time = "2025-05-12T12:08:31.776Z" }, - { url = 
"https://files.pythonhosted.org/packages/10/25/2162b38d7b48e08865db6772d632bd1648136ce2bb50e340565e45607cad/gevent-25.5.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:a022a9de9275ce0b390b7315595454258c525dc8287a03f1a6cacc5878ab7cbc", size = 2928044, upload-time = "2025-05-12T11:11:36.33Z" }, - { url = "https://files.pythonhosted.org/packages/1b/e0/dbd597a964ed00176da122ea759bf2a6c1504f1e9f08e185379f92dc355f/gevent-25.5.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fae8533f9d0ef3348a1f503edcfb531ef7a0236b57da1e24339aceb0ce52922", size = 1788751, upload-time = "2025-05-12T11:52:32.643Z" }, - { url = "https://files.pythonhosted.org/packages/f1/74/960cc4cf4c9c90eafbe0efc238cdf588862e8e278d0b8c0d15a0da4ed480/gevent-25.5.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c7b32d9c3b5294b39ea9060e20c582e49e1ec81edbfeae6cf05f8ad0829cb13d", size = 1869766, upload-time = "2025-05-12T11:54:23.903Z" }, - { url = "https://files.pythonhosted.org/packages/56/78/fa84b1c7db79b156929685db09a7c18c3127361dca18a09e998e98118506/gevent-25.5.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b95815fe44f318ebbfd733b6428b4cb18cc5e68f1c40e8501dd69cc1f42a83d", size = 1835358, upload-time = "2025-05-12T12:00:06.794Z" }, - { url = "https://files.pythonhosted.org/packages/00/5c/bfefe3822bbca5b83bfad256c82251b3f5be13d52d14e17a786847b9b625/gevent-25.5.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d316529b70d325b183b2f3f5cde958911ff7be12eb2b532b5c301f915dbbf1e", size = 2073071, upload-time = "2025-05-12T11:33:04.2Z" }, - { url = "https://files.pythonhosted.org/packages/20/e4/08a77a3839a37db96393dea952e992d5846a881b887986dde62ead6b48a1/gevent-25.5.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f6ba33c13db91ffdbb489a4f3d177a261ea1843923e1d68a5636c53fe98fa5ce", size = 1809805, upload-time = "2025-05-12T12:00:00.537Z" }, - { url = 
"https://files.pythonhosted.org/packages/2b/ac/28848348f790c1283df74b0fc0a554271d0606676470f848eccf84eae42a/gevent-25.5.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37ee34b77c7553777c0b8379915f75934c3f9c8cd32f7cd098ea43c9323c2276", size = 2138305, upload-time = "2025-05-12T11:40:56.566Z" }, - { url = "https://files.pythonhosted.org/packages/52/9e/0e9e40facd2d714bfb00f71fc6dacaacc82c24c1c2e097bf6461e00dec9f/gevent-25.5.1-cp313-cp313-win_amd64.whl", hash = "sha256:9fa6aa0da224ed807d3b76cdb4ee8b54d4d4d5e018aed2478098e685baae7896", size = 1637444, upload-time = "2025-05-12T12:17:45.995Z" }, - { url = "https://files.pythonhosted.org/packages/60/16/b71171e97ec7b4ded8669542f4369d88d5a289e2704efbbde51e858e062a/gevent-25.5.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:0bacf89a65489d26c7087669af89938d5bfd9f7afb12a07b57855b9fad6ccbd0", size = 2937113, upload-time = "2025-05-12T11:12:03.191Z" }, + { url = "https://files.pythonhosted.org/packages/f7/49/e55930ba5259629eb28ac7ee1abbca971996a9165f902f0249b561602f24/gevent-25.9.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:46b188248c84ffdec18a686fcac5dbb32365d76912e14fda350db5dc0bfd4f86", size = 2955991, upload-time = "2025-09-17T14:52:30.568Z" }, + { url = "https://files.pythonhosted.org/packages/aa/88/63dc9e903980e1da1e16541ec5c70f2b224ec0a8e34088cb42794f1c7f52/gevent-25.9.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f2b54ea3ca6f0c763281cd3f96010ac7e98c2e267feb1221b5a26e2ca0b9a692", size = 1808503, upload-time = "2025-09-17T15:41:25.59Z" }, + { url = "https://files.pythonhosted.org/packages/7a/8d/7236c3a8f6ef7e94c22e658397009596fa90f24c7d19da11ad7ab3a9248e/gevent-25.9.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:7a834804ac00ed8a92a69d3826342c677be651b1c3cd66cc35df8bc711057aa2", size = 1890001, upload-time = "2025-09-17T15:49:01.227Z" }, + { url = 
"https://files.pythonhosted.org/packages/4f/63/0d7f38c4a2085ecce26b50492fc6161aa67250d381e26d6a7322c309b00f/gevent-25.9.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:323a27192ec4da6b22a9e51c3d9d896ff20bc53fdc9e45e56eaab76d1c39dd74", size = 1855335, upload-time = "2025-09-17T15:49:20.582Z" }, + { url = "https://files.pythonhosted.org/packages/95/18/da5211dfc54c7a57e7432fd9a6ffeae1ce36fe5a313fa782b1c96529ea3d/gevent-25.9.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6ea78b39a2c51d47ff0f130f4c755a9a4bbb2dd9721149420ad4712743911a51", size = 2109046, upload-time = "2025-09-17T15:15:13.817Z" }, + { url = "https://files.pythonhosted.org/packages/a6/5a/7bb5ec8e43a2c6444853c4a9f955f3e72f479d7c24ea86c95fb264a2de65/gevent-25.9.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:dc45cd3e1cc07514a419960af932a62eb8515552ed004e56755e4bf20bad30c5", size = 1827099, upload-time = "2025-09-17T15:52:41.384Z" }, + { url = "https://files.pythonhosted.org/packages/ca/d4/b63a0a60635470d7d986ef19897e893c15326dd69e8fb342c76a4f07fe9e/gevent-25.9.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:34e01e50c71eaf67e92c186ee0196a039d6e4f4b35670396baed4a2d8f1b347f", size = 2172623, upload-time = "2025-09-17T15:24:12.03Z" }, + { url = "https://files.pythonhosted.org/packages/d5/98/caf06d5d22a7c129c1fb2fc1477306902a2c8ddfd399cd26bbbd4caf2141/gevent-25.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:4acd6bcd5feabf22c7c5174bd3b9535ee9f088d2bbce789f740ad8d6554b18f3", size = 1682837, upload-time = "2025-09-17T19:48:47.318Z" }, ] [[package]] name = "geventhttpclient" -version = "2.3.4" +version = "2.3.7" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "brotli" }, @@ -427,76 +998,61 @@ dependencies = [ { name = "gevent" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/89/19/1ca8de73dcc0596d3df01be299e940d7fc3bccbeb6f62bb8dd2d427a3a50/geventhttpclient-2.3.4.tar.gz", hash = 
"sha256:1749f75810435a001fc6d4d7526c92cf02b39b30ab6217a886102f941c874222", size = 83545, upload-time = "2025-06-11T13:18:14.144Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ac/48/4bca27d59960fc1f41b783ea7d6aa2477f8ff573eced7914ec57e61d7059/geventhttpclient-2.3.7.tar.gz", hash = "sha256:06c28d3d1aabddbaaf61721401a0e5852b216a1845ef2580f3819161e44e9b1c", size = 83708, upload-time = "2025-12-07T19:48:53.153Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/4f/72/dcbc6dbf838549b7b0c2c18c1365d2580eb7456939e4b608c3ab213fce78/geventhttpclient-2.3.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9ac30c38d86d888b42bb2ab2738ab9881199609e9fa9a153eb0c66fc9188c6cb", size = 71984, upload-time = "2025-06-11T13:17:09.126Z" }, - { url = "https://files.pythonhosted.org/packages/4c/f9/74aa8c556364ad39b238919c954a0da01a6154ad5e85a1d1ab5f9f5ac186/geventhttpclient-2.3.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4b802000a4fad80fa57e895009671d6e8af56777e3adf0d8aee0807e96188fd9", size = 52631, upload-time = "2025-06-11T13:17:10.061Z" }, - { url = "https://files.pythonhosted.org/packages/11/1a/bc4b70cba8b46be8b2c6ca5b8067c4f086f8c90915eb68086ab40ff6243d/geventhttpclient-2.3.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:461e4d9f4caee481788ec95ac64e0a4a087c1964ddbfae9b6f2dc51715ba706c", size = 51991, upload-time = "2025-06-11T13:17:11.049Z" }, - { url = "https://files.pythonhosted.org/packages/b0/f5/8d0f1e998f6d933c251b51ef92d11f7eb5211e3cd579018973a2b455f7c5/geventhttpclient-2.3.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41f2dcc0805551ea9d49f9392c3b9296505a89b9387417b148655d0d8251b36e", size = 119012, upload-time = "2025-06-11T13:17:11.956Z" }, - { url = "https://files.pythonhosted.org/packages/ea/0e/59e4ab506b3c19fc72e88ca344d150a9028a00c400b1099637100bec26fc/geventhttpclient-2.3.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:62f3a29bf242ecca6360d497304900683fd8f42cbf1de8d0546c871819251dad", size = 124565, upload-time = "2025-06-11T13:17:12.896Z" }, - { url = "https://files.pythonhosted.org/packages/39/5d/dcbd34dfcda0c016b4970bd583cb260cc5ebfc35b33d0ec9ccdb2293587a/geventhttpclient-2.3.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8714a3f2c093aeda3ffdb14c03571d349cb3ed1b8b461d9f321890659f4a5dbf", size = 115573, upload-time = "2025-06-11T13:17:13.937Z" }, - { url = "https://files.pythonhosted.org/packages/03/51/89af99e4805e9ce7f95562dfbd23c0b0391830831e43d58f940ec74489ac/geventhttpclient-2.3.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b11f38b74bab75282db66226197024a731250dcbe25542fd4e85ac5313547332", size = 114260, upload-time = "2025-06-11T13:17:14.913Z" }, - { url = "https://files.pythonhosted.org/packages/b3/ec/3a3000bda432953abcc6f51d008166fa7abc1eeddd1f0246933d83854f73/geventhttpclient-2.3.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fccc2023a89dfbce2e1b1409b967011e45d41808df81b7fa0259397db79ba647", size = 111592, upload-time = "2025-06-11T13:17:15.879Z" }, - { url = "https://files.pythonhosted.org/packages/d8/a3/88fd71fe6bbe1315a2d161cbe2cc7810c357d99bced113bea1668ede8bcf/geventhttpclient-2.3.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9d54b8e9a44890159ae36ba4ae44efd8bb79ff519055137a340d357538a68aa3", size = 113216, upload-time = "2025-06-11T13:17:16.883Z" }, - { url = "https://files.pythonhosted.org/packages/52/eb/20435585a6911b26e65f901a827ef13551c053133926f8c28a7cca0fb08e/geventhttpclient-2.3.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:407cb68a3c3a2c4f5d503930298f2b26ae68137d520e8846d8e230a9981d9334", size = 118450, upload-time = "2025-06-11T13:17:17.968Z" }, - { url = 
"https://files.pythonhosted.org/packages/2f/79/82782283d613570373990b676a0966c1062a38ca8f41a0f20843c5808e01/geventhttpclient-2.3.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:54fbbcca2dcf06f12a337dd8f98417a09a49aa9d9706aa530fc93acb59b7d83c", size = 112226, upload-time = "2025-06-11T13:17:18.942Z" }, - { url = "https://files.pythonhosted.org/packages/9c/c4/417d12fc2a31ad93172b03309c7f8c3a8bbd0cf25b95eb7835de26b24453/geventhttpclient-2.3.4-cp312-cp312-win32.whl", hash = "sha256:83143b41bde2eb010c7056f142cb764cfbf77f16bf78bda2323a160767455cf5", size = 48365, upload-time = "2025-06-11T13:17:20.096Z" }, - { url = "https://files.pythonhosted.org/packages/cf/f4/7e5ee2f460bbbd09cb5d90ff63a1cf80d60f1c60c29dac20326324242377/geventhttpclient-2.3.4-cp312-cp312-win_amd64.whl", hash = "sha256:46eda9a9137b0ca7886369b40995d2a43a5dff033d0a839a54241015d1845d41", size = 48961, upload-time = "2025-06-11T13:17:21.111Z" }, - { url = "https://files.pythonhosted.org/packages/ff/ad/132fddde6e2dca46d6a86316962437acd2bfaeb264db4e0fae83c529eb04/geventhttpclient-2.3.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:be64c5583884c407fc748dedbcb083475d5b138afb23c6bc0836cbad228402cc", size = 71967, upload-time = "2025-06-11T13:17:22.121Z" }, - { url = "https://files.pythonhosted.org/packages/f4/34/5e77d9a31d93409a8519cf573843288565272ae5a016be9c9293f56c50a1/geventhttpclient-2.3.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:15b2567137734183efda18e4d6245b18772e648b6a25adea0eba8b3a8b0d17e8", size = 52632, upload-time = "2025-06-11T13:17:23.016Z" }, - { url = "https://files.pythonhosted.org/packages/47/d2/cf0dbc333304700e68cee9347f654b56e8b0f93a341b8b0d027ee96800d6/geventhttpclient-2.3.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a4bca1151b8cd207eef6d5cb3c720c562b2aa7293cf113a68874e235cfa19c31", size = 51980, upload-time = "2025-06-11T13:17:23.933Z" }, - { url = 
"https://files.pythonhosted.org/packages/ec/5b/c0c30ccd9d06c603add3f2d6abd68bd98430ee9730dc5478815759cf07f7/geventhttpclient-2.3.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b50d9daded5d36193d67e2fc30e59752262fcbbdc86e8222c7df6b93af0346a", size = 118987, upload-time = "2025-06-11T13:17:24.97Z" }, - { url = "https://files.pythonhosted.org/packages/4f/56/095a46af86476372064128162eccbd2ba4a7721503759890d32ea701d5fd/geventhttpclient-2.3.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fe705e7656bc6982a463a4ed7f9b1db8c78c08323f1d45d0d1d77063efa0ce96", size = 124519, upload-time = "2025-06-11T13:17:25.933Z" }, - { url = "https://files.pythonhosted.org/packages/ae/12/7c9ba94b58f7954a83d33183152ce6bf5bda10c08ebe47d79a314cd33e29/geventhttpclient-2.3.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69668589359db4cbb9efa327dda5735d1e74145e6f0a9ffa50236d15cf904053", size = 115574, upload-time = "2025-06-11T13:17:27.331Z" }, - { url = "https://files.pythonhosted.org/packages/73/77/c4e7c5bce0199428fdb811d6adf6e347180d89eaa1b9b723f711f6bbc830/geventhttpclient-2.3.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9ba526e07ccaf4f1c2cd3395dda221139f01468b6eee1190d4a616f187a0378", size = 114222, upload-time = "2025-06-11T13:17:28.289Z" }, - { url = "https://files.pythonhosted.org/packages/a3/79/58802d300950dbd7d4e31eb24afd7c270fc7900ff3923fd266cc915bb086/geventhttpclient-2.3.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:525bd192705b5cb41a7cc3fe41fca194bfd6b5b59997ab9fe68fe0a82dab6140", size = 111682, upload-time = "2025-06-11T13:17:29.291Z" }, - { url = "https://files.pythonhosted.org/packages/d3/9c/ae04e4033459b8142788dad80d8d0b42d460bc6db9150e0815c2d0a02cb4/geventhttpclient-2.3.4-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:42b6f6afb0d3aab6a013c9cdb97e19bf4fe08695975670d0a018113d24cb344c", size = 113252, upload-time = "2025-06-11T13:17:30.357Z" }, - { url = "https://files.pythonhosted.org/packages/d3/67/5ae5d5878b06397a7b54334d1d31bb78cefc950ae890c2b8f5c917eb271e/geventhttpclient-2.3.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:227579b703085c4e5c6d5217ad6565b19ac8d1164404133e5874efaae1905114", size = 118426, upload-time = "2025-06-11T13:17:31.363Z" }, - { url = "https://files.pythonhosted.org/packages/ca/36/9065bb51f261950c42eddf8718e01a9ff344d8082e31317a8b6677be9bd6/geventhttpclient-2.3.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d1d0db89c1c8f3282eac9a22fda2b4082e1ed62a2107f70e3f1de1872c7919f", size = 112245, upload-time = "2025-06-11T13:17:32.331Z" }, - { url = "https://files.pythonhosted.org/packages/21/7e/08a615bec095c288f997951e42e48b262d43c6081bef33cfbfad96ab9658/geventhttpclient-2.3.4-cp313-cp313-win32.whl", hash = "sha256:4e492b9ab880f98f8a9cc143b96ea72e860946eae8ad5fb2837cede2a8f45154", size = 48360, upload-time = "2025-06-11T13:17:33.349Z" }, - { url = "https://files.pythonhosted.org/packages/ec/19/ef3cb21e7e95b14cfcd21e3ba7fe3d696e171682dfa43ab8c0a727cac601/geventhttpclient-2.3.4-cp313-cp313-win_amd64.whl", hash = "sha256:72575c5b502bf26ececccb905e4e028bb922f542946be701923e726acf305eb6", size = 48956, upload-time = "2025-06-11T13:17:34.956Z" }, + { url = "https://files.pythonhosted.org/packages/63/e7/597634914f0346faf5eb4f371f885add9873081cea921070d826c99b18f7/geventhttpclient-2.3.7-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0b1564f10fd46bf4fce9bf8b1c6952e2f1c7b88c62c86f2c45f7866bd341ba4b", size = 69756, upload-time = "2025-12-07T19:48:04.043Z" }, + { url = "https://files.pythonhosted.org/packages/6f/05/fe01ea721d5491f868ab1ed82e12306947c121a77583944234b8b840c17a/geventhttpclient-2.3.7-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4085d23c5b86993cdcef6a00e788cea4bcf6fedb2f2eb7c22c057716a02dc343", size = 51396, 
upload-time = "2025-12-07T19:48:04.787Z" }, + { url = "https://files.pythonhosted.org/packages/31/74/1c654bfeca910f7bd3998080e4f9c53799c396ec0558236b229fd706b54b/geventhttpclient-2.3.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:531dbf14baad90ad319db4d34afd91d01a3d14d947f26666b03f49c6c2082a8f", size = 51136, upload-time = "2025-12-07T19:48:05.564Z" }, + { url = "https://files.pythonhosted.org/packages/0a/a8/2bae3d6af26e345f3f53185885bbad19d902fa9364e255b5632f3de08d39/geventhttpclient-2.3.7-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:264de1e0902c93d7911b3235430f297a8a551e1bc8dd29692f8620f606d4cecf", size = 114992, upload-time = "2025-12-07T19:48:06.387Z" }, + { url = "https://files.pythonhosted.org/packages/ab/cb/65f59ebced7cfc0f7840a132a73aa67a57368034c37882a5212655f989df/geventhttpclient-2.3.7-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7b9a3a4938b5cc47f9330443e0bdd3fcdb850e6147147810fd88235b7bc5c4e8", size = 115664, upload-time = "2025-12-07T19:48:07.249Z" }, + { url = "https://files.pythonhosted.org/packages/f5/0f/076fba4792c00ace47d274f329cf4e1748faea30a79ff98b1c1dd780937d/geventhttpclient-2.3.7-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:fbad11254abdecf5edab4dae22642824aca5cbd258a2d14a79d8d9ab72223f9e", size = 121684, upload-time = "2025-12-07T19:48:08.069Z" }, + { url = "https://files.pythonhosted.org/packages/81/48/f4d7418229ca7ae3ca1163c6c415675e536def90944ea16f5fb2f586663b/geventhttpclient-2.3.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:383d6f95683a2fe1009d6d4660631e1c8f04043876c48c06c2e0ad64e516db5d", size = 111581, upload-time = "2025-12-07T19:48:08.879Z" }, + { url = "https://files.pythonhosted.org/packages/98/5e/f1c17fce2b25b1782dd697f63df63709aaf03a904f46f21e9f631e6eea02/geventhttpclient-2.3.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = 
"sha256:5f9ef048b05c53085cfbd86277a00f18e99c614ce62b2b47ec3d85a76fdccb38", size = 118459, upload-time = "2025-12-07T19:48:10.021Z" }, + { url = "https://files.pythonhosted.org/packages/68/c9/b3b980afed693be43700322976953d3bc87e3fc843102584c493cf6cbce6/geventhttpclient-2.3.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:602de0f6e20e06078f87ca8011d658d80e07873b3c2c1aaa581cac5fc4d0762b", size = 112238, upload-time = "2025-12-07T19:48:10.875Z" }, + { url = "https://files.pythonhosted.org/packages/58/5c/04e46bccb8d4e5880bb0be379479374a6645cab8af9b14c0ccbbbedc68dd/geventhttpclient-2.3.7-cp312-cp312-win32.whl", hash = "sha256:0daa0afff191d52740dbbba62f589a352eedd52d82a83e4944ec97a0337505fa", size = 48371, upload-time = "2025-12-07T19:48:11.802Z" }, + { url = "https://files.pythonhosted.org/packages/4e/c5/8d2e1608644018232c77bf8d1e15525c307417a9cdefa3ed467aa9b39c04/geventhttpclient-2.3.7-cp312-cp312-win_amd64.whl", hash = "sha256:80199b556a6e226283a909a82090ed22408aa0572c8bfaa5d3c90aafa5df0a8b", size = 49008, upload-time = "2025-12-07T19:48:12.653Z" }, ] [[package]] -name = "google-search-results" -version = "2.4.2" +name = "google-auth" +version = "2.45.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "requests" }, + { name = "cachetools" }, + { name = "pyasn1-modules" }, + { name = "rsa" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/00/3c794502a8b892c404b2dea5b3650eb21bfc7069612fbfd15c7f17c1cb0d/google_auth-2.45.0.tar.gz", hash = "sha256:90d3f41b6b72ea72dd9811e765699ee491ab24139f34ebf1ca2b9cc0c38708f3", size = 320708, upload-time = "2025-12-15T22:58:42.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/97/451d55e05487a5cd6279a01a7e34921858b16f7dc8aa38a2c684743cd2b3/google_auth-2.45.0-py2.py3-none-any.whl", hash = "sha256:82344e86dc00410ef5382d99be677c6043d72e502b625aa4f4afa0bdacca0f36", size = 233312, upload-time = "2025-12-15T22:58:40.777Z" }, +] + +[[package]] +name = 
"googleapis-common-protos" +version = "1.72.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/7b/adfd75544c415c487b33061fe7ae526165241c1ea133f9a9125a56b39fd8/googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5", size = 147433, upload-time = "2025-11-06T18:29:24.087Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/77/30/b3a6f6a2e00f8153549c2fa345c58ae1ce8e5f3153c2fe0484d444c3abcb/google_search_results-2.4.2.tar.gz", hash = "sha256:603a30ecae2af8e600b22635757a6df275dad4b934f975e67878ccd640b78245", size = 18818, upload-time = "2023-03-10T11:13:09.953Z" } [[package]] name = "greenlet" -version = "3.2.3" +version = "3.3.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c9/92/bb85bd6e80148a4d2e0c59f7c0c2891029f8fd510183afc7d8d2feeed9b6/greenlet-3.2.3.tar.gz", hash = "sha256:8b0dd8ae4c0d6f5e54ee55ba935eeb3d735a9b58a8a1e5b5cbab64e01a39f365", size = 185752, upload-time = "2025-06-05T16:16:09.955Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c7/e5/40dbda2736893e3e53d25838e0f19a2b417dfc122b9989c91918db30b5d3/greenlet-3.3.0.tar.gz", hash = "sha256:a82bb225a4e9e4d653dd2fb7b8b2d36e4fb25bc0165422a11e48b88e9e6f78fb", size = 190651, upload-time = "2025-12-04T14:49:44.05Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f3/94/ad0d435f7c48debe960c53b8f60fb41c2026b1d0fa4a99a1cb17c3461e09/greenlet-3.2.3-cp312-cp312-macosx_11_0_universal2.whl", hash = 
"sha256:25ad29caed5783d4bd7a85c9251c651696164622494c00802a139c00d639242d", size = 271992, upload-time = "2025-06-05T16:11:23.467Z" }, - { url = "https://files.pythonhosted.org/packages/93/5d/7c27cf4d003d6e77749d299c7c8f5fd50b4f251647b5c2e97e1f20da0ab5/greenlet-3.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:88cd97bf37fe24a6710ec6a3a7799f3f81d9cd33317dcf565ff9950c83f55e0b", size = 638820, upload-time = "2025-06-05T16:38:52.882Z" }, - { url = "https://files.pythonhosted.org/packages/c6/7e/807e1e9be07a125bb4c169144937910bf59b9d2f6d931578e57f0bce0ae2/greenlet-3.2.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:baeedccca94880d2f5666b4fa16fc20ef50ba1ee353ee2d7092b383a243b0b0d", size = 653046, upload-time = "2025-06-05T16:41:36.343Z" }, - { url = "https://files.pythonhosted.org/packages/9d/ab/158c1a4ea1068bdbc78dba5a3de57e4c7aeb4e7fa034320ea94c688bfb61/greenlet-3.2.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:be52af4b6292baecfa0f397f3edb3c6092ce071b499dd6fe292c9ac9f2c8f264", size = 647701, upload-time = "2025-06-05T16:48:19.604Z" }, - { url = "https://files.pythonhosted.org/packages/cc/0d/93729068259b550d6a0288da4ff72b86ed05626eaf1eb7c0d3466a2571de/greenlet-3.2.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0cc73378150b8b78b0c9fe2ce56e166695e67478550769536a6742dca3651688", size = 649747, upload-time = "2025-06-05T16:13:04.628Z" }, - { url = "https://files.pythonhosted.org/packages/f6/f6/c82ac1851c60851302d8581680573245c8fc300253fc1ff741ae74a6c24d/greenlet-3.2.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:706d016a03e78df129f68c4c9b4c4f963f7d73534e48a24f5f5a7101ed13dbbb", size = 605461, upload-time = "2025-06-05T16:12:50.792Z" }, - { url = "https://files.pythonhosted.org/packages/98/82/d022cf25ca39cf1200650fc58c52af32c90f80479c25d1cbf57980ec3065/greenlet-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:419e60f80709510c343c57b4bb5a339d8767bf9aef9b8ce43f4f143240f88b7c", size = 1121190, upload-time = "2025-06-05T16:36:48.59Z" }, - { url = "https://files.pythonhosted.org/packages/f5/e1/25297f70717abe8104c20ecf7af0a5b82d2f5a980eb1ac79f65654799f9f/greenlet-3.2.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:93d48533fade144203816783373f27a97e4193177ebaaf0fc396db19e5d61163", size = 1149055, upload-time = "2025-06-05T16:12:40.457Z" }, - { url = "https://files.pythonhosted.org/packages/1f/8f/8f9e56c5e82eb2c26e8cde787962e66494312dc8cb261c460e1f3a9c88bc/greenlet-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:7454d37c740bb27bdeddfc3f358f26956a07d5220818ceb467a483197d84f849", size = 297817, upload-time = "2025-06-05T16:29:49.244Z" }, - { url = "https://files.pythonhosted.org/packages/b1/cf/f5c0b23309070ae93de75c90d29300751a5aacefc0a3ed1b1d8edb28f08b/greenlet-3.2.3-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:500b8689aa9dd1ab26872a34084503aeddefcb438e2e7317b89b11eaea1901ad", size = 270732, upload-time = "2025-06-05T16:10:08.26Z" }, - { url = "https://files.pythonhosted.org/packages/48/ae/91a957ba60482d3fecf9be49bc3948f341d706b52ddb9d83a70d42abd498/greenlet-3.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a07d3472c2a93117af3b0136f246b2833fdc0b542d4a9799ae5f41c28323faef", size = 639033, upload-time = "2025-06-05T16:38:53.983Z" }, - { url = "https://files.pythonhosted.org/packages/6f/df/20ffa66dd5a7a7beffa6451bdb7400d66251374ab40b99981478c69a67a8/greenlet-3.2.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:8704b3768d2f51150626962f4b9a9e4a17d2e37c8a8d9867bbd9fa4eb938d3b3", size = 652999, upload-time = "2025-06-05T16:41:37.89Z" }, - { url = "https://files.pythonhosted.org/packages/51/b4/ebb2c8cb41e521f1d72bf0465f2f9a2fd803f674a88db228887e6847077e/greenlet-3.2.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = 
"sha256:5035d77a27b7c62db6cf41cf786cfe2242644a7a337a0e155c80960598baab95", size = 647368, upload-time = "2025-06-05T16:48:21.467Z" }, - { url = "https://files.pythonhosted.org/packages/8e/6a/1e1b5aa10dced4ae876a322155705257748108b7fd2e4fae3f2a091fe81a/greenlet-3.2.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2d8aa5423cd4a396792f6d4580f88bdc6efcb9205891c9d40d20f6e670992efb", size = 650037, upload-time = "2025-06-05T16:13:06.402Z" }, - { url = "https://files.pythonhosted.org/packages/26/f2/ad51331a157c7015c675702e2d5230c243695c788f8f75feba1af32b3617/greenlet-3.2.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2c724620a101f8170065d7dded3f962a2aea7a7dae133a009cada42847e04a7b", size = 608402, upload-time = "2025-06-05T16:12:51.91Z" }, - { url = "https://files.pythonhosted.org/packages/26/bc/862bd2083e6b3aff23300900a956f4ea9a4059de337f5c8734346b9b34fc/greenlet-3.2.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:873abe55f134c48e1f2a6f53f7d1419192a3d1a4e873bace00499a4e45ea6af0", size = 1119577, upload-time = "2025-06-05T16:36:49.787Z" }, - { url = "https://files.pythonhosted.org/packages/86/94/1fc0cc068cfde885170e01de40a619b00eaa8f2916bf3541744730ffb4c3/greenlet-3.2.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:024571bbce5f2c1cfff08bf3fbaa43bbc7444f580ae13b0099e95d0e6e67ed36", size = 1147121, upload-time = "2025-06-05T16:12:42.527Z" }, - { url = "https://files.pythonhosted.org/packages/27/1a/199f9587e8cb08a0658f9c30f3799244307614148ffe8b1e3aa22f324dea/greenlet-3.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:5195fb1e75e592dd04ce79881c8a22becdfa3e6f500e7feb059b1e6fdd54d3e3", size = 297603, upload-time = "2025-06-05T16:20:12.651Z" }, - { url = "https://files.pythonhosted.org/packages/d8/ca/accd7aa5280eb92b70ed9e8f7fd79dc50a2c21d8c73b9a0856f5b564e222/greenlet-3.2.3-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:3d04332dddb10b4a211b68111dabaee2e1a073663d117dc10247b5b1642bac86", size = 
271479, upload-time = "2025-06-05T16:10:47.525Z" }, - { url = "https://files.pythonhosted.org/packages/55/71/01ed9895d9eb49223280ecc98a557585edfa56b3d0e965b9fa9f7f06b6d9/greenlet-3.2.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8186162dffde068a465deab08fc72c767196895c39db26ab1c17c0b77a6d8b97", size = 683952, upload-time = "2025-06-05T16:38:55.125Z" }, - { url = "https://files.pythonhosted.org/packages/ea/61/638c4bdf460c3c678a0a1ef4c200f347dff80719597e53b5edb2fb27ab54/greenlet-3.2.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f4bfbaa6096b1b7a200024784217defedf46a07c2eee1a498e94a1b5f8ec5728", size = 696917, upload-time = "2025-06-05T16:41:38.959Z" }, - { url = "https://files.pythonhosted.org/packages/22/cc/0bd1a7eb759d1f3e3cc2d1bc0f0b487ad3cc9f34d74da4b80f226fde4ec3/greenlet-3.2.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:ed6cfa9200484d234d8394c70f5492f144b20d4533f69262d530a1a082f6ee9a", size = 692443, upload-time = "2025-06-05T16:48:23.113Z" }, - { url = "https://files.pythonhosted.org/packages/67/10/b2a4b63d3f08362662e89c103f7fe28894a51ae0bc890fabf37d1d780e52/greenlet-3.2.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:02b0df6f63cd15012bed5401b47829cfd2e97052dc89da3cfaf2c779124eb892", size = 692995, upload-time = "2025-06-05T16:13:07.972Z" }, - { url = "https://files.pythonhosted.org/packages/5a/c6/ad82f148a4e3ce9564056453a71529732baf5448ad53fc323e37efe34f66/greenlet-3.2.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:86c2d68e87107c1792e2e8d5399acec2487a4e993ab76c792408e59394d52141", size = 655320, upload-time = "2025-06-05T16:12:53.453Z" }, - { url = "https://files.pythonhosted.org/packages/5c/4f/aab73ecaa6b3086a4c89863d94cf26fa84cbff63f52ce9bc4342b3087a06/greenlet-3.2.3-cp314-cp314-win_amd64.whl", hash = "sha256:8c47aae8fbbfcf82cc13327ae802ba13c9c36753b67e760023fd116bc124a62a", size = 301236, upload-time 
= "2025-06-05T16:15:20.111Z" }, + { url = "https://files.pythonhosted.org/packages/f8/0a/a3871375c7b9727edaeeea994bfff7c63ff7804c9829c19309ba2e058807/greenlet-3.3.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:b01548f6e0b9e9784a2c99c5651e5dc89ffcbe870bc5fb2e5ef864e9cc6b5dcb", size = 276379, upload-time = "2025-12-04T14:23:30.498Z" }, + { url = "https://files.pythonhosted.org/packages/43/ab/7ebfe34dce8b87be0d11dae91acbf76f7b8246bf9d6b319c741f99fa59c6/greenlet-3.3.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:349345b770dc88f81506c6861d22a6ccd422207829d2c854ae2af8025af303e3", size = 597294, upload-time = "2025-12-04T14:50:06.847Z" }, + { url = "https://files.pythonhosted.org/packages/a4/39/f1c8da50024feecd0793dbd5e08f526809b8ab5609224a2da40aad3a7641/greenlet-3.3.0-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e8e18ed6995e9e2c0b4ed264d2cf89260ab3ac7e13555b8032b25a74c6d18655", size = 607742, upload-time = "2025-12-04T14:57:42.349Z" }, + { url = "https://files.pythonhosted.org/packages/77/cb/43692bcd5f7a0da6ec0ec6d58ee7cddb606d055ce94a62ac9b1aa481e969/greenlet-3.3.0-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c024b1e5696626890038e34f76140ed1daf858e37496d33f2af57f06189e70d7", size = 622297, upload-time = "2025-12-04T15:07:13.552Z" }, + { url = "https://files.pythonhosted.org/packages/75/b0/6bde0b1011a60782108c01de5913c588cf51a839174538d266de15e4bf4d/greenlet-3.3.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:047ab3df20ede6a57c35c14bf5200fcf04039d50f908270d3f9a7a82064f543b", size = 609885, upload-time = "2025-12-04T14:26:02.368Z" }, + { url = "https://files.pythonhosted.org/packages/49/0e/49b46ac39f931f59f987b7cd9f34bfec8ef81d2a1e6e00682f55be5de9f4/greenlet-3.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2d9ad37fc657b1102ec880e637cccf20191581f75c64087a549e66c57e1ceb53", size = 1567424, upload-time = 
"2025-12-04T15:04:23.757Z" }, + { url = "https://files.pythonhosted.org/packages/05/f5/49a9ac2dff7f10091935def9165c90236d8f175afb27cbed38fb1d61ab6b/greenlet-3.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:83cd0e36932e0e7f36a64b732a6f60c2fc2df28c351bae79fbaf4f8092fe7614", size = 1636017, upload-time = "2025-12-04T14:27:29.688Z" }, + { url = "https://files.pythonhosted.org/packages/6c/79/3912a94cf27ec503e51ba493692d6db1e3cd8ac7ac52b0b47c8e33d7f4f9/greenlet-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a7a34b13d43a6b78abf828a6d0e87d3385680eaf830cd60d20d52f249faabf39", size = 301964, upload-time = "2025-12-04T14:36:58.316Z" }, ] [[package]] @@ -514,37 +1070,148 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d1/94/16550ad6b3f13b96f0856ee5dfc2554efac28539ee84a51d7b14526da985/grpcio-1.67.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:699e964923b70f3101393710793289e42845791ea07565654ada0969522d0a38", size = 6149369, upload-time = "2024-10-29T06:24:49.112Z" }, { url = "https://files.pythonhosted.org/packages/33/0d/4c3b2587e8ad7f121b597329e6c2620374fccbc2e4e1aa3c73ccc670fde4/grpcio-1.67.1-cp312-cp312-win32.whl", hash = "sha256:4e7b904484a634a0fff132958dabdb10d63e0927398273917da3ee103e8d1f78", size = 3599176, upload-time = "2024-10-29T06:24:51.443Z" }, { url = "https://files.pythonhosted.org/packages/7d/36/0c03e2d80db69e2472cf81c6123aa7d14741de7cf790117291a703ae6ae1/grpcio-1.67.1-cp312-cp312-win_amd64.whl", hash = "sha256:5721e66a594a6c4204458004852719b38f3d5522082be9061d6510b455c90afc", size = 4346574, upload-time = "2024-10-29T06:24:54.587Z" }, - { url = "https://files.pythonhosted.org/packages/12/d2/2f032b7a153c7723ea3dea08bffa4bcaca9e0e5bdf643ce565b76da87461/grpcio-1.67.1-cp313-cp313-linux_armv7l.whl", hash = "sha256:aa0162e56fd10a5547fac8774c4899fc3e18c1aa4a4759d0ce2cd00d3696ea6b", size = 5091487, upload-time = "2024-10-29T06:24:57.416Z" }, - { url = 
"https://files.pythonhosted.org/packages/d0/ae/ea2ff6bd2475a082eb97db1104a903cf5fc57c88c87c10b3c3f41a184fc0/grpcio-1.67.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:beee96c8c0b1a75d556fe57b92b58b4347c77a65781ee2ac749d550f2a365dc1", size = 10943530, upload-time = "2024-10-29T06:25:01.062Z" }, - { url = "https://files.pythonhosted.org/packages/07/62/646be83d1a78edf8d69b56647327c9afc223e3140a744c59b25fbb279c3b/grpcio-1.67.1-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:a93deda571a1bf94ec1f6fcda2872dad3ae538700d94dc283c672a3b508ba3af", size = 5589079, upload-time = "2024-10-29T06:25:04.254Z" }, - { url = "https://files.pythonhosted.org/packages/d0/25/71513d0a1b2072ce80d7f5909a93596b7ed10348b2ea4fdcbad23f6017bf/grpcio-1.67.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e6f255980afef598a9e64a24efce87b625e3e3c80a45162d111a461a9f92955", size = 6213542, upload-time = "2024-10-29T06:25:06.824Z" }, - { url = "https://files.pythonhosted.org/packages/76/9a/d21236297111052dcb5dc85cd77dc7bf25ba67a0f55ae028b2af19a704bc/grpcio-1.67.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e838cad2176ebd5d4a8bb03955138d6589ce9e2ce5d51c3ada34396dbd2dba8", size = 5850211, upload-time = "2024-10-29T06:25:10.149Z" }, - { url = "https://files.pythonhosted.org/packages/2d/fe/70b1da9037f5055be14f359026c238821b9bcf6ca38a8d760f59a589aacd/grpcio-1.67.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a6703916c43b1d468d0756c8077b12017a9fcb6a1ef13faf49e67d20d7ebda62", size = 6572129, upload-time = "2024-10-29T06:25:12.853Z" }, - { url = "https://files.pythonhosted.org/packages/74/0d/7df509a2cd2a54814598caf2fb759f3e0b93764431ff410f2175a6efb9e4/grpcio-1.67.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:917e8d8994eed1d86b907ba2a61b9f0aef27a2155bca6cbb322430fc7135b7bb", size = 6149819, upload-time = "2024-10-29T06:25:15.803Z" }, - { url = 
"https://files.pythonhosted.org/packages/0a/08/bc3b0155600898fd10f16b79054e1cca6cb644fa3c250c0fe59385df5e6f/grpcio-1.67.1-cp313-cp313-win32.whl", hash = "sha256:e279330bef1744040db8fc432becc8a727b84f456ab62b744d3fdb83f327e121", size = 3596561, upload-time = "2024-10-29T06:25:19.348Z" }, - { url = "https://files.pythonhosted.org/packages/5a/96/44759eca966720d0f3e1b105c43f8ad4590c97bf8eb3cd489656e9590baa/grpcio-1.67.1-cp313-cp313-win_amd64.whl", hash = "sha256:fa0c739ad8b1996bd24823950e3cb5152ae91fca1c09cc791190bf1627ffefba", size = 4346042, upload-time = "2024-10-29T06:25:21.939Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "hf-xet" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5e/6e/0f11bacf08a67f7fb5ee09740f2ca54163863b07b70d579356e9222ce5d8/hf_xet-1.2.0.tar.gz", hash = "sha256:a8c27070ca547293b6890c4bf389f713f80e8c478631432962bb7f4bc0bd7d7f", size = 506020, upload-time = "2025-10-24T19:04:32.129Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/96/2d/22338486473df5923a9ab7107d375dbef9173c338ebef5098ef593d2b560/hf_xet-1.2.0-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:46740d4ac024a7ca9b22bebf77460ff43332868b661186a8e46c227fdae01848", size = 2866099, upload-time = "2025-10-24T19:04:15.366Z" }, + { url 
= "https://files.pythonhosted.org/packages/7f/8c/c5becfa53234299bc2210ba314eaaae36c2875e0045809b82e40a9544f0c/hf_xet-1.2.0-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:27df617a076420d8845bea087f59303da8be17ed7ec0cd7ee3b9b9f579dff0e4", size = 2722178, upload-time = "2025-10-24T19:04:13.695Z" }, + { url = "https://files.pythonhosted.org/packages/9a/92/cf3ab0b652b082e66876d08da57fcc6fa2f0e6c70dfbbafbd470bb73eb47/hf_xet-1.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3651fd5bfe0281951b988c0facbe726aa5e347b103a675f49a3fa8144c7968fd", size = 3320214, upload-time = "2025-10-24T19:04:03.596Z" }, + { url = "https://files.pythonhosted.org/packages/46/92/3f7ec4a1b6a65bf45b059b6d4a5d38988f63e193056de2f420137e3c3244/hf_xet-1.2.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d06fa97c8562fb3ee7a378dd9b51e343bc5bc8190254202c9771029152f5e08c", size = 3229054, upload-time = "2025-10-24T19:04:01.949Z" }, + { url = "https://files.pythonhosted.org/packages/0b/dd/7ac658d54b9fb7999a0ccb07ad863b413cbaf5cf172f48ebcd9497ec7263/hf_xet-1.2.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:4c1428c9ae73ec0939410ec73023c4f842927f39db09b063b9482dac5a3bb737", size = 3413812, upload-time = "2025-10-24T19:04:24.585Z" }, + { url = "https://files.pythonhosted.org/packages/92/68/89ac4e5b12a9ff6286a12174c8538a5930e2ed662091dd2572bbe0a18c8a/hf_xet-1.2.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a55558084c16b09b5ed32ab9ed38421e2d87cf3f1f89815764d1177081b99865", size = 3508920, upload-time = "2025-10-24T19:04:26.927Z" }, + { url = "https://files.pythonhosted.org/packages/cb/44/870d44b30e1dcfb6a65932e3e1506c103a8a5aea9103c337e7a53180322c/hf_xet-1.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:e6584a52253f72c9f52f9e549d5895ca7a471608495c4ecaa6cc73dba2b24d69", size = 2905735, upload-time = "2025-10-24T19:04:35.928Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = 
"certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httptools" +version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b5/46/120a669232c7bdedb9d52d4aeae7e6c7dfe151e99dc70802e2fc7a5e1993/httptools-0.7.1.tar.gz", hash = "sha256:abd72556974f8e7c74a259655924a717a2365b236c882c3f6f8a45fe94703ac9", size = 258961, upload-time = "2025-10-10T03:55:08.559Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/53/7f/403e5d787dc4942316e515e949b0c8a013d84078a915910e9f391ba9b3ed/httptools-0.7.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:38e0c83a2ea9746ebbd643bdfb521b9aa4a91703e2cd705c20443405d2fd16a5", size = 206280, upload-time = "2025-10-10T03:54:39.274Z" }, + { url = "https://files.pythonhosted.org/packages/2a/0d/7f3fd28e2ce311ccc998c388dd1c53b18120fda3b70ebb022b135dc9839b/httptools-0.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f25bbaf1235e27704f1a7b86cd3304eabc04f569c828101d94a0e605ef7205a5", size = 110004, upload-time = "2025-10-10T03:54:40.403Z" }, + { url = "https://files.pythonhosted.org/packages/84/a6/b3965e1e146ef5762870bbe76117876ceba51a201e18cc31f5703e454596/httptools-0.7.1-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2c15f37ef679ab9ecc06bfc4e6e8628c32a8e4b305459de7cf6785acd57e4d03", size = 517655, upload-time = 
"2025-10-10T03:54:41.347Z" }, + { url = "https://files.pythonhosted.org/packages/11/7d/71fee6f1844e6fa378f2eddde6c3e41ce3a1fb4b2d81118dd544e3441ec0/httptools-0.7.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7fe6e96090df46b36ccfaf746f03034e5ab723162bc51b0a4cf58305324036f2", size = 511440, upload-time = "2025-10-10T03:54:42.452Z" }, + { url = "https://files.pythonhosted.org/packages/22/a5/079d216712a4f3ffa24af4a0381b108aa9c45b7a5cc6eb141f81726b1823/httptools-0.7.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f72fdbae2dbc6e68b8239defb48e6a5937b12218e6ffc2c7846cc37befa84362", size = 495186, upload-time = "2025-10-10T03:54:43.937Z" }, + { url = "https://files.pythonhosted.org/packages/e9/9e/025ad7b65278745dee3bd0ebf9314934c4592560878308a6121f7f812084/httptools-0.7.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e99c7b90a29fd82fea9ef57943d501a16f3404d7b9ee81799d41639bdaae412c", size = 499192, upload-time = "2025-10-10T03:54:45.003Z" }, + { url = "https://files.pythonhosted.org/packages/6d/de/40a8f202b987d43afc4d54689600ff03ce65680ede2f31df348d7f368b8f/httptools-0.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:3e14f530fefa7499334a79b0cf7e7cd2992870eb893526fb097d51b4f2d0f321", size = 86694, upload-time = "2025-10-10T03:54:45.923Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = 
"sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "httpx-sse" +version = "0.4.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/4c/751061ffa58615a32c31b2d82e8482be8dd4a89154f003147acee90f2be9/httpx_sse-0.4.3.tar.gz", hash = "sha256:9b1ed0127459a66014aec3c56bebd93da3c1bc8bb6618c8082039a44889a755d", size = 15943, upload-time = "2025-10-10T21:48:22.271Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/fd/6668e5aec43ab844de6fc74927e155a3b37bf40d7c3790e49fc0406b6578/httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc", size = 8960, upload-time = "2025-10-10T21:48:21.158Z" }, +] + +[[package]] +name = "huggingface-hub" +version = "1.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "filelock" }, + { name = "fsspec" }, + { name = "hf-xet", marker = "platform_machine == 'AMD64' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" }, + { name = "httpx" }, + { name = "packaging" }, + { name = "pyyaml" }, + { name = "shellingham" }, + { name = "tqdm" }, + { name = "typer-slim" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/c8/9cd2fcb670ba0e708bfdf95a1177b34ca62de2d3821df0773bc30559af80/huggingface_hub-1.2.3.tar.gz", hash = "sha256:4ba57f17004fd27bb176a6b7107df579865d4cde015112db59184c51f5602ba7", size = 614605, upload-time = "2025-12-12T15:31:42.161Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/8d/7ca723a884d55751b70479b8710f06a317296b1fa1c1dec01d0420d13e43/huggingface_hub-1.2.3-py3-none-any.whl", hash = "sha256:c9b7a91a9eedaa2149cdc12bdd8f5a11780e10de1f1024718becf9e41e5a4642", size = 520953, upload-time = "2025-12-12T15:31:40.339Z" }, +] 
+ +[[package]] +name = "humanfriendly" +version = "10.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyreadline3", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cc/3f/2c29224acb2e2df4d2046e4c73ee2662023c58ff5b113c4c1adac0886c43/humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc", size = 360702, upload-time = "2021-09-17T21:40:43.31Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f0/0f/310fb31e39e2d734ccaa2c0fb981ee41f7bd5056ce9bc29b2248bd569169/humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477", size = 86794, upload-time = "2021-09-17T21:40:39.897Z" }, ] [[package]] name = "idna" -version = "3.10" +version = "3.11" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = 
"sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, ] +[[package]] +name = "image" +version = "1.5.33" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "django" }, + { name = "pillow" }, + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/84/be/961693ed384aa91bcc07525c90e3a34bc06c75f131655dfe21310234c933/image-1.5.33.tar.gz", hash = "sha256:baa2e09178277daa50f22fd6d1d51ec78f19c12688921cb9ab5808743f097126", size = 15975, upload-time = "2020-10-27T09:58:36.538Z" } + [[package]] name = "imageio" -version = "2.37.0" +version = "2.37.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy" }, { name = "pillow" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/0c/47/57e897fb7094afb2d26e8b2e4af9a45c7cf1a405acdeeca001fdf2c98501/imageio-2.37.0.tar.gz", hash = "sha256:71b57b3669666272c818497aebba2b4c5f20d5b37c81720e5e1a56d59c492996", size = 389963, upload-time = "2025-01-20T02:42:37.089Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a3/6f/606be632e37bf8d05b253e8626c2291d74c691ddc7bcdf7d6aaf33b32f6a/imageio-2.37.2.tar.gz", hash = "sha256:0212ef2727ac9caa5ca4b2c75ae89454312f440a756fcfc8ef1993e718f50f8a", size = 389600, upload-time = "2025-11-04T14:29:39.898Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cb/bd/b394387b598ed84d8d0fa90611a90bee0adc2021820ad5729f7ced74a8e2/imageio-2.37.0-py3-none-any.whl", hash = "sha256:11efa15b87bc7871b61590326b2d635439acc321cf7f8ce996f812543ce10eed", size = 315796, upload-time = "2025-01-20T02:42:34.931Z" }, + { url = "https://files.pythonhosted.org/packages/fb/fe/301e0936b79bcab4cacc7548bf2853fc28dced0a578bab1f7ef53c9aa75b/imageio-2.37.2-py3-none-any.whl", hash = "sha256:ad9adfb20335d718c03de457358ed69f141021a333c40a53e57273d8a5bd0b9b", size = 317646, upload-time = "2025-11-04T14:29:37.948Z" }, ] [[package]] @@ -561,9 +1228,111 
@@ wheels = [ { url = "https://files.pythonhosted.org/packages/2c/c6/fa760e12a2483469e2bf5058c5faff664acf66cadb4df2ad6205b016a73d/imageio_ffmpeg-0.6.0-py3-none-win_amd64.whl", hash = "sha256:02fa47c83703c37df6bfe4896aab339013f62bf02c5ebf2dce6da56af04ffc0a", size = 31246824, upload-time = "2025-01-16T21:34:28.6Z" }, ] +[[package]] +name = "importlib-metadata" +version = "8.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107, upload-time = "2025-12-21T10:00:19.278Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865, upload-time = "2025-12-21T10:00:18.329Z" }, +] + +[[package]] +name = "importlib-resources" +version = "6.5.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cf/8c/f834fbf984f691b4f7ff60f50b514cc3de5cc08abfc3295564dd89c5e2e7/importlib_resources-6.5.2.tar.gz", hash = "sha256:185f87adef5bcc288449d98fb4fba07cea78bc036455dd44c5fc4a2fe78fed2c", size = 44693, upload-time = "2025-01-03T18:51:56.698Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/ed/1f1afb2e9e7f38a545d628f864d562a5ae64fe6f7a10e28ffb9b185b4e89/importlib_resources-6.5.2-py3-none-any.whl", hash = "sha256:789cfdc3ed28c78b67a06acb8126751ced69a3d5f79c095a98298cd8a760ccec", size = 37461, upload-time = "2025-01-03T18:51:54.306Z" }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + +[[package]] +name = "jsonpatch" +version = "1.33" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jsonpointer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/78/18813351fe5d63acad16aec57f94ec2b70a09e53ca98145589e185423873/jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c", size = 21699, upload-time = "2023-06-26T12:07:29.144Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/07/02e16ed01e04a374e644b575638ec7987ae846d25ad97bcc9945a3ee4b0e/jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade", size = 12898, upload-time = "2023-06-16T21:01:28.466Z" }, +] + +[[package]] +name = "jsonpointer" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/0a/eebeb1fa92507ea94016a2a790b93c2ae41a7e18778f85471dc54475ed25/jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef", size = 9114, upload-time = "2024-06-10T19:24:42.462Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/71/92/5e77f98553e9e75130c78900d000368476aed74276eb8ae8796f65f00918/jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942", size = 7595, 
upload-time = "2024-06-10T19:24:40.698Z" }, +] + +[[package]] +name = "jsonschema" +version = "4.25.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, +] + +[[package]] +name = "kiwisolver" +version = "1.4.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5c/3c/85844f1b0feb11ee581ac23fe5fce65cd049a200c1446708cc1b7f922875/kiwisolver-1.4.9.tar.gz", hash = 
"sha256:c3b22c26c6fd6811b0ae8363b95ca8ce4ea3c202d3d0975b2914310ceb1bcc4d", size = 97564, upload-time = "2025-08-10T21:27:49.279Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/c9/13573a747838aeb1c76e3267620daa054f4152444d1f3d1a2324b78255b5/kiwisolver-1.4.9-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ac5a486ac389dddcc5bef4f365b6ae3ffff2c433324fb38dd35e3fab7c957999", size = 123686, upload-time = "2025-08-10T21:26:10.034Z" }, + { url = "https://files.pythonhosted.org/packages/51/ea/2ecf727927f103ffd1739271ca19c424d0e65ea473fbaeea1c014aea93f6/kiwisolver-1.4.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f2ba92255faa7309d06fe44c3a4a97efe1c8d640c2a79a5ef728b685762a6fd2", size = 66460, upload-time = "2025-08-10T21:26:11.083Z" }, + { url = "https://files.pythonhosted.org/packages/5b/5a/51f5464373ce2aeb5194508298a508b6f21d3867f499556263c64c621914/kiwisolver-1.4.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a2899935e724dd1074cb568ce7ac0dce28b2cd6ab539c8e001a8578eb106d14", size = 64952, upload-time = "2025-08-10T21:26:12.058Z" }, + { url = "https://files.pythonhosted.org/packages/70/90/6d240beb0f24b74371762873e9b7f499f1e02166a2d9c5801f4dbf8fa12e/kiwisolver-1.4.9-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f6008a4919fdbc0b0097089f67a1eb55d950ed7e90ce2cc3e640abadd2757a04", size = 1474756, upload-time = "2025-08-10T21:26:13.096Z" }, + { url = "https://files.pythonhosted.org/packages/12/42/f36816eaf465220f683fb711efdd1bbf7a7005a2473d0e4ed421389bd26c/kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:67bb8b474b4181770f926f7b7d2f8c0248cbcb78b660fdd41a47054b28d2a752", size = 1276404, upload-time = "2025-08-10T21:26:14.457Z" }, + { url = "https://files.pythonhosted.org/packages/2e/64/bc2de94800adc830c476dce44e9b40fd0809cddeef1fde9fcf0f73da301f/kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:2327a4a30d3ee07d2fbe2e7933e8a37c591663b96ce42a00bc67461a87d7df77", size = 1294410, upload-time = "2025-08-10T21:26:15.73Z" }, + { url = "https://files.pythonhosted.org/packages/5f/42/2dc82330a70aa8e55b6d395b11018045e58d0bb00834502bf11509f79091/kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7a08b491ec91b1d5053ac177afe5290adacf1f0f6307d771ccac5de30592d198", size = 1343631, upload-time = "2025-08-10T21:26:17.045Z" }, + { url = "https://files.pythonhosted.org/packages/22/fd/f4c67a6ed1aab149ec5a8a401c323cee7a1cbe364381bb6c9c0d564e0e20/kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8fc5c867c22b828001b6a38d2eaeb88160bf5783c6cb4a5e440efc981ce286d", size = 2224963, upload-time = "2025-08-10T21:26:18.737Z" }, + { url = "https://files.pythonhosted.org/packages/45/aa/76720bd4cb3713314677d9ec94dcc21ced3f1baf4830adde5bb9b2430a5f/kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:3b3115b2581ea35bb6d1f24a4c90af37e5d9b49dcff267eeed14c3893c5b86ab", size = 2321295, upload-time = "2025-08-10T21:26:20.11Z" }, + { url = "https://files.pythonhosted.org/packages/80/19/d3ec0d9ab711242f56ae0dc2fc5d70e298bb4a1f9dfab44c027668c673a1/kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:858e4c22fb075920b96a291928cb7dea5644e94c0ee4fcd5af7e865655e4ccf2", size = 2487987, upload-time = "2025-08-10T21:26:21.49Z" }, + { url = "https://files.pythonhosted.org/packages/39/e9/61e4813b2c97e86b6fdbd4dd824bf72d28bcd8d4849b8084a357bc0dd64d/kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ed0fecd28cc62c54b262e3736f8bb2512d8dcfdc2bcf08be5f47f96bf405b145", size = 2291817, upload-time = "2025-08-10T21:26:22.812Z" }, + { url = "https://files.pythonhosted.org/packages/a0/41/85d82b0291db7504da3c2defe35c9a8a5c9803a730f297bd823d11d5fb77/kiwisolver-1.4.9-cp312-cp312-win_amd64.whl", hash = "sha256:f68208a520c3d86ea51acf688a3e3002615a7f0238002cccc17affecc86a8a54", size = 73895, 
upload-time = "2025-08-10T21:26:24.37Z" }, + { url = "https://files.pythonhosted.org/packages/e2/92/5f3068cf15ee5cb624a0c7596e67e2a0bb2adee33f71c379054a491d07da/kiwisolver-1.4.9-cp312-cp312-win_arm64.whl", hash = "sha256:2c1a4f57df73965f3f14df20b80ee29e6a7930a57d2d9e8491a25f676e197c60", size = 64992, upload-time = "2025-08-10T21:26:25.732Z" }, +] + [[package]] name = "kombu" -version = "5.5.4" +version = "5.6.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "amqp" }, @@ -571,90 +1340,465 @@ dependencies = [ { name = "tzdata" }, { name = "vine" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/0f/d3/5ff936d8319ac86b9c409f1501b07c426e6ad41966fedace9ef1b966e23f/kombu-5.5.4.tar.gz", hash = "sha256:886600168275ebeada93b888e831352fe578168342f0d1d5833d88ba0d847363", size = 461992, upload-time = "2025-06-01T10:19:22.281Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b6/a5/607e533ed6c83ae1a696969b8e1c137dfebd5759a2e9682e26ff1b97740b/kombu-5.6.2.tar.gz", hash = "sha256:8060497058066c6f5aed7c26d7cd0d3b574990b09de842a8c5aaed0b92cc5a55", size = 472594, upload-time = "2025-12-29T20:30:07.779Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/70/a07dcf4f62598c8ad579df241af55ced65bed76e42e45d3c368a6d82dbc1/kombu-5.5.4-py3-none-any.whl", hash = "sha256:a12ed0557c238897d8e518f1d1fdf84bd1516c5e305af2dacd85c2015115feb8", size = 210034, upload-time = "2025-06-01T10:19:20.436Z" }, + { url = "https://files.pythonhosted.org/packages/fb/0f/834427d8c03ff1d7e867d3db3d176470c64871753252b21b4f4897d1fa45/kombu-5.6.2-py3-none-any.whl", hash = "sha256:efcfc559da324d41d61ca311b0c64965ea35b4c55cc04ee36e55386145dace93", size = 214219, upload-time = "2025-12-29T20:30:05.74Z" }, +] + +[[package]] +name = "kubernetes" +version = "34.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "durationpy" }, + { name = "google-auth" }, + { name = "python-dateutil" }, + { name = 
"pyyaml" }, + { name = "requests" }, + { name = "requests-oauthlib" }, + { name = "six" }, + { name = "urllib3" }, + { name = "websocket-client" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ef/55/3f880ef65f559cbed44a9aa20d3bdbc219a2c3a3bac4a30a513029b03ee9/kubernetes-34.1.0.tar.gz", hash = "sha256:8fe8edb0b5d290a2f3ac06596b23f87c658977d46b5f8df9d0f4ea83d0003912", size = 1083771, upload-time = "2025-09-29T20:23:49.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/ec/65f7d563aa4a62dd58777e8f6aa882f15db53b14eb29aba0c28a20f7eb26/kubernetes-34.1.0-py2.py3-none-any.whl", hash = "sha256:bffba2272534e224e6a7a74d582deb0b545b7c9879d2cd9e4aae9481d1f2cc2a", size = 2008380, upload-time = "2025-09-29T20:23:47.684Z" }, +] + +[[package]] +name = "langchain" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "langgraph" }, + { name = "pydantic" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/12/3a74c22abdfddd877dfc2ee666d516f9132877fcd25eb4dd694835c59c79/langchain-1.2.0.tar.gz", hash = "sha256:a087d1e2b2969819e29a91a6d5f98302aafe31bd49ba377ecee3bf5a5dcfe14a", size = 536126, upload-time = "2025-12-15T14:51:42.24Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/23/00/4e3fa0d90f5a5c376ccb8ca983d0f0f7287783dfac48702e18f01d24673b/langchain-1.2.0-py3-none-any.whl", hash = "sha256:82f0d17aa4fbb11560b30e1e7d4aeb75e3ad71ce09b85c90ab208b181a24ffac", size = 102828, upload-time = "2025-12-15T14:51:40.802Z" }, +] + +[[package]] +name = "langchain-classic" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "langchain-text-splitters" }, + { name = "langsmith" }, + { name = "pydantic" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "sqlalchemy" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/7c/4b/bd03518418ece4c13192a504449b58c28afee915dc4a6f4b02622458cb1b/langchain_classic-1.0.1.tar.gz", hash = "sha256:40a499684df36b005a1213735dc7f8dca8f5eb67978d6ec763e7a49780864fdc", size = 10516020, upload-time = "2025-12-23T22:55:22.615Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/0f/eab87f017d7fe28e8c11fff614f4cdbfae32baadb77d0f79e9f922af1df2/langchain_classic-1.0.1-py3-none-any.whl", hash = "sha256:131d83a02bb80044c68fedc1ab4ae885d5b8f8c2c742d8ab9e7534ad9cda8e80", size = 1040666, upload-time = "2025-12-23T22:55:21.025Z" }, +] + +[[package]] +name = "langchain-community" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "dataclasses-json" }, + { name = "httpx-sse" }, + { name = "langchain-classic" }, + { name = "langchain-core" }, + { name = "langsmith" }, + { name = "numpy" }, + { name = "pydantic-settings" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "sqlalchemy" }, + { name = "tenacity" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/53/97/a03585d42b9bdb6fbd935282d6e3348b10322a24e6ce12d0c99eb461d9af/langchain_community-0.4.1.tar.gz", hash = "sha256:f3b211832728ee89f169ddce8579b80a085222ddb4f4ed445a46e977d17b1e85", size = 33241144, upload-time = "2025-10-27T15:20:32.504Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f0/a4/c4fde67f193401512337456cabc2148f2c43316e445f5decd9f8806e2992/langchain_community-0.4.1-py3-none-any.whl", hash = "sha256:2135abb2c7748a35c84613108f7ebf30f8505b18c3c18305ffaecfc7651f6c6a", size = 2533285, upload-time = "2025-10-27T15:20:30.767Z" }, +] + +[[package]] +name = "langchain-core" +version = "1.2.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jsonpatch" }, + { name = "langsmith" }, + { name = "packaging" }, + { name = "pydantic" }, + { name = "pyyaml" }, + { name = "tenacity" }, + { name = "typing-extensions" 
}, + { name = "uuid-utils" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c8/86/bd678d69341ae4178bc8dfa04024d63636e5d580ff03d4502c8bc2262917/langchain_core-1.2.5.tar.gz", hash = "sha256:d674f6df42f07e846859b9d3afe547cad333d6bf9763e92c88eb4f8aaedcd3cc", size = 820445, upload-time = "2025-12-22T23:45:32.041Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/bd/9df897cbc98290bf71140104ee5b9777cf5291afb80333aa7da5a497339b/langchain_core-1.2.5-py3-none-any.whl", hash = "sha256:3255944ef4e21b2551facb319bfc426057a40247c0a05de5bd6f2fc021fbfa34", size = 484851, upload-time = "2025-12-22T23:45:30.525Z" }, +] + +[[package]] +name = "langchain-text-splitters" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/41/42/c178dcdc157b473330eb7cc30883ea69b8ec60078c7b85e2d521054c4831/langchain_text_splitters-1.1.0.tar.gz", hash = "sha256:75e58acb7585dc9508f3cd9d9809cb14751283226c2d6e21fb3a9ae57582ca22", size = 272230, upload-time = "2025-12-14T01:15:38.659Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d8/1a/a84ed1c046deecf271356b0179c1b9fba95bfdaa6f934e1849dee26fad7b/langchain_text_splitters-1.1.0-py3-none-any.whl", hash = "sha256:f00341fe883358786104a5f881375ac830a4dd40253ecd42b4c10536c6e4693f", size = 34182, upload-time = "2025-12-14T01:15:37.382Z" }, +] + +[[package]] +name = "langgraph" +version = "1.0.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "langgraph-checkpoint" }, + { name = "langgraph-prebuilt" }, + { name = "langgraph-sdk" }, + { name = "pydantic" }, + { name = "xxhash" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7d/47/28f4d4d33d88f69de26f7a54065961ac0c662cec2479b36a2db081ef5cb6/langgraph-1.0.5.tar.gz", hash = "sha256:7f6ae59622386b60fe9fa0ad4c53f42016b668455ed604329e7dc7904adbf3f8", size = 493969, 
upload-time = "2025-12-12T23:05:48.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/23/1b/e318ee76e42d28f515d87356ac5bd7a7acc8bad3b8f54ee377bef62e1cbf/langgraph-1.0.5-py3-none-any.whl", hash = "sha256:b4cfd173dca3c389735b47228ad8b295e6f7b3df779aba3a1e0c23871f81281e", size = 157056, upload-time = "2025-12-12T23:05:46.499Z" }, +] + +[[package]] +name = "langgraph-checkpoint" +version = "3.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "ormsgpack" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0f/07/2b1c042fa87d40cf2db5ca27dc4e8dd86f9a0436a10aa4361a8982718ae7/langgraph_checkpoint-3.0.1.tar.gz", hash = "sha256:59222f875f85186a22c494aedc65c4e985a3df27e696e5016ba0b98a5ed2cee0", size = 137785, upload-time = "2025-11-04T21:55:47.774Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/e3/616e3a7ff737d98c1bbb5700dd62278914e2a9ded09a79a1fa93cf24ce12/langgraph_checkpoint-3.0.1-py3-none-any.whl", hash = "sha256:9b04a8d0edc0474ce4eaf30c5d731cee38f11ddff50a6177eead95b5c4e4220b", size = 46249, upload-time = "2025-11-04T21:55:46.472Z" }, +] + +[[package]] +name = "langgraph-prebuilt" +version = "1.0.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "langgraph-checkpoint" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/f9/54f8891b32159e4542236817aea2ee83de0de18bce28e9bdba08c7f93001/langgraph_prebuilt-1.0.5.tar.gz", hash = "sha256:85802675ad778cc7240fd02d47db1e0b59c0c86d8369447d77ce47623845db2d", size = 144453, upload-time = "2025-11-20T16:47:39.23Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/5e/aeba4a5b39fe6e874e0dd003a82da71c7153e671312671a8dacc5cb7c1af/langgraph_prebuilt-1.0.5-py3-none-any.whl", hash = "sha256:22369563e1848862ace53fbc11b027c28dd04a9ac39314633bb95f2a7e258496", size = 35072, upload-time = "2025-11-20T16:47:38.187Z" }, +] + +[[package]] 
+name = "langgraph-sdk" +version = "0.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, + { name = "orjson" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a9/d3/b6be0b0aba2a53a8920a2b0b4328a83121ec03eea9952e576d06a4182f6f/langgraph_sdk-0.3.1.tar.gz", hash = "sha256:f6dadfd2444eeff3e01405a9005c95fb3a028d4bd954ebec80ea6150084f92bb", size = 130312, upload-time = "2025-12-18T22:11:47.42Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ab/fe/0c1c9c01a154eba62b20b02fabe811fd94a2b810061ae9e4d8462b8cf85a/langgraph_sdk-0.3.1-py3-none-any.whl", hash = "sha256:0b856923bfd20bf3441ce9d03bef488aa333fb610e972618799a9d584436acad", size = 66517, upload-time = "2025-12-18T22:11:46.625Z" }, +] + +[[package]] +name = "langsmith" +version = "0.5.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, + { name = "orjson", marker = "platform_python_implementation != 'PyPy'" }, + { name = "packaging" }, + { name = "pydantic" }, + { name = "requests" }, + { name = "requests-toolbelt" }, + { name = "uuid-utils" }, + { name = "zstandard" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fb/92/967ba83ec40448f46e23f231731b1564207af5ffba32aecef4e1f2f9f83f/langsmith-0.5.1.tar.gz", hash = "sha256:6a10b38cb4ce58941b7f1dbdf41a461868605dd0162bf05d17690f2e4b6e50e7", size = 871631, upload-time = "2025-12-24T19:50:24.823Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/19/67/1720b01e58d3487a44c780a86aabad95d9eaaf6b2fa8d0718c98f0eca18d/langsmith-0.5.1-py3-none-any.whl", hash = "sha256:70aa2a4c75add3f723c3bbac80dbb8adc575077834d3a733ee1ec133206ff351", size = 275527, upload-time = "2025-12-24T19:50:22.808Z" }, +] + +[[package]] +name = "lazy-loader" +version = "0.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/6f/6b/c875b30a1ba490860c93da4cabf479e03f584eba06fe5963f6f6644653d8/lazy_loader-0.4.tar.gz", hash = "sha256:47c75182589b91a4e1a85a136c074285a5ad4d9f39c63e0d7fb76391c4574cd1", size = 15431, upload-time = "2024-04-05T13:03:12.261Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/60/d497a310bde3f01cb805196ac61b7ad6dc5dcf8dce66634dc34364b20b4f/lazy_loader-0.4-py3-none-any.whl", hash = "sha256:342aa8e14d543a154047afb4ba8ef17f5563baad3fc610d7b15b213b0f119efc", size = 12097, upload-time = "2024-04-05T13:03:10.514Z" }, +] + +[[package]] +name = "load" +version = "1.0.14" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "future" }, + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/59/1c/3ef5d3c857d59ad978806529f7784f500e639703bc03d926be96f3777965/load-1.0.14.tar.gz", hash = "sha256:81f8b0fecbe9c0d437c2ef4c39a8f52f5ebb8c906c95d959e5584f2aaccb426a", size = 25094, upload-time = "2025-06-22T10:31:18.457Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/8f/07d5d2845499998982401c24b07c38bedd4e5ab0a67bddcd055ec3276209/load-1.0.14-py2.py3-none-any.whl", hash = "sha256:75999b0d239ef95110f0f4f8a66dc860ad70f71d833939c975ecaf7c4d221f07", size = 24483, upload-time = "2025-06-22T10:31:16.811Z" }, +] + +[[package]] +name = "load-dotenv" +version = "0.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "python-dotenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d7/95/055d123dde74bd54b99b27a3c2d8c6e218f544e0bd437480f61ccf85a07f/load-dotenv-0.1.0.tar.gz", hash = "sha256:bbe5f40072d4a61eadca66de6c222df5a2d935d6d41b703be1ff75396f635145", size = 6404, upload-time = "2022-09-02T19:55:01.349Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/2e/268a528dfa1f76999435634b0453fdd917ea878a818a6d5981632f7acffb/load_dotenv-0.1.0-py3-none-any.whl", hash = 
"sha256:614803f720153fb8a5f97124a72aaa3930a67aa5547cbe8603d01ea7f9ac1adf", size = 7175, upload-time = "2022-09-02T19:54:59.85Z" }, +] + +[[package]] +name = "loguru" +version = "0.7.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "win32-setctime", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3a/05/a1dae3dffd1116099471c643b8924f5aa6524411dc6c63fdae648c4f1aca/loguru-0.7.3.tar.gz", hash = "sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6", size = 63559, upload-time = "2024-12-06T11:20:56.608Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/29/0348de65b8cc732daa3e33e67806420b2ae89bdce2b04af740289c5c6c8c/loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c", size = 61595, upload-time = "2024-12-06T11:20:54.538Z" }, +] + +[[package]] +name = "markdown-it-py" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" }, +] + +[[package]] +name = "markupsafe" +version = "3.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/99/7690b6d4034fffd95959cbe0c02de8deb3098cc577c67bb6a24fe5d7caa7/markupsafe-3.0.3.tar.gz", hash = 
"sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698", size = 80313, upload-time = "2025-09-27T18:37:40.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/72/147da192e38635ada20e0a2e1a51cf8823d2119ce8883f7053879c2199b5/markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e", size = 11615, upload-time = "2025-09-27T18:36:30.854Z" }, + { url = "https://files.pythonhosted.org/packages/9a/81/7e4e08678a1f98521201c3079f77db69fb552acd56067661f8c2f534a718/markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce", size = 12020, upload-time = "2025-09-27T18:36:31.971Z" }, + { url = "https://files.pythonhosted.org/packages/1e/2c/799f4742efc39633a1b54a92eec4082e4f815314869865d876824c257c1e/markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d", size = 24332, upload-time = "2025-09-27T18:36:32.813Z" }, + { url = "https://files.pythonhosted.org/packages/3c/2e/8d0c2ab90a8c1d9a24f0399058ab8519a3279d1bd4289511d74e909f060e/markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d", size = 22947, upload-time = "2025-09-27T18:36:33.86Z" }, + { url = "https://files.pythonhosted.org/packages/2c/54/887f3092a85238093a0b2154bd629c89444f395618842e8b0c41783898ea/markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a", size = 21962, upload-time = "2025-09-27T18:36:35.099Z" }, + { url = "https://files.pythonhosted.org/packages/c9/2f/336b8c7b6f4a4d95e91119dc8521402461b74a485558d8f238a68312f11c/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", 
hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b", size = 23760, upload-time = "2025-09-27T18:36:36.001Z" }, + { url = "https://files.pythonhosted.org/packages/32/43/67935f2b7e4982ffb50a4d169b724d74b62a3964bc1a9a527f5ac4f1ee2b/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f", size = 21529, upload-time = "2025-09-27T18:36:36.906Z" }, + { url = "https://files.pythonhosted.org/packages/89/e0/4486f11e51bbba8b0c041098859e869e304d1c261e59244baa3d295d47b7/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b", size = 23015, upload-time = "2025-09-27T18:36:37.868Z" }, + { url = "https://files.pythonhosted.org/packages/2f/e1/78ee7a023dac597a5825441ebd17170785a9dab23de95d2c7508ade94e0e/markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d", size = 14540, upload-time = "2025-09-27T18:36:38.761Z" }, + { url = "https://files.pythonhosted.org/packages/aa/5b/bec5aa9bbbb2c946ca2733ef9c4ca91c91b6a24580193e891b5f7dbe8e1e/markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c", size = 15105, upload-time = "2025-09-27T18:36:39.701Z" }, + { url = "https://files.pythonhosted.org/packages/e5/f1/216fc1bbfd74011693a4fd837e7026152e89c4bcf3e77b6692fba9923123/markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f", size = 13906, upload-time = "2025-09-27T18:36:40.689Z" }, +] + +[[package]] +name = "marshmallow" +version = "3.26.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/79/de6c16cc902f4fc372236926b0ce2ab7845268dcc30fb2fbb7f71b418631/marshmallow-3.26.2.tar.gz", hash = 
"sha256:bbe2adb5a03e6e3571b573f42527c6fe926e17467833660bebd11593ab8dfd57", size = 222095, upload-time = "2025-12-22T06:53:53.309Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/2f/5108cb3ee4ba6501748c4908b908e55f42a5b66245b4cfe0c99326e1ef6e/marshmallow-3.26.2-py3-none-any.whl", hash = "sha256:013fa8a3c4c276c24d26d84ce934dc964e2aa794345a0f8c7e5a7191482c8a73", size = 50964, upload-time = "2025-12-22T06:53:51.801Z" }, +] + +[[package]] +name = "matplotlib" +version = "3.10.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "contourpy" }, + { name = "cycler" }, + { name = "fonttools" }, + { name = "kiwisolver" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pillow" }, + { name = "pyparsing" }, + { name = "python-dateutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8a/76/d3c6e3a13fe484ebe7718d14e269c9569c4eb0020a968a327acb3b9a8fe6/matplotlib-3.10.8.tar.gz", hash = "sha256:2299372c19d56bcd35cf05a2738308758d32b9eaed2371898d8f5bd33f084aa3", size = 34806269, upload-time = "2025-12-10T22:56:51.155Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/67/f997cdcbb514012eb0d10cd2b4b332667997fb5ebe26b8d41d04962fa0e6/matplotlib-3.10.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:64fcc24778ca0404ce0cb7b6b77ae1f4c7231cdd60e6778f999ee05cbd581b9a", size = 8260453, upload-time = "2025-12-10T22:55:30.709Z" }, + { url = "https://files.pythonhosted.org/packages/7e/65/07d5f5c7f7c994f12c768708bd2e17a4f01a2b0f44a1c9eccad872433e2e/matplotlib-3.10.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b9a5ca4ac220a0cdd1ba6bcba3608547117d30468fefce49bb26f55c1a3d5c58", size = 8148321, upload-time = "2025-12-10T22:55:33.265Z" }, + { url = "https://files.pythonhosted.org/packages/3e/f3/c5195b1ae57ef85339fd7285dfb603b22c8b4e79114bae5f4f0fcf688677/matplotlib-3.10.8-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:3ab4aabc72de4ff77b3ec33a6d78a68227bf1123465887f9905ba79184a1cc04", size = 8716944, upload-time = "2025-12-10T22:55:34.922Z" }, + { url = "https://files.pythonhosted.org/packages/00/f9/7638f5cc82ec8a7aa005de48622eecc3ed7c9854b96ba15bd76b7fd27574/matplotlib-3.10.8-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:24d50994d8c5816ddc35411e50a86ab05f575e2530c02752e02538122613371f", size = 9550099, upload-time = "2025-12-10T22:55:36.789Z" }, + { url = "https://files.pythonhosted.org/packages/57/61/78cd5920d35b29fd2a0fe894de8adf672ff52939d2e9b43cb83cd5ce1bc7/matplotlib-3.10.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:99eefd13c0dc3b3c1b4d561c1169e65fe47aab7b8158754d7c084088e2329466", size = 9613040, upload-time = "2025-12-10T22:55:38.715Z" }, + { url = "https://files.pythonhosted.org/packages/30/4e/c10f171b6e2f44d9e3a2b96efa38b1677439d79c99357600a62cc1e9594e/matplotlib-3.10.8-cp312-cp312-win_amd64.whl", hash = "sha256:dd80ecb295460a5d9d260df63c43f4afbdd832d725a531f008dad1664f458adf", size = 8142717, upload-time = "2025-12-10T22:55:41.103Z" }, + { url = "https://files.pythonhosted.org/packages/f1/76/934db220026b5fef85f45d51a738b91dea7d70207581063cd9bd8fafcf74/matplotlib-3.10.8-cp312-cp312-win_arm64.whl", hash = "sha256:3c624e43ed56313651bc18a47f838b60d7b8032ed348911c54906b130b20071b", size = 8012751, upload-time = "2025-12-10T22:55:42.684Z" }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = 
"sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, +] + +[[package]] +name = "minio" +version = "7.2.20" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "argon2-cffi" }, + { name = "certifi" }, + { name = "pycryptodome" }, + { name = "typing-extensions" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/40/df/6dfc6540f96a74125a11653cce717603fd5b7d0001a8e847b3e54e72d238/minio-7.2.20.tar.gz", hash = "sha256:95898b7a023fbbfde375985aa77e2cd6a0762268db79cf886f002a9ea8e68598", size = 136113, upload-time = "2025-11-27T00:37:15.569Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3e/9a/b697530a882588a84db616580f2ba5d1d515c815e11c30d219145afeec87/minio-7.2.20-py3-none-any.whl", hash = "sha256:eb33dd2fb80e04c3726a76b13241c6be3c4c46f8d81e1d58e757786f6501897e", size = 93751, upload-time = "2025-11-27T00:37:13.993Z" }, +] + +[[package]] +name = "mmcv" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "addict" }, + { name = "mmengine" }, + { name = "numpy" }, + { name = "opencv-python" }, + { name = "packaging" }, + { name = "pillow" }, + { name = "pyyaml" }, + { name = "regex", marker = "sys_platform == 'win32'" }, + { name = "yapf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/a2/57a733e7e84985a8a0e3101dfb8170fc9db92435c16afad253069ae3f9df/mmcv-2.2.0.tar.gz", hash = "sha256:ac479247e808d8802f89eadf04d4118de86bdfe81361ec5aed0cc1bf731c67c9", size = 479121, upload-time = "2024-04-24T14:24:28.064Z" } + +[[package]] +name = "mmengine" +version = "0.10.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "addict" }, + { name = "matplotlib" }, + { name = "numpy" }, + { name = "opencv-python" }, + { name = "pyyaml" }, + { name = "regex", marker = "sys_platform == 'win32'" }, + { name = "rich" }, + { name = "termcolor" }, 
+ { name = "yapf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/17/14/959360bbd8374e23fc1b720906999add16a3ac071a501636db12c5861ff5/mmengine-0.10.7.tar.gz", hash = "sha256:d20ffcc31127567e53dceff132612a87f0081de06cbb7ab2bdb7439125a69225", size = 378090, upload-time = "2025-03-04T12:23:09.568Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/98/8e/f98332248aad102511bea4ae19c0ddacd2f0a994f3ca4c82b7a369e0af8b/mmengine-0.10.7-py3-none-any.whl", hash = "sha256:262ac976a925562f78cd5fd14dd1bc9b680ed0aa81f0d85b723ef782f99c54ee", size = 452720, upload-time = "2025-03-04T12:23:06.339Z" }, +] + +[[package]] +name = "mmh3" +version = "5.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a7/af/f28c2c2f51f31abb4725f9a64bc7863d5f491f6539bd26aee2a1d21a649e/mmh3-5.2.0.tar.gz", hash = "sha256:1efc8fec8478e9243a78bb993422cf79f8ff85cb4cf6b79647480a31e0d950a8", size = 33582, upload-time = "2025-07-29T07:43:48.49Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/6a/d5aa7edb5c08e0bd24286c7d08341a0446f9a2fbbb97d96a8a6dd81935ee/mmh3-5.2.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:384eda9361a7bf83a85e09447e1feafe081034af9dd428893701b959230d84be", size = 56141, upload-time = "2025-07-29T07:42:13.456Z" }, + { url = "https://files.pythonhosted.org/packages/08/49/131d0fae6447bc4a7299ebdb1a6fb9d08c9f8dcf97d75ea93e8152ddf7ab/mmh3-5.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2c9da0d568569cc87315cb063486d761e38458b8ad513fedd3dc9263e1b81bcd", size = 40681, upload-time = "2025-07-29T07:42:14.306Z" }, + { url = "https://files.pythonhosted.org/packages/8f/6f/9221445a6bcc962b7f5ff3ba18ad55bba624bacdc7aa3fc0a518db7da8ec/mmh3-5.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86d1be5d63232e6eb93c50881aea55ff06eb86d8e08f9b5417c8c9b10db9db96", size = 40062, upload-time = "2025-07-29T07:42:15.08Z" }, + { url = 
"https://files.pythonhosted.org/packages/1e/d4/6bb2d0fef81401e0bb4c297d1eb568b767de4ce6fc00890bc14d7b51ecc4/mmh3-5.2.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:bf7bee43e17e81671c447e9c83499f53d99bf440bc6d9dc26a841e21acfbe094", size = 97333, upload-time = "2025-07-29T07:42:16.436Z" }, + { url = "https://files.pythonhosted.org/packages/44/e0/ccf0daff8134efbb4fbc10a945ab53302e358c4b016ada9bf97a6bdd50c1/mmh3-5.2.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7aa18cdb58983ee660c9c400b46272e14fa253c675ed963d3812487f8ca42037", size = 103310, upload-time = "2025-07-29T07:42:17.796Z" }, + { url = "https://files.pythonhosted.org/packages/02/63/1965cb08a46533faca0e420e06aff8bbaf9690a6f0ac6ae6e5b2e4544687/mmh3-5.2.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ae9d032488fcec32d22be6542d1a836f00247f40f320844dbb361393b5b22773", size = 106178, upload-time = "2025-07-29T07:42:19.281Z" }, + { url = "https://files.pythonhosted.org/packages/c2/41/c883ad8e2c234013f27f92061200afc11554ea55edd1bcf5e1accd803a85/mmh3-5.2.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e1861fb6b1d0453ed7293200139c0a9011eeb1376632e048e3766945b13313c5", size = 113035, upload-time = "2025-07-29T07:42:20.356Z" }, + { url = "https://files.pythonhosted.org/packages/df/b5/1ccade8b1fa625d634a18bab7bf08a87457e09d5ec8cf83ca07cbea9d400/mmh3-5.2.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:99bb6a4d809aa4e528ddfe2c85dd5239b78b9dd14be62cca0329db78505e7b50", size = 120784, upload-time = "2025-07-29T07:42:21.377Z" }, + { url = "https://files.pythonhosted.org/packages/77/1c/919d9171fcbdcdab242e06394464ccf546f7d0f3b31e0d1e3a630398782e/mmh3-5.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1f8d8b627799f4e2fcc7c034fed8f5f24dc7724ff52f69838a3d6d15f1ad4765", size 
= 99137, upload-time = "2025-07-29T07:42:22.344Z" }, + { url = "https://files.pythonhosted.org/packages/66/8a/1eebef5bd6633d36281d9fc83cf2e9ba1ba0e1a77dff92aacab83001cee4/mmh3-5.2.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b5995088dd7023d2d9f310a0c67de5a2b2e06a570ecfd00f9ff4ab94a67cde43", size = 98664, upload-time = "2025-07-29T07:42:23.269Z" }, + { url = "https://files.pythonhosted.org/packages/13/41/a5d981563e2ee682b21fb65e29cc0f517a6734a02b581359edd67f9d0360/mmh3-5.2.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:1a5f4d2e59d6bba8ef01b013c472741835ad961e7c28f50c82b27c57748744a4", size = 106459, upload-time = "2025-07-29T07:42:24.238Z" }, + { url = "https://files.pythonhosted.org/packages/24/31/342494cd6ab792d81e083680875a2c50fa0c5df475ebf0b67784f13e4647/mmh3-5.2.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:fd6e6c3d90660d085f7e73710eab6f5545d4854b81b0135a3526e797009dbda3", size = 110038, upload-time = "2025-07-29T07:42:25.629Z" }, + { url = "https://files.pythonhosted.org/packages/28/44/efda282170a46bb4f19c3e2b90536513b1d821c414c28469a227ca5a1789/mmh3-5.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c4a2f3d83879e3de2eb8cbf562e71563a8ed15ee9b9c2e77ca5d9f73072ac15c", size = 97545, upload-time = "2025-07-29T07:42:27.04Z" }, + { url = "https://files.pythonhosted.org/packages/68/8f/534ae319c6e05d714f437e7206f78c17e66daca88164dff70286b0e8ea0c/mmh3-5.2.0-cp312-cp312-win32.whl", hash = "sha256:2421b9d665a0b1ad724ec7332fb5a98d075f50bc51a6ff854f3a1882bd650d49", size = 40805, upload-time = "2025-07-29T07:42:28.032Z" }, + { url = "https://files.pythonhosted.org/packages/b8/f6/f6abdcfefcedab3c964868048cfe472764ed358c2bf6819a70dd4ed4ed3a/mmh3-5.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:72d80005b7634a3a2220f81fbeb94775ebd12794623bb2e1451701ea732b4aa3", size = 41597, upload-time = "2025-07-29T07:42:28.894Z" }, + { url = 
"https://files.pythonhosted.org/packages/15/fd/f7420e8cbce45c259c770cac5718badf907b302d3a99ec587ba5ce030237/mmh3-5.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:3d6bfd9662a20c054bc216f861fa330c2dac7c81e7fb8307b5e32ab5b9b4d2e0", size = 39350, upload-time = "2025-07-29T07:42:29.794Z" }, ] [[package]] name = "moviepy" -version = "2.2.1" +version = "1.0.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "decorator" }, { name = "imageio" }, { name = "imageio-ffmpeg" }, { name = "numpy" }, - { name = "pillow" }, { name = "proglog" }, - { name = "python-dotenv" }, + { name = "requests" }, + { name = "tqdm" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/de/61/15f9476e270f64c78a834e7459ca045d669f869cec24eed26807b8cd479d/moviepy-2.2.1.tar.gz", hash = "sha256:c80cb56815ece94e5e3e2d361aa40070eeb30a09d23a24c4e684d03e16deacb1", size = 58431438, upload-time = "2025-05-21T19:31:52.601Z" } +sdist = { url = "https://files.pythonhosted.org/packages/18/54/01a8c4e35c75ca9724d19a7e4de9dc23f0ceb8769102c7de056113af61c3/moviepy-1.0.3.tar.gz", hash = "sha256:2884e35d1788077db3ff89e763c5ba7bfddbd7ae9108c9bc809e7ba58fa433f5", size = 388311, upload-time = "2020-05-07T16:27:46.856Z" } + +[[package]] +name = "mpmath" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106, upload-time = "2023-03-07T16:47:11.061Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9a/73/7d3b2010baa0b5eb1e4dfa9e4385e89b6716be76f2fa21a6c0fe34b68e5a/moviepy-2.2.1-py3-none-any.whl", hash = "sha256:6b56803fec2ac54b557404126ac1160e65448e03798fa282bd23e8fab3795060", size = 129871, upload-time = "2025-05-21T19:31:50.11Z" }, + { url = 
"https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198, upload-time = "2023-03-07T16:47:09.197Z" }, ] [[package]] name = "multidict" -version = "6.5.0" +version = "6.7.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/46/b5/59f27b4ce9951a4bce56b88ba5ff5159486797ab18863f2b4c1c5e8465bd/multidict-6.5.0.tar.gz", hash = "sha256:942bd8002492ba819426a8d7aefde3189c1b87099cdf18aaaefefcf7f3f7b6d2", size = 98512, upload-time = "2025-06-17T14:15:56.556Z" } +sdist = { url = "https://files.pythonhosted.org/packages/80/1e/5492c365f222f907de1039b91f922b93fa4f764c713ee858d235495d8f50/multidict-6.7.0.tar.gz", hash = "sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5", size = 101834, upload-time = "2025-10-06T14:52:30.657Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/0a/fa/18f4950e00924f7e84c8195f4fc303295e14df23f713d64e778b8fa8b903/multidict-6.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1bb986c8ea9d49947bc325c51eced1ada6d8d9b4c5b15fd3fcdc3c93edef5a74", size = 73474, upload-time = "2025-06-17T14:14:13.528Z" }, - { url = "https://files.pythonhosted.org/packages/6c/66/0392a2a8948bccff57e4793c9dde3e5c088f01e8b7f8867ee58a2f187fc5/multidict-6.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:03c0923da300120830fc467e23805d63bbb4e98b94032bd863bc7797ea5fa653", size = 43741, upload-time = "2025-06-17T14:14:15.188Z" }, - { url = "https://files.pythonhosted.org/packages/98/3e/f48487c91b2a070566cfbab876d7e1ebe7deb0a8002e4e896a97998ae066/multidict-6.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4c78d5ec00fdd35c91680ab5cf58368faad4bd1a8721f87127326270248de9bc", size = 42143, upload-time = "2025-06-17T14:14:16.612Z" }, - { url = 
"https://files.pythonhosted.org/packages/3f/49/439c6cc1cd00365cf561bdd3579cc3fa1a0d38effb3a59b8d9562839197f/multidict-6.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aadc3cb78be90a887f8f6b73945b840da44b4a483d1c9750459ae69687940c97", size = 239303, upload-time = "2025-06-17T14:14:17.707Z" }, - { url = "https://files.pythonhosted.org/packages/c4/24/491786269e90081cb536e4d7429508725bc92ece176d1204a4449de7c41c/multidict-6.5.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:5b02e1ca495d71e07e652e4cef91adae3bf7ae4493507a263f56e617de65dafc", size = 236913, upload-time = "2025-06-17T14:14:18.981Z" }, - { url = "https://files.pythonhosted.org/packages/e8/76/bbe2558b820ebeca8a317ab034541790e8160ca4b1e450415383ac69b339/multidict-6.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7fe92a62326eef351668eec4e2dfc494927764a0840a1895cff16707fceffcd3", size = 250752, upload-time = "2025-06-17T14:14:20.297Z" }, - { url = "https://files.pythonhosted.org/packages/3e/e3/3977f2c1123f553ceff9f53cd4de04be2c1912333c6fabbcd51531655476/multidict-6.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7673ee4f63879ecd526488deb1989041abcb101b2d30a9165e1e90c489f3f7fb", size = 243937, upload-time = "2025-06-17T14:14:21.935Z" }, - { url = "https://files.pythonhosted.org/packages/b6/b8/7a6e9c13c79709cdd2f22ee849f058e6da76892d141a67acc0e6c30d845c/multidict-6.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa097ae2a29f573de7e2d86620cbdda5676d27772d4ed2669cfa9961a0d73955", size = 237419, upload-time = "2025-06-17T14:14:23.215Z" }, - { url = "https://files.pythonhosted.org/packages/84/9d/8557f5e88da71bc7e7a8ace1ada4c28197f3bfdc2dd6e51d3b88f2e16e8e/multidict-6.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:300da0fa4f8457d9c4bd579695496116563409e676ac79b5e4dca18e49d1c308", size = 237222, upload-time = "2025-06-17T14:14:24.516Z" }, - { url = "https://files.pythonhosted.org/packages/a3/3b/8f023ad60e7969cb6bc0683738d0e1618f5ff5723d6d2d7818dc6df6ad3d/multidict-6.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9a19bd108c35877b57393243d392d024cfbfdefe759fd137abb98f6fc910b64c", size = 247861, upload-time = "2025-06-17T14:14:25.839Z" }, - { url = "https://files.pythonhosted.org/packages/af/1c/9cf5a099ce7e3189906cf5daa72c44ee962dcb4c1983659f3a6f8a7446ab/multidict-6.5.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:0f32a1777465a35c35ddbbd7fc1293077938a69402fcc59e40b2846d04a120dd", size = 243917, upload-time = "2025-06-17T14:14:27.164Z" }, - { url = "https://files.pythonhosted.org/packages/6c/bb/88ee66ebeef56868044bac58feb1cc25658bff27b20e3cfc464edc181287/multidict-6.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9cc1e10c14ce8112d1e6d8971fe3cdbe13e314f68bea0e727429249d4a6ce164", size = 249214, upload-time = "2025-06-17T14:14:28.795Z" }, - { url = "https://files.pythonhosted.org/packages/3e/ec/a90e88cc4a1309f33088ab1cdd5c0487718f49dfb82c5ffc845bb17c1973/multidict-6.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:e95c5e07a06594bdc288117ca90e89156aee8cb2d7c330b920d9c3dd19c05414", size = 258682, upload-time = "2025-06-17T14:14:30.066Z" }, - { url = "https://files.pythonhosted.org/packages/d2/d8/16dd69a6811920a31f4e06114ebe67b1cd922c8b05c9c82b050706d0b6fe/multidict-6.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:40ff26f58323795f5cd2855e2718a1720a1123fb90df4553426f0efd76135462", size = 254254, upload-time = "2025-06-17T14:14:31.323Z" }, - { url = "https://files.pythonhosted.org/packages/ac/a8/90193a5f5ca1bdbf92633d69a25a2ef9bcac7b412b8d48c84d01a2732518/multidict-6.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:76803a29fd71869a8b59c2118c9dcfb3b8f9c8723e2cce6baeb20705459505cf", size = 247741, upload-time = 
"2025-06-17T14:14:32.717Z" }, - { url = "https://files.pythonhosted.org/packages/cd/43/29c7a747153c05b41d1f67455426af39ed88d6de3f21c232b8f2724bde13/multidict-6.5.0-cp312-cp312-win32.whl", hash = "sha256:df7ecbc65a53a2ce1b3a0c82e6ad1a43dcfe7c6137733f9176a92516b9f5b851", size = 41049, upload-time = "2025-06-17T14:14:33.941Z" }, - { url = "https://files.pythonhosted.org/packages/1e/e8/8f3fc32b7e901f3a2719764d64aeaf6ae77b4ba961f1c3a3cf3867766636/multidict-6.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:0ec1c3fbbb0b655a6540bce408f48b9a7474fd94ed657dcd2e890671fefa7743", size = 44700, upload-time = "2025-06-17T14:14:35.016Z" }, - { url = "https://files.pythonhosted.org/packages/24/e4/e250806adc98d524d41e69c8d4a42bc3513464adb88cb96224df12928617/multidict-6.5.0-cp312-cp312-win_arm64.whl", hash = "sha256:2d24a00d34808b22c1f15902899b9d82d0faeca9f56281641c791d8605eacd35", size = 41703, upload-time = "2025-06-17T14:14:36.168Z" }, - { url = "https://files.pythonhosted.org/packages/1a/c9/092c4e9402b6d16de761cff88cb842a5c8cc50ccecaf9c4481ba53264b9e/multidict-6.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:53d92df1752df67a928fa7f884aa51edae6f1cf00eeb38cbcf318cf841c17456", size = 73486, upload-time = "2025-06-17T14:14:37.238Z" }, - { url = "https://files.pythonhosted.org/packages/08/f9/6f7ddb8213f5fdf4db48d1d640b78e8aef89b63a5de8a2313286db709250/multidict-6.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:680210de2c38eef17ce46b8df8bf2c1ece489261a14a6e43c997d49843a27c99", size = 43745, upload-time = "2025-06-17T14:14:38.32Z" }, - { url = "https://files.pythonhosted.org/packages/f3/a7/b9be0163bfeee3bb08a77a1705e24eb7e651d594ea554107fac8a1ca6a4d/multidict-6.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e279259bcb936732bfa1a8eec82b5d2352b3df69d2fa90d25808cfc403cee90a", size = 42135, upload-time = "2025-06-17T14:14:39.897Z" }, - { url = 
"https://files.pythonhosted.org/packages/8e/30/93c8203f943a417bda3c573a34d5db0cf733afdfffb0ca78545c7716dbd8/multidict-6.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1c185fc1069781e3fc8b622c4331fb3b433979850392daa5efbb97f7f9959bb", size = 238585, upload-time = "2025-06-17T14:14:41.332Z" }, - { url = "https://files.pythonhosted.org/packages/9d/fe/2582b56a1807604774f566eeef183b0d6b148f4b89d1612cd077567b2e1e/multidict-6.5.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6bb5f65ff91daf19ce97f48f63585e51595539a8a523258b34f7cef2ec7e0617", size = 236174, upload-time = "2025-06-17T14:14:42.602Z" }, - { url = "https://files.pythonhosted.org/packages/9b/c4/d8b66d42d385bd4f974cbd1eaa8b265e6b8d297249009f312081d5ded5c7/multidict-6.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8646b4259450c59b9286db280dd57745897897284f6308edbdf437166d93855", size = 250145, upload-time = "2025-06-17T14:14:43.944Z" }, - { url = "https://files.pythonhosted.org/packages/bc/64/62feda5093ee852426aae3df86fab079f8bf1cdbe403e1078c94672ad3ec/multidict-6.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d245973d4ecc04eea0a8e5ebec7882cf515480036e1b48e65dffcfbdf86d00be", size = 243470, upload-time = "2025-06-17T14:14:45.343Z" }, - { url = "https://files.pythonhosted.org/packages/67/dc/9f6fa6e854625cf289c0e9f4464b40212a01f76b2f3edfe89b6779b4fb93/multidict-6.5.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a133e7ddc9bc7fb053733d0ff697ce78c7bf39b5aec4ac12857b6116324c8d75", size = 236968, upload-time = "2025-06-17T14:14:46.609Z" }, - { url = "https://files.pythonhosted.org/packages/46/ae/4b81c6e3745faee81a156f3f87402315bdccf04236f75c03e37be19c94ff/multidict-6.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:80d696fa38d738fcebfd53eec4d2e3aeb86a67679fd5e53c325756682f152826", size = 236575, upload-time = "2025-06-17T14:14:47.929Z" }, - { url = "https://files.pythonhosted.org/packages/8a/fa/4089d7642ea344226e1bfab60dd588761d4791754f8072e911836a39bedf/multidict-6.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:20d30c9410ac3908abbaa52ee5967a754c62142043cf2ba091e39681bd51d21a", size = 247632, upload-time = "2025-06-17T14:14:49.525Z" }, - { url = "https://files.pythonhosted.org/packages/16/ee/a353dac797de0f28fb7f078cc181c5f2eefe8dd16aa11a7100cbdc234037/multidict-6.5.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:6c65068cc026f217e815fa519d8e959a7188e94ec163ffa029c94ca3ef9d4a73", size = 243520, upload-time = "2025-06-17T14:14:50.83Z" }, - { url = "https://files.pythonhosted.org/packages/50/ec/560deb3d2d95822d6eb1bcb1f1cb728f8f0197ec25be7c936d5d6a5d133c/multidict-6.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e355ac668a8c3e49c2ca8daa4c92f0ad5b705d26da3d5af6f7d971e46c096da7", size = 248551, upload-time = "2025-06-17T14:14:52.229Z" }, - { url = "https://files.pythonhosted.org/packages/10/85/ddf277e67c78205f6695f2a7639be459bca9cc353b962fd8085a492a262f/multidict-6.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:08db204213d0375a91a381cae0677ab95dd8c67a465eb370549daf6dbbf8ba10", size = 258362, upload-time = "2025-06-17T14:14:53.934Z" }, - { url = "https://files.pythonhosted.org/packages/02/fc/d64ee1df9b87c5210f2d4c419cab07f28589c81b4e5711eda05a122d0614/multidict-6.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:ffa58e3e215af8f6536dc837a990e456129857bb6fd546b3991be470abd9597a", size = 253862, upload-time = "2025-06-17T14:14:55.323Z" }, - { url = "https://files.pythonhosted.org/packages/c9/7c/a2743c00d9e25f4826d3a77cc13d4746398872cf21c843eef96bb9945665/multidict-6.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3e86eb90015c6f21658dbd257bb8e6aa18bdb365b92dd1fba27ec04e58cdc31b", size = 247391, upload-time = 
"2025-06-17T14:14:57.293Z" }, - { url = "https://files.pythonhosted.org/packages/9b/03/7773518db74c442904dbd349074f1e7f2a854cee4d9529fc59e623d3949e/multidict-6.5.0-cp313-cp313-win32.whl", hash = "sha256:f34a90fbd9959d0f857323bd3c52b3e6011ed48f78d7d7b9e04980b8a41da3af", size = 41115, upload-time = "2025-06-17T14:14:59.33Z" }, - { url = "https://files.pythonhosted.org/packages/eb/9a/6fc51b1dc11a7baa944bc101a92167d8b0f5929d376a8c65168fc0d35917/multidict-6.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:fcb2aa79ac6aef8d5b709bbfc2fdb1d75210ba43038d70fbb595b35af470ce06", size = 44768, upload-time = "2025-06-17T14:15:00.427Z" }, - { url = "https://files.pythonhosted.org/packages/82/2d/0d010be24b663b3c16e3d3307bbba2de5ae8eec496f6027d5c0515b371a8/multidict-6.5.0-cp313-cp313-win_arm64.whl", hash = "sha256:6dcee5e7e92060b4bb9bb6f01efcbb78c13d0e17d9bc6eec71660dd71dc7b0c2", size = 41770, upload-time = "2025-06-17T14:15:01.854Z" }, - { url = "https://files.pythonhosted.org/packages/aa/d1/a71711a5f32f84b7b036e82182e3250b949a0ce70d51a2c6a4079e665449/multidict-6.5.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:cbbc88abea2388fde41dd574159dec2cda005cb61aa84950828610cb5010f21a", size = 80450, upload-time = "2025-06-17T14:15:02.968Z" }, - { url = "https://files.pythonhosted.org/packages/0f/a2/953a9eede63a98fcec2c1a2c1a0d88de120056219931013b871884f51b43/multidict-6.5.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:70b599f70ae6536e5976364d3c3cf36f40334708bd6cebdd1e2438395d5e7676", size = 46971, upload-time = "2025-06-17T14:15:04.149Z" }, - { url = "https://files.pythonhosted.org/packages/44/61/60250212953459edda2c729e1d85130912f23c67bd4f585546fe4bdb1578/multidict-6.5.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:828bab777aa8d29d59700018178061854e3a47727e0611cb9bec579d3882de3b", size = 45548, upload-time = "2025-06-17T14:15:05.666Z" }, - { url = 
"https://files.pythonhosted.org/packages/11/b6/e78ee82e96c495bc2582b303f68bed176b481c8d81a441fec07404fce2ca/multidict-6.5.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9695fc1462f17b131c111cf0856a22ff154b0480f86f539d24b2778571ff94d", size = 238545, upload-time = "2025-06-17T14:15:06.88Z" }, - { url = "https://files.pythonhosted.org/packages/5a/0f/6132ca06670c8d7b374c3a4fd1ba896fc37fbb66b0de903f61db7d1020ec/multidict-6.5.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0b5ac6ebaf5d9814b15f399337ebc6d3a7f4ce9331edd404e76c49a01620b68d", size = 229931, upload-time = "2025-06-17T14:15:08.24Z" }, - { url = "https://files.pythonhosted.org/packages/c0/63/d9957c506e6df6b3e7a194f0eea62955c12875e454b978f18262a65d017b/multidict-6.5.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84a51e3baa77ded07be4766a9e41d977987b97e49884d4c94f6d30ab6acaee14", size = 248181, upload-time = "2025-06-17T14:15:09.907Z" }, - { url = "https://files.pythonhosted.org/packages/43/3f/7d5490579640db5999a948e2c41d4a0efd91a75989bda3e0a03a79c92be2/multidict-6.5.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8de67f79314d24179e9b1869ed15e88d6ba5452a73fc9891ac142e0ee018b5d6", size = 241846, upload-time = "2025-06-17T14:15:11.596Z" }, - { url = "https://files.pythonhosted.org/packages/e1/f7/252b1ce949ece52bba4c0de7aa2e3a3d5964e800bce71fb778c2e6c66f7c/multidict-6.5.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17f78a52c214481d30550ec18208e287dfc4736f0c0148208334b105fd9e0887", size = 232893, upload-time = "2025-06-17T14:15:12.946Z" }, - { url = "https://files.pythonhosted.org/packages/45/7e/0070bfd48c16afc26e056f2acce49e853c0d604a69c7124bc0bbdb1bcc0a/multidict-6.5.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:2966d0099cb2e2039f9b0e73e7fd5eb9c85805681aa2a7f867f9d95b35356921", size = 228567, upload-time = "2025-06-17T14:15:14.267Z" }, - { url = "https://files.pythonhosted.org/packages/2a/31/90551c75322113ebf5fd9c5422e8641d6952f6edaf6b6c07fdc49b1bebdd/multidict-6.5.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:86fb42ed5ed1971c642cc52acc82491af97567534a8e381a8d50c02169c4e684", size = 246188, upload-time = "2025-06-17T14:15:15.985Z" }, - { url = "https://files.pythonhosted.org/packages/cc/e2/aa4b02a55e7767ff292871023817fe4db83668d514dab7ccbce25eaf7659/multidict-6.5.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:4e990cbcb6382f9eae4ec720bcac6a1351509e6fc4a5bb70e4984b27973934e6", size = 235178, upload-time = "2025-06-17T14:15:17.395Z" }, - { url = "https://files.pythonhosted.org/packages/7d/5c/f67e726717c4b138b166be1700e2b56e06fbbcb84643d15f9a9d7335ff41/multidict-6.5.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:d99a59d64bb1f7f2117bec837d9e534c5aeb5dcedf4c2b16b9753ed28fdc20a3", size = 243422, upload-time = "2025-06-17T14:15:18.939Z" }, - { url = "https://files.pythonhosted.org/packages/e5/1c/15fa318285e26a50aa3fa979bbcffb90f9b4d5ec58882d0590eda067d0da/multidict-6.5.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:e8ef15cc97c9890212e1caf90f0d63f6560e1e101cf83aeaf63a57556689fb34", size = 254898, upload-time = "2025-06-17T14:15:20.31Z" }, - { url = "https://files.pythonhosted.org/packages/ad/3d/d6c6d1c2e9b61ca80313912d30bb90d4179335405e421ef0a164eac2c0f9/multidict-6.5.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:b8a09aec921b34bd8b9f842f0bcfd76c6a8c033dc5773511e15f2d517e7e1068", size = 247129, upload-time = "2025-06-17T14:15:21.665Z" }, - { url = "https://files.pythonhosted.org/packages/29/15/1568258cf0090bfa78d44be66247cfdb16e27dfd935c8136a1e8632d3057/multidict-6.5.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ff07b504c23b67f2044533244c230808a1258b3493aaf3ea2a0785f70b7be461", size = 243841, upload-time = 
"2025-06-17T14:15:23.38Z" }, - { url = "https://files.pythonhosted.org/packages/65/57/64af5dbcfd61427056e840c8e520b502879d480f9632fbe210929fd87393/multidict-6.5.0-cp313-cp313t-win32.whl", hash = "sha256:9232a117341e7e979d210e41c04e18f1dc3a1d251268df6c818f5334301274e1", size = 46761, upload-time = "2025-06-17T14:15:24.733Z" }, - { url = "https://files.pythonhosted.org/packages/26/a8/cac7f7d61e188ff44f28e46cb98f9cc21762e671c96e031f06c84a60556e/multidict-6.5.0-cp313-cp313t-win_amd64.whl", hash = "sha256:44cb5c53fb2d4cbcee70a768d796052b75d89b827643788a75ea68189f0980a1", size = 52112, upload-time = "2025-06-17T14:15:25.906Z" }, - { url = "https://files.pythonhosted.org/packages/51/9f/076533feb1b5488d22936da98b9c217205cfbf9f56f7174e8c5c86d86fe6/multidict-6.5.0-cp313-cp313t-win_arm64.whl", hash = "sha256:51d33fafa82640c0217391d4ce895d32b7e84a832b8aee0dcc1b04d8981ec7f4", size = 44358, upload-time = "2025-06-17T14:15:27.117Z" }, - { url = "https://files.pythonhosted.org/packages/44/d8/45e8fc9892a7386d074941429e033adb4640e59ff0780d96a8cf46fe788e/multidict-6.5.0-py3-none-any.whl", hash = "sha256:5634b35f225977605385f56153bd95a7133faffc0ffe12ad26e10517537e8dfc", size = 12181, upload-time = "2025-06-17T14:15:55.156Z" }, + { url = "https://files.pythonhosted.org/packages/c2/9e/9f61ac18d9c8b475889f32ccfa91c9f59363480613fc807b6e3023d6f60b/multidict-6.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8a3862568a36d26e650a19bb5cbbba14b71789032aebc0423f8cc5f150730184", size = 76877, upload-time = "2025-10-06T14:49:20.884Z" }, + { url = "https://files.pythonhosted.org/packages/38/6f/614f09a04e6184f8824268fce4bc925e9849edfa654ddd59f0b64508c595/multidict-6.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:960c60b5849b9b4f9dcc9bea6e3626143c252c74113df2c1540aebce70209b45", size = 45467, upload-time = "2025-10-06T14:49:22.054Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/93/c4f67a436dd026f2e780c433277fff72be79152894d9fc36f44569cab1a6/multidict-6.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2049be98fb57a31b4ccf870bf377af2504d4ae35646a19037ec271e4c07998aa", size = 43834, upload-time = "2025-10-06T14:49:23.566Z" }, + { url = "https://files.pythonhosted.org/packages/7f/f5/013798161ca665e4a422afbc5e2d9e4070142a9ff8905e482139cd09e4d0/multidict-6.7.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0934f3843a1860dd465d38895c17fce1f1cb37295149ab05cd1b9a03afacb2a7", size = 250545, upload-time = "2025-10-06T14:49:24.882Z" }, + { url = "https://files.pythonhosted.org/packages/71/2f/91dbac13e0ba94669ea5119ba267c9a832f0cb65419aca75549fcf09a3dc/multidict-6.7.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b3e34f3a1b8131ba06f1a73adab24f30934d148afcd5f5de9a73565a4404384e", size = 258305, upload-time = "2025-10-06T14:49:26.778Z" }, + { url = "https://files.pythonhosted.org/packages/ef/b0/754038b26f6e04488b48ac621f779c341338d78503fb45403755af2df477/multidict-6.7.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:efbb54e98446892590dc2458c19c10344ee9a883a79b5cec4bc34d6656e8d546", size = 242363, upload-time = "2025-10-06T14:49:28.562Z" }, + { url = "https://files.pythonhosted.org/packages/87/15/9da40b9336a7c9fa606c4cf2ed80a649dffeb42b905d4f63a1d7eb17d746/multidict-6.7.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a35c5fc61d4f51eb045061e7967cfe3123d622cd500e8868e7c0c592a09fedc4", size = 268375, upload-time = "2025-10-06T14:49:29.96Z" }, + { url = "https://files.pythonhosted.org/packages/82/72/c53fcade0cc94dfaad583105fd92b3a783af2091eddcb41a6d5a52474000/multidict-6.7.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:29fe6740ebccba4175af1b9b87bf553e9c15cd5868ee967e010efcf94e4fd0f1", size = 269346, upload-time = "2025-10-06T14:49:31.404Z" }, + { url = "https://files.pythonhosted.org/packages/0d/e2/9baffdae21a76f77ef8447f1a05a96ec4bc0a24dae08767abc0a2fe680b8/multidict-6.7.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:123e2a72e20537add2f33a79e605f6191fba2afda4cbb876e35c1a7074298a7d", size = 256107, upload-time = "2025-10-06T14:49:32.974Z" }, + { url = "https://files.pythonhosted.org/packages/3c/06/3f06f611087dc60d65ef775f1fb5aca7c6d61c6db4990e7cda0cef9b1651/multidict-6.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b284e319754366c1aee2267a2036248b24eeb17ecd5dc16022095e747f2f4304", size = 253592, upload-time = "2025-10-06T14:49:34.52Z" }, + { url = "https://files.pythonhosted.org/packages/20/24/54e804ec7945b6023b340c412ce9c3f81e91b3bf5fa5ce65558740141bee/multidict-6.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:803d685de7be4303b5a657b76e2f6d1240e7e0a8aa2968ad5811fa2285553a12", size = 251024, upload-time = "2025-10-06T14:49:35.956Z" }, + { url = "https://files.pythonhosted.org/packages/14/48/011cba467ea0b17ceb938315d219391d3e421dfd35928e5dbdc3f4ae76ef/multidict-6.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c04a328260dfd5db8c39538f999f02779012268f54614902d0afc775d44e0a62", size = 251484, upload-time = "2025-10-06T14:49:37.631Z" }, + { url = "https://files.pythonhosted.org/packages/0d/2f/919258b43bb35b99fa127435cfb2d91798eb3a943396631ef43e3720dcf4/multidict-6.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8a19cdb57cd3df4cd865849d93ee14920fb97224300c88501f16ecfa2604b4e0", size = 263579, upload-time = "2025-10-06T14:49:39.502Z" }, + { url = "https://files.pythonhosted.org/packages/31/22/a0e884d86b5242b5a74cf08e876bdf299e413016b66e55511f7a804a366e/multidict-6.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:9b2fd74c52accced7e75de26023b7dccee62511a600e62311b918ec5c168fc2a", size = 259654, upload-time = "2025-10-06T14:49:41.32Z" }, + { url = "https://files.pythonhosted.org/packages/b2/e5/17e10e1b5c5f5a40f2fcbb45953c9b215f8a4098003915e46a93f5fcaa8f/multidict-6.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3e8bfdd0e487acf992407a140d2589fe598238eaeffa3da8448d63a63cd363f8", size = 251511, upload-time = "2025-10-06T14:49:46.021Z" }, + { url = "https://files.pythonhosted.org/packages/e3/9a/201bb1e17e7af53139597069c375e7b0dcbd47594604f65c2d5359508566/multidict-6.7.0-cp312-cp312-win32.whl", hash = "sha256:dd32a49400a2c3d52088e120ee00c1e3576cbff7e10b98467962c74fdb762ed4", size = 41895, upload-time = "2025-10-06T14:49:48.718Z" }, + { url = "https://files.pythonhosted.org/packages/46/e2/348cd32faad84eaf1d20cce80e2bb0ef8d312c55bca1f7fa9865e7770aaf/multidict-6.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:92abb658ef2d7ef22ac9f8bb88e8b6c3e571671534e029359b6d9e845923eb1b", size = 46073, upload-time = "2025-10-06T14:49:50.28Z" }, + { url = "https://files.pythonhosted.org/packages/25/ec/aad2613c1910dce907480e0c3aa306905830f25df2e54ccc9dea450cb5aa/multidict-6.7.0-cp312-cp312-win_arm64.whl", hash = "sha256:490dab541a6a642ce1a9d61a4781656b346a55c13038f0b1244653828e3a83ec", size = 43226, upload-time = "2025-10-06T14:49:52.304Z" }, + { url = "https://files.pythonhosted.org/packages/b7/da/7d22601b625e241d4f23ef1ebff8acfc60da633c9e7e7922e24d10f592b3/multidict-6.7.0-py3-none-any.whl", hash = "sha256:394fc5c42a333c9ffc3e421a4c85e08580d990e08b99f6bf35b4132114c5dcb3", size = 12317, upload-time = "2025-10-06T14:52:29.272Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 
6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "networkx" +version = "3.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/51/63fe664f3908c97be9d2e4f1158eb633317598cfa6e1fc14af5383f17512/networkx-3.6.1.tar.gz", hash = "sha256:26b7c357accc0c8cde558ad486283728b65b6a95d85ee1cd66bafab4c8168509", size = 2517025, upload-time = "2025-12-08T17:02:39.908Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/c9/b2622292ea83fbb4ec318f5b9ab867d0a28ab43c5717bb85b0a5f6b3b0a4/networkx-3.6.1-py3-none-any.whl", hash = "sha256:d47fbf302e7d9cbbb9e2555a0d267983d2aa476bac30e90dfbe5669bd57f3762", size = 2068504, upload-time = "2025-12-08T17:02:38.159Z" }, ] [[package]] @@ -673,6 +1817,360 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/16/2e/86f24451c2d530c88daf997cb8d6ac622c1d40d19f5a031ed68a4b73a374/numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818", size = 15517754, upload-time = "2024-02-05T23:58:36.364Z" }, ] +[[package]] +name = "numpy-typing-compat" +version = "20251206.1.25" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f9/63/f166333649396d083b9e95b5aa15feb56f9168f766a72540132206119937/numpy_typing_compat-20251206.1.25.tar.gz", hash = "sha256:27ff188fe70102312ea5e8553423897a4f3365eee15aa2a7ee1fcf6efc6fed12", size = 5060, upload-time = "2025-12-06T20:02:00.974Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/b4/cb/99443f79c562466d128e3bf94d1507146fba386ec2ce85e97fe916225691/numpy_typing_compat-20251206.1.25-py3-none-any.whl", hash = "sha256:9be87412b68c1e9e193e7bfd996cae4ec07de5880c19d70bf81f890f51644e7f", size = 6354, upload-time = "2025-12-06T20:01:51.007Z" }, +] + +[[package]] +name = "nvidia-cublas-cu12" +version = "12.8.4.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/61/e24b560ab2e2eaeb3c839129175fb330dfcfc29e5203196e5541a4c44682/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:8ac4e771d5a348c551b2a426eda6193c19aa630236b418086020df5ba9667142", size = 594346921, upload-time = "2025-03-07T01:44:31.254Z" }, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.8.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f8/02/2adcaa145158bf1a8295d83591d22e4103dbfd821bcaf6f3f53151ca4ffa/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ea0cb07ebda26bb9b29ba82cda34849e73c166c18162d3913575b0c9db9a6182", size = 10248621, upload-time = "2025-03-07T01:40:21.213Z" }, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.8.93" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/6b/32f747947df2da6994e999492ab306a903659555dddc0fbdeb9d71f75e52/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:a7756528852ef889772a84c6cd89d41dfa74667e24cca16bb31f8f061e3e9994", size = 88040029, upload-time = "2025-03-07T01:42:13.562Z" }, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.8.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/0d/9b/a997b638fcd068ad6e4d53b8551a7d30fe8b404d6f1804abf1df69838932/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:adade8dcbd0edf427b7204d480d6066d33902cab2a4707dcfc48a2d0fd44ab90", size = 954765, upload-time = "2025-03-07T01:40:01.615Z" }, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "9.10.2.21" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/ba/51/e123d997aa098c61d029f76663dedbfb9bc8dcf8c60cbd6adbe42f76d049/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:949452be657fa16687d0930933f032835951ef0892b37d2d53824d1a84dc97a8", size = 706758467, upload-time = "2025-06-06T21:54:08.597Z" }, +] + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.3.3.83" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/13/ee4e00f30e676b66ae65b4f08cb5bcbb8392c03f54f2d5413ea99a5d1c80/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d2dd21ec0b88cf61b62e6b43564355e5222e4a3fb394cac0db101f2dd0d4f74", size = 193118695, upload-time = "2025-03-07T01:45:27.821Z" }, +] + +[[package]] +name = "nvidia-cufile-cu12" +version = "1.13.1.3" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bb/fe/1bcba1dfbfb8d01be8d93f07bfc502c93fa23afa6fd5ab3fc7c1df71038a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:1d069003be650e131b21c932ec3d8969c1715379251f8d23a1860554b1cb24fc", size = 1197834, upload-time = "2025-03-07T01:45:50.723Z" }, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.9.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/aa/6584b56dc84ebe9cf93226a5cde4d99080c8e90ab40f0c27bda7a0f29aa1/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:b32331d4f4df5d6eefa0554c565b626c7216f87a06a4f56fab27c3b68a830ec9", size = 63619976, upload-time = "2025-03-07T01:46:23.323Z" }, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.7.3.90" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-cusparse-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/48/9a13d2975803e8cf2777d5ed57b87a0b6ca2cc795f9a4f59796a910bfb80/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:4376c11ad263152bd50ea295c05370360776f8c3427b30991df774f9fb26c450", size = 267506905, upload-time = "2025-03-07T01:47:16.273Z" }, +] + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.5.8.93" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, +] +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/c2/f5/e1854cb2f2bcd4280c44736c93550cc300ff4b8c95ebe370d0aa7d2b473d/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ec05d76bbbd8b61b06a80e1eaf8cf4959c3d4ce8e711b65ebd0443bb0ebb13b", size = 288216466, upload-time = "2025-03-07T01:48:13.779Z" }, +] + +[[package]] +name = "nvidia-cusparselt-cu12" +version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/56/79/12978b96bd44274fe38b5dde5cfb660b1d114f70a65ef962bcbbed99b549/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f1bb701d6b930d5a7cea44c19ceb973311500847f81b634d802b7b539dc55623", size = 287193691, upload-time = "2025-02-26T00:15:44.104Z" }, +] + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.27.5" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6e/89/f7a07dc961b60645dbbf42e80f2bc85ade7feb9a491b11a1e973aa00071f/nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ad730cf15cb5d25fe849c6e6ca9eb5b76db16a80f13f425ac68d8e2e55624457", size = 322348229, upload-time = "2025-06-26T04:11:28.385Z" }, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.8.93" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/74/86a07f1d0f42998ca31312f998bd3b9a7eff7f52378f4f270c8679c77fb9/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:81ff63371a7ebd6e6451970684f916be2eab07321b73c9d244dc2b4da7f73b88", size = 39254836, upload-time = "2025-03-07T01:49:55.661Z" }, +] + +[[package]] +name = "nvidia-nvshmem-cu12" +version = "3.3.20" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/3b/6c/99acb2f9eb85c29fc6f3a7ac4dccfd992e22666dd08a642b303311326a97/nvidia_nvshmem_cu12-3.3.20-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d00f26d3f9b2e3c3065be895e3059d6479ea5c638a3f38c9fec49b1b9dd7c1e5", size = 124657145, upload-time = "2025-08-04T20:25:19.995Z" }, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.8.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/eb/86626c1bbc2edb86323022371c39aa48df6fd8b0a1647bc274577f72e90b/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5b17e2001cc0d751a5bc2c6ec6d26ad95913324a4adb86788c944f8ce9ba441f", size = 89954, upload-time = "2025-03-07T01:42:44.131Z" }, +] + +[[package]] +name = "oauthlib" +version = "3.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/5f/19930f824ffeb0ad4372da4812c50edbd1434f678c90c2733e1188edfc63/oauthlib-3.3.1.tar.gz", hash = "sha256:0f0f8aa759826a193cf66c12ea1af1637f87b9b4622d46e866952bb022e538c9", size = 185918, upload-time = "2025-06-19T22:48:08.269Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/9c/92789c596b8df838baa98fa71844d84283302f7604ed565dafe5a6b5041a/oauthlib-3.3.1-py3-none-any.whl", hash = "sha256:88119c938d2b8fb88561af5f6ee0eec8cc8d552b7bb1f712743136eb7523b7a1", size = 160065, upload-time = "2025-06-19T22:48:06.508Z" }, +] + +[[package]] +name = "ollama" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, + { name = "pydantic" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9d/5a/652dac4b7affc2b37b95386f8ae78f22808af09d720689e3d7a86b6ed98e/ollama-0.6.1.tar.gz", hash = "sha256:478c67546836430034b415ed64fa890fd3d1ff91781a9d548b3325274e69d7c6", size = 51620, upload-time = "2025-11-13T23:02:17.416Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/47/4f/4a617ee93d8208d2bcf26b2d8b9402ceaed03e3853c754940e2290fed063/ollama-0.6.1-py3-none-any.whl", hash = "sha256:fc4c984b345735c5486faeee67d8a265214a31cbb828167782dc642ce0a2bf8c", size = 14354, upload-time = "2025-11-13T23:02:16.292Z" }, +] + +[[package]] +name = "onnxruntime" +version = "1.23.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coloredlogs" }, + { name = "flatbuffers" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "protobuf" }, + { name = "sympy" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/1b/9e/f748cd64161213adeef83d0cb16cb8ace1e62fa501033acdd9f9341fff57/onnxruntime-1.23.2-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:b8f029a6b98d3cf5be564d52802bb50a8489ab73409fa9db0bf583eabb7c2321", size = 17195929, upload-time = "2025-10-22T03:47:36.24Z" }, + { url = "https://files.pythonhosted.org/packages/91/9d/a81aafd899b900101988ead7fb14974c8a58695338ab6a0f3d6b0100f30b/onnxruntime-1.23.2-cp312-cp312-macosx_13_0_x86_64.whl", hash = "sha256:218295a8acae83905f6f1aed8cacb8e3eb3bd7513a13fe4ba3b2664a19fc4a6b", size = 19157705, upload-time = "2025-10-22T03:46:40.415Z" }, + { url = "https://files.pythonhosted.org/packages/3c/35/4e40f2fba272a6698d62be2cd21ddc3675edfc1a4b9ddefcc4648f115315/onnxruntime-1.23.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:76ff670550dc23e58ea9bc53b5149b99a44e63b34b524f7b8547469aaa0dcb8c", size = 15226915, upload-time = "2025-10-22T03:46:27.773Z" }, + { url = "https://files.pythonhosted.org/packages/ef/88/9cc25d2bafe6bc0d4d3c1db3ade98196d5b355c0b273e6a5dc09c5d5d0d5/onnxruntime-1.23.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f9b4ae77f8e3c9bee50c27bc1beede83f786fe1d52e99ac85aa8d65a01e9b77", size = 17382649, upload-time = "2025-10-22T03:47:02.782Z" }, + { url = 
"https://files.pythonhosted.org/packages/c0/b4/569d298f9fc4d286c11c45e85d9ffa9e877af12ace98af8cab52396e8f46/onnxruntime-1.23.2-cp312-cp312-win_amd64.whl", hash = "sha256:25de5214923ce941a3523739d34a520aac30f21e631de53bba9174dc9c004435", size = 13470528, upload-time = "2025-10-22T03:47:28.106Z" }, +] + +[[package]] +name = "opencv-python" +version = "4.11.0.86" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/17/06/68c27a523103dad5837dc5b87e71285280c4f098c60e4fe8a8db6486ab09/opencv-python-4.11.0.86.tar.gz", hash = "sha256:03d60ccae62304860d232272e4a4fda93c39d595780cb40b161b310244b736a4", size = 95171956, upload-time = "2025-01-16T13:52:24.737Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/4d/53b30a2a3ac1f75f65a59eb29cf2ee7207ce64867db47036ad61743d5a23/opencv_python-4.11.0.86-cp37-abi3-macosx_13_0_arm64.whl", hash = "sha256:432f67c223f1dc2824f5e73cdfcd9db0efc8710647d4e813012195dc9122a52a", size = 37326322, upload-time = "2025-01-16T13:52:25.887Z" }, + { url = "https://files.pythonhosted.org/packages/3b/84/0a67490741867eacdfa37bc18df96e08a9d579583b419010d7f3da8ff503/opencv_python-4.11.0.86-cp37-abi3-macosx_13_0_x86_64.whl", hash = "sha256:9d05ef13d23fe97f575153558653e2d6e87103995d54e6a35db3f282fe1f9c66", size = 56723197, upload-time = "2025-01-16T13:55:21.222Z" }, + { url = "https://files.pythonhosted.org/packages/f3/bd/29c126788da65c1fb2b5fb621b7fed0ed5f9122aa22a0868c5e2c15c6d23/opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b92ae2c8852208817e6776ba1ea0d6b1e0a1b5431e971a2a0ddd2a8cc398202", size = 42230439, upload-time = "2025-01-16T13:51:35.822Z" }, + { url = "https://files.pythonhosted.org/packages/2c/8b/90eb44a40476fa0e71e05a0283947cfd74a5d36121a11d926ad6f3193cc4/opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6b02611523803495003bd87362db3e1d2a0454a6a63025dc6658a9830570aa0d", size = 62986597, upload-time = "2025-01-16T13:52:08.836Z" }, + { url = "https://files.pythonhosted.org/packages/fb/d7/1d5941a9dde095468b288d989ff6539dd69cd429dbf1b9e839013d21b6f0/opencv_python-4.11.0.86-cp37-abi3-win32.whl", hash = "sha256:810549cb2a4aedaa84ad9a1c92fbfdfc14090e2749cedf2c1589ad8359aa169b", size = 29384337, upload-time = "2025-01-16T13:52:13.549Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7d/f1c30a92854540bf789e9cd5dde7ef49bbe63f855b85a2e6b3db8135c591/opencv_python-4.11.0.86-cp37-abi3-win_amd64.whl", hash = "sha256:085ad9b77c18853ea66283e98affefe2de8cc4c1f43eda4c100cf9b2721142ec", size = 39488044, upload-time = "2025-01-16T13:52:21.928Z" }, +] + +[[package]] +name = "opentelemetry-api" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/97/b9/3161be15bb8e3ad01be8be5a968a9237c3027c5be504362ff800fca3e442/opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c", size = 65767, upload-time = "2025-12-11T13:32:39.182Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/df/d3f1ddf4bb4cb50ed9b1139cc7b1c54c34a1e7ce8fd1b9a37c0d1551a6bd/opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950", size = 66356, upload-time = "2025-12-11T13:32:17.304Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-proto" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/9d/22d241b66f7bbde88a3bfa6847a351d2c46b84de23e71222c6aae25c7050/opentelemetry_exporter_otlp_proto_common-1.39.1.tar.gz", hash = 
"sha256:763370d4737a59741c89a67b50f9e39271639ee4afc999dadfe768541c027464", size = 20409, upload-time = "2025-12-11T13:32:40.885Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8c/02/ffc3e143d89a27ac21fd557365b98bd0653b98de8a101151d5805b5d4c33/opentelemetry_exporter_otlp_proto_common-1.39.1-py3-none-any.whl", hash = "sha256:08f8a5862d64cc3435105686d0216c1365dc5701f86844a8cd56597d0c764fde", size = 18366, upload-time = "2025-12-11T13:32:20.2Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-grpc" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "googleapis-common-protos" }, + { name = "grpcio" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/53/48/b329fed2c610c2c32c9366d9dc597202c9d1e58e631c137ba15248d8850f/opentelemetry_exporter_otlp_proto_grpc-1.39.1.tar.gz", hash = "sha256:772eb1c9287485d625e4dbe9c879898e5253fea111d9181140f51291b5fec3ad", size = 24650, upload-time = "2025-12-11T13:32:41.429Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/a3/cc9b66575bd6597b98b886a2067eea2693408d2d5f39dad9ab7fc264f5f3/opentelemetry_exporter_otlp_proto_grpc-1.39.1-py3-none-any.whl", hash = "sha256:fa1c136a05c7e9b4c09f739469cbdb927ea20b34088ab1d959a849b5cc589c18", size = 19766, upload-time = "2025-12-11T13:32:21.027Z" }, +] + +[[package]] +name = "opentelemetry-proto" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/49/1d/f25d76d8260c156c40c97c9ed4511ec0f9ce353f8108ca6e7561f82a06b2/opentelemetry_proto-1.39.1.tar.gz", hash = "sha256:6c8e05144fc0d3ed4d22c2289c6b126e03bcd0e6a7da0f16cedd2e1c2772e2c8", size = 46152, upload-time = 
"2025-12-11T13:32:48.681Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/95/b40c96a7b5203005a0b03d8ce8cd212ff23f1793d5ba289c87a097571b18/opentelemetry_proto-1.39.1-py3-none-any.whl", hash = "sha256:22cdc78efd3b3765d09e68bfbd010d4fc254c9818afd0b6b423387d9dee46007", size = 72535, upload-time = "2025-12-11T13:32:33.866Z" }, +] + +[[package]] +name = "opentelemetry-sdk" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/fb/c76080c9ba07e1e8235d24cdcc4d125ef7aa3edf23eb4e497c2e50889adc/opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6", size = 171460, upload-time = "2025-12-11T13:32:49.369Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/98/e91cf858f203d86f4eccdf763dcf01cf03f1dae80c3750f7e635bfa206b6/opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c", size = 132565, upload-time = "2025-12-11T13:32:35.069Z" }, +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.60b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/df/553f93ed38bf22f4b999d9be9c185adb558982214f33eae539d3b5cd0858/opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953", size = 137935, upload-time = "2025-12-11T13:32:50.487Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7a/5e/5958555e09635d09b75de3c4f8b9cae7335ca545d77392ffe7331534c402/opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = 
"sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb", size = 219982, upload-time = "2025-12-11T13:32:36.955Z" }, +] + +[[package]] +name = "optype" +version = "0.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d7/93/6b9e43138ce36fbad134bd1a50460a7bbda61105b5a964e4cf773fe4d845/optype-0.15.0.tar.gz", hash = "sha256:457d6ca9e7da19967ec16d42bdf94e240b33b5d70a56fbbf5b427e5ea39cf41e", size = 99978, upload-time = "2025-12-08T12:32:41.422Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/8b/93f6c496fc5da062fd7e7c4745b5a8dd09b7b576c626075844fe97951a7d/optype-0.15.0-py3-none-any.whl", hash = "sha256:caba40ece9ea39b499fa76c036a82e0d452a432dd4dd3e8e0d30892be2e8c76c", size = 88716, upload-time = "2025-12-08T12:32:39.669Z" }, +] + +[package.optional-dependencies] +numpy = [ + { name = "numpy" }, + { name = "numpy-typing-compat" }, +] + +[[package]] +name = "orjson" +version = "3.11.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/04/b8/333fdb27840f3bf04022d21b654a35f58e15407183aeb16f3b41aa053446/orjson-3.11.5.tar.gz", hash = "sha256:82393ab47b4fe44ffd0a7659fa9cfaacc717eb617c93cde83795f14af5c2e9d5", size = 5972347, upload-time = "2025-12-06T15:55:39.458Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a4/8052a029029b096a78955eadd68ab594ce2197e24ec50e6b6d2ab3f4e33b/orjson-3.11.5-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:334e5b4bff9ad101237c2d799d9fd45737752929753bf4faf4b207335a416b7d", size = 245347, upload-time = "2025-12-06T15:54:22.061Z" }, + { url = "https://files.pythonhosted.org/packages/64/67/574a7732bd9d9d79ac620c8790b4cfe0717a3d5a6eb2b539e6e8995e24a0/orjson-3.11.5-cp312-cp312-macosx_15_0_arm64.whl", hash = 
"sha256:ff770589960a86eae279f5d8aa536196ebda8273a2a07db2a54e82b93bc86626", size = 129435, upload-time = "2025-12-06T15:54:23.615Z" }, + { url = "https://files.pythonhosted.org/packages/52/8d/544e77d7a29d90cf4d9eecd0ae801c688e7f3d1adfa2ebae5e1e94d38ab9/orjson-3.11.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed24250e55efbcb0b35bed7caaec8cedf858ab2f9f2201f17b8938c618c8ca6f", size = 132074, upload-time = "2025-12-06T15:54:24.694Z" }, + { url = "https://files.pythonhosted.org/packages/6e/57/b9f5b5b6fbff9c26f77e785baf56ae8460ef74acdb3eae4931c25b8f5ba9/orjson-3.11.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a66d7769e98a08a12a139049aac2f0ca3adae989817f8c43337455fbc7669b85", size = 130520, upload-time = "2025-12-06T15:54:26.185Z" }, + { url = "https://files.pythonhosted.org/packages/f6/6d/d34970bf9eb33f9ec7c979a262cad86076814859e54eb9a059a52f6dc13d/orjson-3.11.5-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:86cfc555bfd5794d24c6a1903e558b50644e5e68e6471d66502ce5cb5fdef3f9", size = 136209, upload-time = "2025-12-06T15:54:27.264Z" }, + { url = "https://files.pythonhosted.org/packages/e7/39/bc373b63cc0e117a105ea12e57280f83ae52fdee426890d57412432d63b3/orjson-3.11.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a230065027bc2a025e944f9d4714976a81e7ecfa940923283bca7bbc1f10f626", size = 139837, upload-time = "2025-12-06T15:54:28.75Z" }, + { url = "https://files.pythonhosted.org/packages/cb/aa/7c4818c8d7d324da220f4f1af55c343956003aa4d1ce1857bdc1d396ba69/orjson-3.11.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b29d36b60e606df01959c4b982729c8845c69d1963f88686608be9ced96dbfaa", size = 137307, upload-time = "2025-12-06T15:54:29.856Z" }, + { url = "https://files.pythonhosted.org/packages/46/bf/0993b5a056759ba65145effe3a79dd5a939d4a070eaa5da2ee3180fbb13f/orjson-3.11.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash 
= "sha256:c74099c6b230d4261fdc3169d50efc09abf38ace1a42ea2f9994b1d79153d477", size = 139020, upload-time = "2025-12-06T15:54:31.024Z" }, + { url = "https://files.pythonhosted.org/packages/65/e8/83a6c95db3039e504eda60fc388f9faedbb4f6472f5aba7084e06552d9aa/orjson-3.11.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e697d06ad57dd0c7a737771d470eedc18e68dfdefcdd3b7de7f33dfda5b6212e", size = 141099, upload-time = "2025-12-06T15:54:32.196Z" }, + { url = "https://files.pythonhosted.org/packages/b9/b4/24fdc024abfce31c2f6812973b0a693688037ece5dc64b7a60c1ce69e2f2/orjson-3.11.5-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e08ca8a6c851e95aaecc32bc44a5aa75d0ad26af8cdac7c77e4ed93acf3d5b69", size = 413540, upload-time = "2025-12-06T15:54:33.361Z" }, + { url = "https://files.pythonhosted.org/packages/d9/37/01c0ec95d55ed0c11e4cae3e10427e479bba40c77312b63e1f9665e0737d/orjson-3.11.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e8b5f96c05fce7d0218df3fdfeb962d6b8cfff7e3e20264306b46dd8b217c0f3", size = 151530, upload-time = "2025-12-06T15:54:34.6Z" }, + { url = "https://files.pythonhosted.org/packages/f9/d4/f9ebc57182705bb4bbe63f5bbe14af43722a2533135e1d2fb7affa0c355d/orjson-3.11.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ddbfdb5099b3e6ba6d6ea818f61997bb66de14b411357d24c4612cf1ebad08ca", size = 141863, upload-time = "2025-12-06T15:54:35.801Z" }, + { url = "https://files.pythonhosted.org/packages/0d/04/02102b8d19fdcb009d72d622bb5781e8f3fae1646bf3e18c53d1bc8115b5/orjson-3.11.5-cp312-cp312-win32.whl", hash = "sha256:9172578c4eb09dbfcf1657d43198de59b6cef4054de385365060ed50c458ac98", size = 135255, upload-time = "2025-12-06T15:54:37.209Z" }, + { url = "https://files.pythonhosted.org/packages/d4/fb/f05646c43d5450492cb387de5549f6de90a71001682c17882d9f66476af5/orjson-3.11.5-cp312-cp312-win_amd64.whl", hash = "sha256:2b91126e7b470ff2e75746f6f6ee32b9ab67b7a93c8ba1d15d3a0caaf16ec875", size = 133252, upload-time = "2025-12-06T15:54:38.401Z" }, + { url = 
"https://files.pythonhosted.org/packages/dc/a6/7b8c0b26ba18c793533ac1cd145e131e46fcf43952aa94c109b5b913c1f0/orjson-3.11.5-cp312-cp312-win_arm64.whl", hash = "sha256:acbc5fac7e06777555b0722b8ad5f574739e99ffe99467ed63da98f97f9ca0fe", size = 126777, upload-time = "2025-12-06T15:54:39.515Z" }, +] + +[[package]] +name = "ormsgpack" +version = "1.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/96/34c40d621996c2f377a18decbd3c59f031dde73c3ba47d1e1e8f29a05aaa/ormsgpack-1.12.1.tar.gz", hash = "sha256:a3877fde1e4f27a39f92681a0aab6385af3a41d0c25375d33590ae20410ea2ac", size = 39476, upload-time = "2025-12-14T07:57:43.248Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/fe/ab9167ca037406b5703add24049cf3e18021a3b16133ea20615b1f160ea4/ormsgpack-1.12.1-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:4d7fb0e1b6fbc701d75269f7405a4f79230a6ce0063fb1092e4f6577e312f86d", size = 376725, upload-time = "2025-12-14T07:57:07.894Z" }, + { url = "https://files.pythonhosted.org/packages/c7/ea/2820e65f506894c459b840d1091ae6e327fde3d5a3f3b002a11a1b9bdf7d/ormsgpack-1.12.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43a9353e2db5b024c91a47d864ef15eaa62d81824cfc7740fed4cef7db738694", size = 202466, upload-time = "2025-12-14T07:57:09.049Z" }, + { url = "https://files.pythonhosted.org/packages/45/8b/def01c13339c5bbec2ee1469ef53e7fadd66c8d775df974ee4def1572515/ormsgpack-1.12.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fc8fe866b7706fc25af0adf1f600bc06ece5b15ca44e34641327198b821e5c3c", size = 210748, upload-time = "2025-12-14T07:57:10.074Z" }, + { url = "https://files.pythonhosted.org/packages/5d/d2/bf350c92f7f067dd9484499705f2d8366d8d9008a670e3d1d0add1908f85/ormsgpack-1.12.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:813755b5f598a78242042e05dfd1ada4e769e94b98c9ab82554550f97ff4d641", size = 211510, upload-time = "2025-12-14T07:57:11.165Z" }, + { url = "https://files.pythonhosted.org/packages/74/92/9d689bcb95304a6da26c4d59439c350940c25d1b35f146d402ccc6344c51/ormsgpack-1.12.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8eea2a13536fae45d78f93f2cc846c9765c7160c85f19cfefecc20873c137cdd", size = 386237, upload-time = "2025-12-14T07:57:12.306Z" }, + { url = "https://files.pythonhosted.org/packages/17/fe/bd3107547f8b6129265dd957f40b9cd547d2445db2292aacb13335a7ea89/ormsgpack-1.12.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:7a02ebda1a863cbc604740e76faca8eee1add322db2dcbe6cf32669fffdff65c", size = 479589, upload-time = "2025-12-14T07:57:13.475Z" }, + { url = "https://files.pythonhosted.org/packages/c1/7c/e8e5cc9edb967d44f6f85e9ebdad440b59af3fae00b137a4327dc5aed9bb/ormsgpack-1.12.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3c0bd63897c439931cdf29348e5e6e8c330d529830e848d10767615c0f3d1b82", size = 388077, upload-time = "2025-12-14T07:57:14.551Z" }, + { url = "https://files.pythonhosted.org/packages/35/6b/5031797e43b58506f28a8760b26dc23f2620fb4f2200c4c1b3045603e67e/ormsgpack-1.12.1-cp312-cp312-win_amd64.whl", hash = "sha256:362f2e812f8d7035dc25a009171e09d7cc97cb30d3c9e75a16aeae00ca3c1dcf", size = 116190, upload-time = "2025-12-14T07:57:15.575Z" }, + { url = "https://files.pythonhosted.org/packages/1e/fd/9f43ea6425e383a6b2dbfafebb06fd60e8d68c700ef715adfbcdb499f75d/ormsgpack-1.12.1-cp312-cp312-win_arm64.whl", hash = "sha256:6190281e381db2ed0045052208f47a995ccf61eed48f1215ae3cce3fbccd59c5", size = 109990, upload-time = "2025-12-14T07:57:16.419Z" }, +] + +[[package]] +name = "overrides" +version = "7.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/36/86/b585f53236dec60aba864e050778b25045f857e17f6e5ea0ae95fe80edd2/overrides-7.7.0.tar.gz", hash = 
"sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a", size = 22812, upload-time = "2024-01-27T21:01:33.423Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/ab/fc8290c6a4c722e5514d80f62b2dc4c4df1a68a41d1364e625c35990fcf3/overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49", size = 17832, upload-time = "2024-01-27T21:01:31.393Z" }, +] + [[package]] name = "packaging" version = "25.0" @@ -682,64 +2180,118 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, ] +[[package]] +name = "pamqp" +version = "3.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/62/35bbd3d3021e008606cd0a9532db7850c65741bbf69ac8a3a0d8cfeb7934/pamqp-3.3.0.tar.gz", hash = "sha256:40b8795bd4efcf2b0f8821c1de83d12ca16d5760f4507836267fd7a02b06763b", size = 30993, upload-time = "2024-01-12T20:37:25.085Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ac/8d/c1e93296e109a320e508e38118cf7d1fc2a4d1c2ec64de78565b3c445eb5/pamqp-3.3.0-py2.py3-none-any.whl", hash = "sha256:c901a684794157ae39b52cbf700db8c9aae7a470f13528b9d7b4e5f7202f8eb0", size = 33848, upload-time = "2024-01-12T20:37:21.359Z" }, +] + +[[package]] +name = "pandas" +version = "2.3.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "python-dateutil" }, + { name = "pytz" }, + { name = "tzdata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/33/01/d40b85317f86cf08d853a4f495195c73815fdf205eef3993821720274518/pandas-2.3.3.tar.gz", hash = "sha256:e05e1af93b977f7eafa636d043f9f94c7ee3ac81af99c13508215942e64c993b", size = 4495223, upload-time = 
"2025-09-29T23:34:51.853Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9c/fb/231d89e8637c808b997d172b18e9d4a4bc7bf31296196c260526055d1ea0/pandas-2.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d21f6d74eb1725c2efaa71a2bfc661a0689579b58e9c0ca58a739ff0b002b53", size = 11597846, upload-time = "2025-09-29T23:19:48.856Z" }, + { url = "https://files.pythonhosted.org/packages/5c/bd/bf8064d9cfa214294356c2d6702b716d3cf3bb24be59287a6a21e24cae6b/pandas-2.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3fd2f887589c7aa868e02632612ba39acb0b8948faf5cc58f0850e165bd46f35", size = 10729618, upload-time = "2025-09-29T23:39:08.659Z" }, + { url = "https://files.pythonhosted.org/packages/57/56/cf2dbe1a3f5271370669475ead12ce77c61726ffd19a35546e31aa8edf4e/pandas-2.3.3-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ecaf1e12bdc03c86ad4a7ea848d66c685cb6851d807a26aa245ca3d2017a1908", size = 11737212, upload-time = "2025-09-29T23:19:59.765Z" }, + { url = "https://files.pythonhosted.org/packages/e5/63/cd7d615331b328e287d8233ba9fdf191a9c2d11b6af0c7a59cfcec23de68/pandas-2.3.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b3d11d2fda7eb164ef27ffc14b4fcab16a80e1ce67e9f57e19ec0afaf715ba89", size = 12362693, upload-time = "2025-09-29T23:20:14.098Z" }, + { url = "https://files.pythonhosted.org/packages/a6/de/8b1895b107277d52f2b42d3a6806e69cfef0d5cf1d0ba343470b9d8e0a04/pandas-2.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a68e15f780eddf2b07d242e17a04aa187a7ee12b40b930bfdd78070556550e98", size = 12771002, upload-time = "2025-09-29T23:20:26.76Z" }, + { url = "https://files.pythonhosted.org/packages/87/21/84072af3187a677c5893b170ba2c8fbe450a6ff911234916da889b698220/pandas-2.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:371a4ab48e950033bcf52b6527eccb564f52dc826c02afd9a1bc0ab731bba084", size = 13450971, upload-time = "2025-09-29T23:20:41.344Z" }, + { url = 
"https://files.pythonhosted.org/packages/86/41/585a168330ff063014880a80d744219dbf1dd7a1c706e75ab3425a987384/pandas-2.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:a16dcec078a01eeef8ee61bf64074b4e524a2a3f4b3be9326420cabe59c4778b", size = 10992722, upload-time = "2025-09-29T23:20:54.139Z" }, +] + [[package]] name = "pandas-stubs" -version = "2.2.3.250527" +version = "2.3.3.251219" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy" }, { name = "types-pytz" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5f/0d/5fe7f7f3596eb1c2526fea151e9470f86b379183d8b9debe44b2098651ca/pandas_stubs-2.2.3.250527.tar.gz", hash = "sha256:e2d694c4e72106055295ad143664e5c99e5815b07190d1ff85b73b13ff019e63", size = 106312, upload-time = "2025-05-27T15:24:29.716Z" } +sdist = { url = "https://files.pythonhosted.org/packages/95/ee/5407e9e63d22a47774f9246ca80b24f82c36f26efd39f9e3c5b584b915aa/pandas_stubs-2.3.3.251219.tar.gz", hash = "sha256:dc2883e6daff49d380d1b5a2e864983ab9be8cd9a661fa861e3dea37559a5af4", size = 106899, upload-time = "2025-12-19T15:49:53.766Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ec/f8/46141ba8c9d7064dc5008bfb4a6ae5bd3c30e4c61c28b5c5ed485bf358ba/pandas_stubs-2.2.3.250527-py3-none-any.whl", hash = "sha256:cd0a49a95b8c5f944e605be711042a4dd8550e2c559b43d70ba2c4b524b66163", size = 159683, upload-time = "2025-05-27T15:24:28.4Z" }, + { url = "https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl", hash = "sha256:ccc6337febb51d6d8a08e4c96b479478a0da0ef704b5e08bd212423fe1cb549c", size = 163667, upload-time = "2025-12-19T15:49:52.072Z" }, ] [[package]] -name = "pika-stubs" -version = "0.1.3" +name = "param" +version = "2.3.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/c6/7a/0ce91b1507e1a88e104bddd2b64d47cc80a9eda53b7e74bb5a6038c926ae/pika-stubs-0.1.3.tar.gz", hash = "sha256:aaa78fa9f52eb3591b6073fbbe2607567405d1857be268d447bea252e22dd6cf", size = 15155, upload-time = "2020-06-10T02:19:59.923Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c2/bb/ffd1606c28a957fb6444ed3edefe41373cdd7b3e001630b07e3a53a6bea3/param-2.3.1.tar.gz", hash = "sha256:84e59fc3a9bfb0e4c8100eb92d5be529deea3ec9c1f0881a0068c5caf31f21f3", size = 201772, upload-time = "2025-11-25T15:35:54.842Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/da/9d476e9aadfa854719f3cb917e3f7a170a657a182d8d1d6e546594a4872b/param-2.3.1-py3-none-any.whl", hash = "sha256:886b19031438719bbecfd15044dcdd9ed3cb9edb199191294f75600c7081d163", size = 139818, upload-time = "2025-11-25T15:35:53.556Z" }, +] + +[[package]] +name = "perf-analyzer" +version = "2.59.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/0b/89940fa2c8415c3637a24b8f22ab4cef1ea28872acab31d17eb98d7ddc21/perf_analyzer-2.59.1-py3-none-manylinux_2_38_aarch64.whl", hash = "sha256:493ac232e55fa4467aeb007aafe48e0ace67198b5012d4a43489c5d5d543fed4", size = 6766351, upload-time = "2025-08-13T00:39:50.239Z" }, + { url = "https://files.pythonhosted.org/packages/e7/5c/a178c441c82f558c8ffd9a621738b7b63040bca4d392e455a49e1c20e5ac/perf_analyzer-2.59.1-py3-none-manylinux_2_38_x86_64.whl", hash = "sha256:1719ad97306f442eed16a8abe7930ab81cd84c61658b8bbe864bee6520c7a656", size = 7176325, upload-time = "2025-08-13T00:33:15.998Z" }, +] + +[[package]] +name = "pika" +version = "1.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/db/db/d4102f356af18f316c67f2cead8ece307f731dd63140e2c71f170ddacf9b/pika-1.3.2.tar.gz", hash = "sha256:b2a327ddddf8570b4965b3576ac77091b850262d34ce8c1d8cb4e4146aa4145f", size = 145029, upload-time = 
"2023-05-05T14:25:43.368Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/f3/f412836ec714d36f0f4ab581b84c491e3f42c6b5b97a6c6ed1817f3c16d0/pika-1.3.2-py3-none-any.whl", hash = "sha256:0779a7c1fafd805672796085560d290213a465e4f6f76a6fb19e378d8041a14f", size = 155415, upload-time = "2023-05-05T14:25:41.484Z" }, +] [[package]] name = "pillow" -version = "11.2.1" +version = "12.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/af/cb/bb5c01fcd2a69335b86c22142b2bccfc3464087efb7fd382eee5ffc7fdf7/pillow-11.2.1.tar.gz", hash = "sha256:a64dd61998416367b7ef979b73d3a85853ba9bec4c2925f74e588879a58716b6", size = 47026707, upload-time = "2025-04-12T17:50:03.289Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/cace85a1b0c9775a9f8f5d5423c8261c858760e2466c79b2dd184638b056/pillow-12.0.0.tar.gz", hash = "sha256:87d4f8125c9988bfbed67af47dd7a953e2fc7b0cc1e7800ec6d2080d490bb353", size = 47008828, upload-time = "2025-10-15T18:24:14.008Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c7/40/052610b15a1b8961f52537cc8326ca6a881408bc2bdad0d852edeb6ed33b/pillow-11.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:78afba22027b4accef10dbd5eed84425930ba41b3ea0a86fa8d20baaf19d807f", size = 3190185, upload-time = "2025-04-12T17:48:00.417Z" }, - { url = "https://files.pythonhosted.org/packages/e5/7e/b86dbd35a5f938632093dc40d1682874c33dcfe832558fc80ca56bfcb774/pillow-11.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78092232a4ab376a35d68c4e6d5e00dfd73454bd12b230420025fbe178ee3b0b", size = 3030306, upload-time = "2025-04-12T17:48:02.391Z" }, - { url = "https://files.pythonhosted.org/packages/a4/5c/467a161f9ed53e5eab51a42923c33051bf8d1a2af4626ac04f5166e58e0c/pillow-11.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a5f306095c6780c52e6bbb6109624b95c5b18e40aab1c3041da3e9e0cd3e2d", size = 4416121, upload-time = "2025-04-12T17:48:04.554Z" 
}, - { url = "https://files.pythonhosted.org/packages/62/73/972b7742e38ae0e2ac76ab137ca6005dcf877480da0d9d61d93b613065b4/pillow-11.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c7b29dbd4281923a2bfe562acb734cee96bbb129e96e6972d315ed9f232bef4", size = 4501707, upload-time = "2025-04-12T17:48:06.831Z" }, - { url = "https://files.pythonhosted.org/packages/e4/3a/427e4cb0b9e177efbc1a84798ed20498c4f233abde003c06d2650a6d60cb/pillow-11.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e645b020f3209a0181a418bffe7b4a93171eef6c4ef6cc20980b30bebf17b7d", size = 4522921, upload-time = "2025-04-12T17:48:09.229Z" }, - { url = "https://files.pythonhosted.org/packages/fe/7c/d8b1330458e4d2f3f45d9508796d7caf0c0d3764c00c823d10f6f1a3b76d/pillow-11.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b2dbea1012ccb784a65349f57bbc93730b96e85b42e9bf7b01ef40443db720b4", size = 4612523, upload-time = "2025-04-12T17:48:11.631Z" }, - { url = "https://files.pythonhosted.org/packages/b3/2f/65738384e0b1acf451de5a573d8153fe84103772d139e1e0bdf1596be2ea/pillow-11.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3104c57bbd72948d75f6a9389e6727d2ab6333c3617f0a89d72d4940aa0443", size = 4587836, upload-time = "2025-04-12T17:48:13.592Z" }, - { url = "https://files.pythonhosted.org/packages/6a/c5/e795c9f2ddf3debb2dedd0df889f2fe4b053308bb59a3cc02a0cd144d641/pillow-11.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:598174aef4589af795f66f9caab87ba4ff860ce08cd5bb447c6fc553ffee603c", size = 4669390, upload-time = "2025-04-12T17:48:15.938Z" }, - { url = "https://files.pythonhosted.org/packages/96/ae/ca0099a3995976a9fce2f423166f7bff9b12244afdc7520f6ed38911539a/pillow-11.2.1-cp312-cp312-win32.whl", hash = "sha256:1d535df14716e7f8776b9e7fee118576d65572b4aad3ed639be9e4fa88a1cad3", size = 2332309, upload-time = "2025-04-12T17:48:17.885Z" }, - { url = 
"https://files.pythonhosted.org/packages/7c/18/24bff2ad716257fc03da964c5e8f05d9790a779a8895d6566e493ccf0189/pillow-11.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:14e33b28bf17c7a38eede290f77db7c664e4eb01f7869e37fa98a5aa95978941", size = 2676768, upload-time = "2025-04-12T17:48:19.655Z" }, - { url = "https://files.pythonhosted.org/packages/da/bb/e8d656c9543276517ee40184aaa39dcb41e683bca121022f9323ae11b39d/pillow-11.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:21e1470ac9e5739ff880c211fc3af01e3ae505859392bf65458c224d0bf283eb", size = 2415087, upload-time = "2025-04-12T17:48:21.991Z" }, - { url = "https://files.pythonhosted.org/packages/36/9c/447528ee3776e7ab8897fe33697a7ff3f0475bb490c5ac1456a03dc57956/pillow-11.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fdec757fea0b793056419bca3e9932eb2b0ceec90ef4813ea4c1e072c389eb28", size = 3190098, upload-time = "2025-04-12T17:48:23.915Z" }, - { url = "https://files.pythonhosted.org/packages/b5/09/29d5cd052f7566a63e5b506fac9c60526e9ecc553825551333e1e18a4858/pillow-11.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b0e130705d568e2f43a17bcbe74d90958e8a16263868a12c3e0d9c8162690830", size = 3030166, upload-time = "2025-04-12T17:48:25.738Z" }, - { url = "https://files.pythonhosted.org/packages/71/5d/446ee132ad35e7600652133f9c2840b4799bbd8e4adba881284860da0a36/pillow-11.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bdb5e09068332578214cadd9c05e3d64d99e0e87591be22a324bdbc18925be0", size = 4408674, upload-time = "2025-04-12T17:48:27.908Z" }, - { url = "https://files.pythonhosted.org/packages/69/5f/cbe509c0ddf91cc3a03bbacf40e5c2339c4912d16458fcb797bb47bcb269/pillow-11.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d189ba1bebfbc0c0e529159631ec72bb9e9bc041f01ec6d3233d6d82eb823bc1", size = 4496005, upload-time = "2025-04-12T17:48:29.888Z" }, - { url = 
"https://files.pythonhosted.org/packages/f9/b3/dd4338d8fb8a5f312021f2977fb8198a1184893f9b00b02b75d565c33b51/pillow-11.2.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:191955c55d8a712fab8934a42bfefbf99dd0b5875078240943f913bb66d46d9f", size = 4518707, upload-time = "2025-04-12T17:48:31.874Z" }, - { url = "https://files.pythonhosted.org/packages/13/eb/2552ecebc0b887f539111c2cd241f538b8ff5891b8903dfe672e997529be/pillow-11.2.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:ad275964d52e2243430472fc5d2c2334b4fc3ff9c16cb0a19254e25efa03a155", size = 4610008, upload-time = "2025-04-12T17:48:34.422Z" }, - { url = "https://files.pythonhosted.org/packages/72/d1/924ce51bea494cb6e7959522d69d7b1c7e74f6821d84c63c3dc430cbbf3b/pillow-11.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:750f96efe0597382660d8b53e90dd1dd44568a8edb51cb7f9d5d918b80d4de14", size = 4585420, upload-time = "2025-04-12T17:48:37.641Z" }, - { url = "https://files.pythonhosted.org/packages/43/ab/8f81312d255d713b99ca37479a4cb4b0f48195e530cdc1611990eb8fd04b/pillow-11.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fe15238d3798788d00716637b3d4e7bb6bde18b26e5d08335a96e88564a36b6b", size = 4667655, upload-time = "2025-04-12T17:48:39.652Z" }, - { url = "https://files.pythonhosted.org/packages/94/86/8f2e9d2dc3d308dfd137a07fe1cc478df0a23d42a6c4093b087e738e4827/pillow-11.2.1-cp313-cp313-win32.whl", hash = "sha256:3fe735ced9a607fee4f481423a9c36701a39719252a9bb251679635f99d0f7d2", size = 2332329, upload-time = "2025-04-12T17:48:41.765Z" }, - { url = "https://files.pythonhosted.org/packages/6d/ec/1179083b8d6067a613e4d595359b5fdea65d0a3b7ad623fee906e1b3c4d2/pillow-11.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:74ee3d7ecb3f3c05459ba95eed5efa28d6092d751ce9bf20e3e253a4e497e691", size = 2676388, upload-time = "2025-04-12T17:48:43.625Z" }, - { url = 
"https://files.pythonhosted.org/packages/23/f1/2fc1e1e294de897df39fa8622d829b8828ddad938b0eaea256d65b84dd72/pillow-11.2.1-cp313-cp313-win_arm64.whl", hash = "sha256:5119225c622403afb4b44bad4c1ca6c1f98eed79db8d3bc6e4e160fc6339d66c", size = 2414950, upload-time = "2025-04-12T17:48:45.475Z" }, - { url = "https://files.pythonhosted.org/packages/c4/3e/c328c48b3f0ead7bab765a84b4977acb29f101d10e4ef57a5e3400447c03/pillow-11.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8ce2e8411c7aaef53e6bb29fe98f28cd4fbd9a1d9be2eeea434331aac0536b22", size = 3192759, upload-time = "2025-04-12T17:48:47.866Z" }, - { url = "https://files.pythonhosted.org/packages/18/0e/1c68532d833fc8b9f404d3a642991441d9058eccd5606eab31617f29b6d4/pillow-11.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9ee66787e095127116d91dea2143db65c7bb1e232f617aa5957c0d9d2a3f23a7", size = 3033284, upload-time = "2025-04-12T17:48:50.189Z" }, - { url = "https://files.pythonhosted.org/packages/b7/cb/6faf3fb1e7705fd2db74e070f3bf6f88693601b0ed8e81049a8266de4754/pillow-11.2.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9622e3b6c1d8b551b6e6f21873bdcc55762b4b2126633014cea1803368a9aa16", size = 4445826, upload-time = "2025-04-12T17:48:52.346Z" }, - { url = "https://files.pythonhosted.org/packages/07/94/8be03d50b70ca47fb434a358919d6a8d6580f282bbb7af7e4aa40103461d/pillow-11.2.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63b5dff3a68f371ea06025a1a6966c9a1e1ee452fc8020c2cd0ea41b83e9037b", size = 4527329, upload-time = "2025-04-12T17:48:54.403Z" }, - { url = "https://files.pythonhosted.org/packages/fd/a4/bfe78777076dc405e3bd2080bc32da5ab3945b5a25dc5d8acaa9de64a162/pillow-11.2.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:31df6e2d3d8fc99f993fd253e97fae451a8db2e7207acf97859732273e108406", size = 4549049, upload-time = "2025-04-12T17:48:56.383Z" }, - { url = 
"https://files.pythonhosted.org/packages/65/4d/eaf9068dc687c24979e977ce5677e253624bd8b616b286f543f0c1b91662/pillow-11.2.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:062b7a42d672c45a70fa1f8b43d1d38ff76b63421cbbe7f88146b39e8a558d91", size = 4635408, upload-time = "2025-04-12T17:48:58.782Z" }, - { url = "https://files.pythonhosted.org/packages/1d/26/0fd443365d9c63bc79feb219f97d935cd4b93af28353cba78d8e77b61719/pillow-11.2.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4eb92eca2711ef8be42fd3f67533765d9fd043b8c80db204f16c8ea62ee1a751", size = 4614863, upload-time = "2025-04-12T17:49:00.709Z" }, - { url = "https://files.pythonhosted.org/packages/49/65/dca4d2506be482c2c6641cacdba5c602bc76d8ceb618fd37de855653a419/pillow-11.2.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f91ebf30830a48c825590aede79376cb40f110b387c17ee9bd59932c961044f9", size = 4692938, upload-time = "2025-04-12T17:49:02.946Z" }, - { url = "https://files.pythonhosted.org/packages/b3/92/1ca0c3f09233bd7decf8f7105a1c4e3162fb9142128c74adad0fb361b7eb/pillow-11.2.1-cp313-cp313t-win32.whl", hash = "sha256:e0b55f27f584ed623221cfe995c912c61606be8513bfa0e07d2c674b4516d9dd", size = 2335774, upload-time = "2025-04-12T17:49:04.889Z" }, - { url = "https://files.pythonhosted.org/packages/a5/ac/77525347cb43b83ae905ffe257bbe2cc6fd23acb9796639a1f56aa59d191/pillow-11.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:36d6b82164c39ce5482f649b437382c0fb2395eabc1e2b1702a6deb8ad647d6e", size = 2681895, upload-time = "2025-04-12T17:49:06.635Z" }, - { url = "https://files.pythonhosted.org/packages/67/32/32dc030cfa91ca0fc52baebbba2e009bb001122a1daa8b6a79ad830b38d3/pillow-11.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:225c832a13326e34f212d2072982bb1adb210e0cc0b153e688743018c94a2681", size = 2417234, upload-time = "2025-04-12T17:49:08.399Z" }, + { url = 
"https://files.pythonhosted.org/packages/2c/90/4fcce2c22caf044e660a198d740e7fbc14395619e3cb1abad12192c0826c/pillow-12.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:53561a4ddc36facb432fae7a9d8afbfaf94795414f5cdc5fc52f28c1dca90371", size = 5249377, upload-time = "2025-10-15T18:22:05.993Z" }, + { url = "https://files.pythonhosted.org/packages/fd/e0/ed960067543d080691d47d6938ebccbf3976a931c9567ab2fbfab983a5dd/pillow-12.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:71db6b4c1653045dacc1585c1b0d184004f0d7e694c7b34ac165ca70c0838082", size = 4650343, upload-time = "2025-10-15T18:22:07.718Z" }, + { url = "https://files.pythonhosted.org/packages/e7/a1/f81fdeddcb99c044bf7d6faa47e12850f13cee0849537a7d27eeab5534d4/pillow-12.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2fa5f0b6716fc88f11380b88b31fe591a06c6315e955c096c35715788b339e3f", size = 6232981, upload-time = "2025-10-15T18:22:09.287Z" }, + { url = "https://files.pythonhosted.org/packages/88/e1/9098d3ce341a8750b55b0e00c03f1630d6178f38ac191c81c97a3b047b44/pillow-12.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:82240051c6ca513c616f7f9da06e871f61bfd7805f566275841af15015b8f98d", size = 8041399, upload-time = "2025-10-15T18:22:10.872Z" }, + { url = "https://files.pythonhosted.org/packages/a7/62/a22e8d3b602ae8cc01446d0c57a54e982737f44b6f2e1e019a925143771d/pillow-12.0.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:55f818bd74fe2f11d4d7cbc65880a843c4075e0ac7226bc1a23261dbea531953", size = 6347740, upload-time = "2025-10-15T18:22:12.769Z" }, + { url = "https://files.pythonhosted.org/packages/4f/87/424511bdcd02c8d7acf9f65caa09f291a519b16bd83c3fb3374b3d4ae951/pillow-12.0.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b87843e225e74576437fd5b6a4c2205d422754f84a06942cfaf1dc32243e45a8", size = 7040201, upload-time = "2025-10-15T18:22:14.813Z" }, + { url = 
"https://files.pythonhosted.org/packages/dc/4d/435c8ac688c54d11755aedfdd9f29c9eeddf68d150fe42d1d3dbd2365149/pillow-12.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c607c90ba67533e1b2355b821fef6764d1dd2cbe26b8c1005ae84f7aea25ff79", size = 6462334, upload-time = "2025-10-15T18:22:16.375Z" }, + { url = "https://files.pythonhosted.org/packages/2b/f2/ad34167a8059a59b8ad10bc5c72d4d9b35acc6b7c0877af8ac885b5f2044/pillow-12.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:21f241bdd5080a15bc86d3466a9f6074a9c2c2b314100dd896ac81ee6db2f1ba", size = 7134162, upload-time = "2025-10-15T18:22:17.996Z" }, + { url = "https://files.pythonhosted.org/packages/0c/b1/a7391df6adacf0a5c2cf6ac1cf1fcc1369e7d439d28f637a847f8803beb3/pillow-12.0.0-cp312-cp312-win32.whl", hash = "sha256:dd333073e0cacdc3089525c7df7d39b211bcdf31fc2824e49d01c6b6187b07d0", size = 6298769, upload-time = "2025-10-15T18:22:19.923Z" }, + { url = "https://files.pythonhosted.org/packages/a2/0b/d87733741526541c909bbf159e338dcace4f982daac6e5a8d6be225ca32d/pillow-12.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9fe611163f6303d1619bbcb653540a4d60f9e55e622d60a3108be0d5b441017a", size = 7001107, upload-time = "2025-10-15T18:22:21.644Z" }, + { url = "https://files.pythonhosted.org/packages/bc/96/aaa61ce33cc98421fb6088af2a03be4157b1e7e0e87087c888e2370a7f45/pillow-12.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:7dfb439562f234f7d57b1ac6bc8fe7f838a4bd49c79230e0f6a1da93e82f1fad", size = 2436012, upload-time = "2025-10-15T18:22:23.621Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, +] + +[[package]] +name = "posthog" +version = "5.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "backoff" }, + { name = "distro" }, + { name = "python-dateutil" }, + { name = "requests" }, + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/48/20/60ae67bb9d82f00427946218d49e2e7e80fb41c15dc5019482289ec9ce8d/posthog-5.4.0.tar.gz", hash = "sha256:701669261b8d07cdde0276e5bc096b87f9e200e3b9589c5ebff14df658c5893c", size = 88076, upload-time = "2025-06-20T23:19:23.485Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4f/98/e480cab9a08d1c09b1c59a93dade92c1bb7544826684ff2acbfd10fcfbd4/posthog-5.4.0-py3-none-any.whl", hash = "sha256:284dfa302f64353484420b52d4ad81ff5c2c2d1d607c4e2db602ac72761831bd", size = 105364, upload-time = "2025-06-20T23:19:22.001Z" }, ] [[package]] @@ -756,71 +2308,38 @@ wheels = [ [[package]] name = "prompt-toolkit" -version = "3.0.51" +version = "3.0.52" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "wcwidth" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/bb/6e/9d084c929dfe9e3bfe0c6a47e31f78a25c54627d64a66e884a8bf5474f1c/prompt_toolkit-3.0.51.tar.gz", hash = "sha256:931a162e3b27fc90c86f1b48bb1fb2c528c2761475e57c9c06de13311c7b54ed", size = 428940, upload-time = "2025-04-15T09:18:47.731Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/96/06e01a7b38dce6fe1db213e061a4602dd6032a8a97ef6c1a862537732421/prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855", size = 434198, upload-time = "2025-08-27T15:24:02.057Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/ce/4f/5249960887b1fbe561d9ff265496d170b55a735b76724f10ef19f9e40716/prompt_toolkit-3.0.51-py3-none-any.whl", hash = "sha256:52742911fde84e2d423e2f9a4cf1de7d7ac4e51958f648d9540e0fb8db077b07", size = 387810, upload-time = "2025-04-15T09:18:44.753Z" }, + { url = "https://files.pythonhosted.org/packages/84/03/0d3ce49e2505ae70cf43bc5bb3033955d2fc9f932163e84dc0779cc47f48/prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955", size = 391431, upload-time = "2025-08-27T15:23:59.498Z" }, ] [[package]] name = "propcache" -version = "0.3.2" +version = "0.4.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a6/16/43264e4a779dd8588c21a70f0709665ee8f611211bdd2c87d952cfa7c776/propcache-0.3.2.tar.gz", hash = "sha256:20d7d62e4e7ef05f221e0db2856b979540686342e7dd9973b815599c7057e168", size = 44139, upload-time = "2025-06-09T22:56:06.081Z" } +sdist = { url = "https://files.pythonhosted.org/packages/9e/da/e9fc233cf63743258bff22b3dfa7ea5baef7b5bc324af47a0ad89b8ffc6f/propcache-0.4.1.tar.gz", hash = "sha256:f48107a8c637e80362555f37ecf49abe20370e557cc4ab374f04ec4423c97c3d", size = 46442, upload-time = "2025-10-08T19:49:02.291Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a8/42/9ca01b0a6f48e81615dca4765a8f1dd2c057e0540f6116a27dc5ee01dfb6/propcache-0.3.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8de106b6c84506b31c27168582cd3cb3000a6412c16df14a8628e5871ff83c10", size = 73674, upload-time = "2025-06-09T22:54:30.551Z" }, - { url = "https://files.pythonhosted.org/packages/af/6e/21293133beb550f9c901bbece755d582bfaf2176bee4774000bd4dd41884/propcache-0.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:28710b0d3975117239c76600ea351934ac7b5ff56e60953474342608dbbb6154", size = 43570, upload-time = "2025-06-09T22:54:32.296Z" }, - { url = 
"https://files.pythonhosted.org/packages/0c/c8/0393a0a3a2b8760eb3bde3c147f62b20044f0ddac81e9d6ed7318ec0d852/propcache-0.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce26862344bdf836650ed2487c3d724b00fbfec4233a1013f597b78c1cb73615", size = 43094, upload-time = "2025-06-09T22:54:33.929Z" }, - { url = "https://files.pythonhosted.org/packages/37/2c/489afe311a690399d04a3e03b069225670c1d489eb7b044a566511c1c498/propcache-0.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bca54bd347a253af2cf4544bbec232ab982f4868de0dd684246b67a51bc6b1db", size = 226958, upload-time = "2025-06-09T22:54:35.186Z" }, - { url = "https://files.pythonhosted.org/packages/9d/ca/63b520d2f3d418c968bf596839ae26cf7f87bead026b6192d4da6a08c467/propcache-0.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55780d5e9a2ddc59711d727226bb1ba83a22dd32f64ee15594b9392b1f544eb1", size = 234894, upload-time = "2025-06-09T22:54:36.708Z" }, - { url = "https://files.pythonhosted.org/packages/11/60/1d0ed6fff455a028d678df30cc28dcee7af77fa2b0e6962ce1df95c9a2a9/propcache-0.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:035e631be25d6975ed87ab23153db6a73426a48db688070d925aa27e996fe93c", size = 233672, upload-time = "2025-06-09T22:54:38.062Z" }, - { url = "https://files.pythonhosted.org/packages/37/7c/54fd5301ef38505ab235d98827207176a5c9b2aa61939b10a460ca53e123/propcache-0.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee6f22b6eaa39297c751d0e80c0d3a454f112f5c6481214fcf4c092074cecd67", size = 224395, upload-time = "2025-06-09T22:54:39.634Z" }, - { url = "https://files.pythonhosted.org/packages/ee/1a/89a40e0846f5de05fdc6779883bf46ba980e6df4d2ff8fb02643de126592/propcache-0.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ca3aee1aa955438c4dba34fc20a9f390e4c79967257d830f137bd5a8a32ed3b", size = 212510, upload-time = 
"2025-06-09T22:54:41.565Z" }, - { url = "https://files.pythonhosted.org/packages/5e/33/ca98368586c9566a6b8d5ef66e30484f8da84c0aac3f2d9aec6d31a11bd5/propcache-0.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7a4f30862869fa2b68380d677cc1c5fcf1e0f2b9ea0cf665812895c75d0ca3b8", size = 222949, upload-time = "2025-06-09T22:54:43.038Z" }, - { url = "https://files.pythonhosted.org/packages/ba/11/ace870d0aafe443b33b2f0b7efdb872b7c3abd505bfb4890716ad7865e9d/propcache-0.3.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:b77ec3c257d7816d9f3700013639db7491a434644c906a2578a11daf13176251", size = 217258, upload-time = "2025-06-09T22:54:44.376Z" }, - { url = "https://files.pythonhosted.org/packages/5b/d2/86fd6f7adffcfc74b42c10a6b7db721d1d9ca1055c45d39a1a8f2a740a21/propcache-0.3.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:cab90ac9d3f14b2d5050928483d3d3b8fb6b4018893fc75710e6aa361ecb2474", size = 213036, upload-time = "2025-06-09T22:54:46.243Z" }, - { url = "https://files.pythonhosted.org/packages/07/94/2d7d1e328f45ff34a0a284cf5a2847013701e24c2a53117e7c280a4316b3/propcache-0.3.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0b504d29f3c47cf6b9e936c1852246c83d450e8e063d50562115a6be6d3a2535", size = 227684, upload-time = "2025-06-09T22:54:47.63Z" }, - { url = "https://files.pythonhosted.org/packages/b7/05/37ae63a0087677e90b1d14710e532ff104d44bc1efa3b3970fff99b891dc/propcache-0.3.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:ce2ac2675a6aa41ddb2a0c9cbff53780a617ac3d43e620f8fd77ba1c84dcfc06", size = 234562, upload-time = "2025-06-09T22:54:48.982Z" }, - { url = "https://files.pythonhosted.org/packages/a4/7c/3f539fcae630408d0bd8bf3208b9a647ccad10976eda62402a80adf8fc34/propcache-0.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b4239611205294cc433845b914131b2a1f03500ff3c1ed093ed216b82621e1", size = 222142, upload-time = "2025-06-09T22:54:50.424Z" }, - { url = 
"https://files.pythonhosted.org/packages/7c/d2/34b9eac8c35f79f8a962546b3e97e9d4b990c420ee66ac8255d5d9611648/propcache-0.3.2-cp312-cp312-win32.whl", hash = "sha256:df4a81b9b53449ebc90cc4deefb052c1dd934ba85012aa912c7ea7b7e38b60c1", size = 37711, upload-time = "2025-06-09T22:54:52.072Z" }, - { url = "https://files.pythonhosted.org/packages/19/61/d582be5d226cf79071681d1b46b848d6cb03d7b70af7063e33a2787eaa03/propcache-0.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7046e79b989d7fe457bb755844019e10f693752d169076138abf17f31380800c", size = 41479, upload-time = "2025-06-09T22:54:53.234Z" }, - { url = "https://files.pythonhosted.org/packages/dc/d1/8c747fafa558c603c4ca19d8e20b288aa0c7cda74e9402f50f31eb65267e/propcache-0.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ca592ed634a73ca002967458187109265e980422116c0a107cf93d81f95af945", size = 71286, upload-time = "2025-06-09T22:54:54.369Z" }, - { url = "https://files.pythonhosted.org/packages/61/99/d606cb7986b60d89c36de8a85d58764323b3a5ff07770a99d8e993b3fa73/propcache-0.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9ecb0aad4020e275652ba3975740f241bd12a61f1a784df044cf7477a02bc252", size = 42425, upload-time = "2025-06-09T22:54:55.642Z" }, - { url = "https://files.pythonhosted.org/packages/8c/96/ef98f91bbb42b79e9bb82bdd348b255eb9d65f14dbbe3b1594644c4073f7/propcache-0.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7f08f1cc28bd2eade7a8a3d2954ccc673bb02062e3e7da09bc75d843386b342f", size = 41846, upload-time = "2025-06-09T22:54:57.246Z" }, - { url = "https://files.pythonhosted.org/packages/5b/ad/3f0f9a705fb630d175146cd7b1d2bf5555c9beaed54e94132b21aac098a6/propcache-0.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1a342c834734edb4be5ecb1e9fb48cb64b1e2320fccbd8c54bf8da8f2a84c33", size = 208871, upload-time = "2025-06-09T22:54:58.975Z" }, - { url = 
"https://files.pythonhosted.org/packages/3a/38/2085cda93d2c8b6ec3e92af2c89489a36a5886b712a34ab25de9fbca7992/propcache-0.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a544caaae1ac73f1fecfae70ded3e93728831affebd017d53449e3ac052ac1e", size = 215720, upload-time = "2025-06-09T22:55:00.471Z" }, - { url = "https://files.pythonhosted.org/packages/61/c1/d72ea2dc83ac7f2c8e182786ab0fc2c7bd123a1ff9b7975bee671866fe5f/propcache-0.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310d11aa44635298397db47a3ebce7db99a4cc4b9bbdfcf6c98a60c8d5261cf1", size = 215203, upload-time = "2025-06-09T22:55:01.834Z" }, - { url = "https://files.pythonhosted.org/packages/af/81/b324c44ae60c56ef12007105f1460d5c304b0626ab0cc6b07c8f2a9aa0b8/propcache-0.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c1396592321ac83157ac03a2023aa6cc4a3cc3cfdecb71090054c09e5a7cce3", size = 206365, upload-time = "2025-06-09T22:55:03.199Z" }, - { url = "https://files.pythonhosted.org/packages/09/73/88549128bb89e66d2aff242488f62869014ae092db63ccea53c1cc75a81d/propcache-0.3.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cabf5b5902272565e78197edb682017d21cf3b550ba0460ee473753f28d23c1", size = 196016, upload-time = "2025-06-09T22:55:04.518Z" }, - { url = "https://files.pythonhosted.org/packages/b9/3f/3bdd14e737d145114a5eb83cb172903afba7242f67c5877f9909a20d948d/propcache-0.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0a2f2235ac46a7aa25bdeb03a9e7060f6ecbd213b1f9101c43b3090ffb971ef6", size = 205596, upload-time = "2025-06-09T22:55:05.942Z" }, - { url = "https://files.pythonhosted.org/packages/0f/ca/2f4aa819c357d3107c3763d7ef42c03980f9ed5c48c82e01e25945d437c1/propcache-0.3.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:92b69e12e34869a6970fd2f3da91669899994b47c98f5d430b781c26f1d9f387", size = 200977, upload-time = "2025-06-09T22:55:07.792Z" }, - { 
url = "https://files.pythonhosted.org/packages/cd/4a/e65276c7477533c59085251ae88505caf6831c0e85ff8b2e31ebcbb949b1/propcache-0.3.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:54e02207c79968ebbdffc169591009f4474dde3b4679e16634d34c9363ff56b4", size = 197220, upload-time = "2025-06-09T22:55:09.173Z" }, - { url = "https://files.pythonhosted.org/packages/7c/54/fc7152e517cf5578278b242396ce4d4b36795423988ef39bb8cd5bf274c8/propcache-0.3.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4adfb44cb588001f68c5466579d3f1157ca07f7504fc91ec87862e2b8e556b88", size = 210642, upload-time = "2025-06-09T22:55:10.62Z" }, - { url = "https://files.pythonhosted.org/packages/b9/80/abeb4a896d2767bf5f1ea7b92eb7be6a5330645bd7fb844049c0e4045d9d/propcache-0.3.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fd3e6019dc1261cd0291ee8919dd91fbab7b169bb76aeef6c716833a3f65d206", size = 212789, upload-time = "2025-06-09T22:55:12.029Z" }, - { url = "https://files.pythonhosted.org/packages/b3/db/ea12a49aa7b2b6d68a5da8293dcf50068d48d088100ac016ad92a6a780e6/propcache-0.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4c181cad81158d71c41a2bce88edce078458e2dd5ffee7eddd6b05da85079f43", size = 205880, upload-time = "2025-06-09T22:55:13.45Z" }, - { url = "https://files.pythonhosted.org/packages/d1/e5/9076a0bbbfb65d1198007059c65639dfd56266cf8e477a9707e4b1999ff4/propcache-0.3.2-cp313-cp313-win32.whl", hash = "sha256:8a08154613f2249519e549de2330cf8e2071c2887309a7b07fb56098f5170a02", size = 37220, upload-time = "2025-06-09T22:55:15.284Z" }, - { url = "https://files.pythonhosted.org/packages/d3/f5/b369e026b09a26cd77aa88d8fffd69141d2ae00a2abaaf5380d2603f4b7f/propcache-0.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e41671f1594fc4ab0a6dec1351864713cb3a279910ae8b58f884a88a0a632c05", size = 40678, upload-time = "2025-06-09T22:55:16.445Z" }, - { url = 
"https://files.pythonhosted.org/packages/a4/3a/6ece377b55544941a08d03581c7bc400a3c8cd3c2865900a68d5de79e21f/propcache-0.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:9a3cf035bbaf035f109987d9d55dc90e4b0e36e04bbbb95af3055ef17194057b", size = 76560, upload-time = "2025-06-09T22:55:17.598Z" }, - { url = "https://files.pythonhosted.org/packages/0c/da/64a2bb16418740fa634b0e9c3d29edff1db07f56d3546ca2d86ddf0305e1/propcache-0.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:156c03d07dc1323d8dacaa221fbe028c5c70d16709cdd63502778e6c3ccca1b0", size = 44676, upload-time = "2025-06-09T22:55:18.922Z" }, - { url = "https://files.pythonhosted.org/packages/36/7b/f025e06ea51cb72c52fb87e9b395cced02786610b60a3ed51da8af017170/propcache-0.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74413c0ba02ba86f55cf60d18daab219f7e531620c15f1e23d95563f505efe7e", size = 44701, upload-time = "2025-06-09T22:55:20.106Z" }, - { url = "https://files.pythonhosted.org/packages/a4/00/faa1b1b7c3b74fc277f8642f32a4c72ba1d7b2de36d7cdfb676db7f4303e/propcache-0.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f066b437bb3fa39c58ff97ab2ca351db465157d68ed0440abecb21715eb24b28", size = 276934, upload-time = "2025-06-09T22:55:21.5Z" }, - { url = "https://files.pythonhosted.org/packages/74/ab/935beb6f1756e0476a4d5938ff44bf0d13a055fed880caf93859b4f1baf4/propcache-0.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1304b085c83067914721e7e9d9917d41ad87696bf70f0bc7dee450e9c71ad0a", size = 278316, upload-time = "2025-06-09T22:55:22.918Z" }, - { url = "https://files.pythonhosted.org/packages/f8/9d/994a5c1ce4389610838d1caec74bdf0e98b306c70314d46dbe4fcf21a3e2/propcache-0.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab50cef01b372763a13333b4e54021bdcb291fc9a8e2ccb9c2df98be51bcde6c", size = 282619, upload-time = "2025-06-09T22:55:24.651Z" }, - { url = 
"https://files.pythonhosted.org/packages/2b/00/a10afce3d1ed0287cef2e09506d3be9822513f2c1e96457ee369adb9a6cd/propcache-0.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fad3b2a085ec259ad2c2842666b2a0a49dea8463579c606426128925af1ed725", size = 265896, upload-time = "2025-06-09T22:55:26.049Z" }, - { url = "https://files.pythonhosted.org/packages/2e/a8/2aa6716ffa566ca57c749edb909ad27884680887d68517e4be41b02299f3/propcache-0.3.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:261fa020c1c14deafd54c76b014956e2f86991af198c51139faf41c4d5e83892", size = 252111, upload-time = "2025-06-09T22:55:27.381Z" }, - { url = "https://files.pythonhosted.org/packages/36/4f/345ca9183b85ac29c8694b0941f7484bf419c7f0fea2d1e386b4f7893eed/propcache-0.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:46d7f8aa79c927e5f987ee3a80205c987717d3659f035c85cf0c3680526bdb44", size = 268334, upload-time = "2025-06-09T22:55:28.747Z" }, - { url = "https://files.pythonhosted.org/packages/3e/ca/fcd54f78b59e3f97b3b9715501e3147f5340167733d27db423aa321e7148/propcache-0.3.2-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:6d8f3f0eebf73e3c0ff0e7853f68be638b4043c65a70517bb575eff54edd8dbe", size = 255026, upload-time = "2025-06-09T22:55:30.184Z" }, - { url = "https://files.pythonhosted.org/packages/8b/95/8e6a6bbbd78ac89c30c225210a5c687790e532ba4088afb8c0445b77ef37/propcache-0.3.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:03c89c1b14a5452cf15403e291c0ccd7751d5b9736ecb2c5bab977ad6c5bcd81", size = 250724, upload-time = "2025-06-09T22:55:31.646Z" }, - { url = "https://files.pythonhosted.org/packages/ee/b0/0dd03616142baba28e8b2d14ce5df6631b4673850a3d4f9c0f9dd714a404/propcache-0.3.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:0cc17efde71e12bbaad086d679ce575268d70bc123a5a71ea7ad76f70ba30bba", size = 268868, upload-time = "2025-06-09T22:55:33.209Z" }, - { url = 
"https://files.pythonhosted.org/packages/c5/98/2c12407a7e4fbacd94ddd32f3b1e3d5231e77c30ef7162b12a60e2dd5ce3/propcache-0.3.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:acdf05d00696bc0447e278bb53cb04ca72354e562cf88ea6f9107df8e7fd9770", size = 271322, upload-time = "2025-06-09T22:55:35.065Z" }, - { url = "https://files.pythonhosted.org/packages/35/91/9cb56efbb428b006bb85db28591e40b7736847b8331d43fe335acf95f6c8/propcache-0.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4445542398bd0b5d32df908031cb1b30d43ac848e20470a878b770ec2dcc6330", size = 265778, upload-time = "2025-06-09T22:55:36.45Z" }, - { url = "https://files.pythonhosted.org/packages/9a/4c/b0fe775a2bdd01e176b14b574be679d84fc83958335790f7c9a686c1f468/propcache-0.3.2-cp313-cp313t-win32.whl", hash = "sha256:f86e5d7cd03afb3a1db8e9f9f6eff15794e79e791350ac48a8c924e6f439f394", size = 41175, upload-time = "2025-06-09T22:55:38.436Z" }, - { url = "https://files.pythonhosted.org/packages/a4/ff/47f08595e3d9b5e149c150f88d9714574f1a7cbd89fe2817158a952674bf/propcache-0.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9704bedf6e7cbe3c65eca4379a9b53ee6a83749f047808cbb5044d40d7d72198", size = 44857, upload-time = "2025-06-09T22:55:39.687Z" }, - { url = "https://files.pythonhosted.org/packages/cc/35/cc0aaecf278bb4575b8555f2b137de5ab821595ddae9da9d3cd1da4072c7/propcache-0.3.2-py3-none-any.whl", hash = "sha256:98f1ec44fb675f5052cccc8e609c46ed23a35a1cfd18545ad4e29002d858a43f", size = 12663, upload-time = "2025-06-09T22:56:04.484Z" }, + { url = "https://files.pythonhosted.org/packages/a2/0f/f17b1b2b221d5ca28b4b876e8bb046ac40466513960646bda8e1853cdfa2/propcache-0.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e153e9cd40cc8945138822807139367f256f89c6810c2634a4f6902b52d3b4e2", size = 80061, upload-time = "2025-10-08T19:46:46.075Z" }, + { url = 
"https://files.pythonhosted.org/packages/76/47/8ccf75935f51448ba9a16a71b783eb7ef6b9ee60f5d14c7f8a8a79fbeed7/propcache-0.4.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cd547953428f7abb73c5ad82cbb32109566204260d98e41e5dfdc682eb7f8403", size = 46037, upload-time = "2025-10-08T19:46:47.23Z" }, + { url = "https://files.pythonhosted.org/packages/0a/b6/5c9a0e42df4d00bfb4a3cbbe5cf9f54260300c88a0e9af1f47ca5ce17ac0/propcache-0.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f048da1b4f243fc44f205dfd320933a951b8d89e0afd4c7cacc762a8b9165207", size = 47324, upload-time = "2025-10-08T19:46:48.384Z" }, + { url = "https://files.pythonhosted.org/packages/9e/d3/6c7ee328b39a81ee877c962469f1e795f9db87f925251efeb0545e0020d0/propcache-0.4.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ec17c65562a827bba85e3872ead335f95405ea1674860d96483a02f5c698fa72", size = 225505, upload-time = "2025-10-08T19:46:50.055Z" }, + { url = "https://files.pythonhosted.org/packages/01/5d/1c53f4563490b1d06a684742cc6076ef944bc6457df6051b7d1a877c057b/propcache-0.4.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:405aac25c6394ef275dee4c709be43745d36674b223ba4eb7144bf4d691b7367", size = 230242, upload-time = "2025-10-08T19:46:51.815Z" }, + { url = "https://files.pythonhosted.org/packages/20/e1/ce4620633b0e2422207c3cb774a0ee61cac13abc6217763a7b9e2e3f4a12/propcache-0.4.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0013cb6f8dde4b2a2f66903b8ba740bdfe378c943c4377a200551ceb27f379e4", size = 238474, upload-time = "2025-10-08T19:46:53.208Z" }, + { url = "https://files.pythonhosted.org/packages/46/4b/3aae6835b8e5f44ea6a68348ad90f78134047b503765087be2f9912140ea/propcache-0.4.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:15932ab57837c3368b024473a525e25d316d8353016e7cc0e5ba9eb343fbb1cf", size = 
221575, upload-time = "2025-10-08T19:46:54.511Z" }, + { url = "https://files.pythonhosted.org/packages/6e/a5/8a5e8678bcc9d3a1a15b9a29165640d64762d424a16af543f00629c87338/propcache-0.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:031dce78b9dc099f4c29785d9cf5577a3faf9ebf74ecbd3c856a7b92768c3df3", size = 216736, upload-time = "2025-10-08T19:46:56.212Z" }, + { url = "https://files.pythonhosted.org/packages/f1/63/b7b215eddeac83ca1c6b934f89d09a625aa9ee4ba158338854c87210cc36/propcache-0.4.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ab08df6c9a035bee56e31af99be621526bd237bea9f32def431c656b29e41778", size = 213019, upload-time = "2025-10-08T19:46:57.595Z" }, + { url = "https://files.pythonhosted.org/packages/57/74/f580099a58c8af587cac7ba19ee7cb418506342fbbe2d4a4401661cca886/propcache-0.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4d7af63f9f93fe593afbf104c21b3b15868efb2c21d07d8732c0c4287e66b6a6", size = 220376, upload-time = "2025-10-08T19:46:59.067Z" }, + { url = "https://files.pythonhosted.org/packages/c4/ee/542f1313aff7eaf19c2bb758c5d0560d2683dac001a1c96d0774af799843/propcache-0.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cfc27c945f422e8b5071b6e93169679e4eb5bf73bbcbf1ba3ae3a83d2f78ebd9", size = 226988, upload-time = "2025-10-08T19:47:00.544Z" }, + { url = "https://files.pythonhosted.org/packages/8f/18/9c6b015dd9c6930f6ce2229e1f02fb35298b847f2087ea2b436a5bfa7287/propcache-0.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:35c3277624a080cc6ec6f847cbbbb5b49affa3598c4535a0a4682a697aaa5c75", size = 215615, upload-time = "2025-10-08T19:47:01.968Z" }, + { url = "https://files.pythonhosted.org/packages/80/9e/e7b85720b98c45a45e1fca6a177024934dc9bc5f4d5dd04207f216fc33ed/propcache-0.4.1-cp312-cp312-win32.whl", hash = "sha256:671538c2262dadb5ba6395e26c1731e1d52534bfe9ae56d0b5573ce539266aa8", size = 38066, upload-time = "2025-10-08T19:47:03.503Z" }, + { url = 
"https://files.pythonhosted.org/packages/54/09/d19cff2a5aaac632ec8fc03737b223597b1e347416934c1b3a7df079784c/propcache-0.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:cb2d222e72399fcf5890d1d5cc1060857b9b236adff2792ff48ca2dfd46c81db", size = 41655, upload-time = "2025-10-08T19:47:04.973Z" }, + { url = "https://files.pythonhosted.org/packages/68/ab/6b5c191bb5de08036a8c697b265d4ca76148efb10fa162f14af14fb5f076/propcache-0.4.1-cp312-cp312-win_arm64.whl", hash = "sha256:204483131fb222bdaaeeea9f9e6c6ed0cac32731f75dfc1d4a567fc1926477c1", size = 37789, upload-time = "2025-10-08T19:47:06.077Z" }, + { url = "https://files.pythonhosted.org/packages/5b/5a/bc7b4a4ef808fa59a816c17b20c4bef6884daebbdf627ff2a161da67da19/propcache-0.4.1-py3-none-any.whl", hash = "sha256:af2a6052aeb6cf17d3e46ee169099044fd8224cbaf75c76a2ef596e8163e2237", size = 13305, upload-time = "2025-10-08T19:49:00.792Z" }, ] [[package]] @@ -838,12 +2357,283 @@ wheels = [ ] [[package]] -name = "pycparser" -version = "2.22" +name = "psycopg2-binary" +version = "2.9.11" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ac/6c/8767aaa597ba424643dc87348c6f1754dd9f48e80fdc1b9f7ca5c3a7c213/psycopg2-binary-2.9.11.tar.gz", hash = "sha256:b6aed9e096bf63f9e75edf2581aa9a7e7186d97ab5c177aa6c87797cd591236c", size = 379620, upload-time = "2025-10-10T11:14:48.041Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" }, + { url = 
"https://files.pythonhosted.org/packages/d8/91/f870a02f51be4a65987b45a7de4c2e1897dd0d01051e2b559a38fa634e3e/psycopg2_binary-2.9.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:be9b840ac0525a283a96b556616f5b4820e0526addb8dcf6525a0fa162730be4", size = 3756603, upload-time = "2025-10-10T11:11:52.213Z" }, + { url = "https://files.pythonhosted.org/packages/27/fa/cae40e06849b6c9a95eb5c04d419942f00d9eaac8d81626107461e268821/psycopg2_binary-2.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f090b7ddd13ca842ebfe301cd587a76a4cf0913b1e429eb92c1be5dbeb1a19bc", size = 3864509, upload-time = "2025-10-10T11:11:56.452Z" }, + { url = "https://files.pythonhosted.org/packages/2d/75/364847b879eb630b3ac8293798e380e441a957c53657995053c5ec39a316/psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ab8905b5dcb05bf3fb22e0cf90e10f469563486ffb6a96569e51f897c750a76a", size = 4411159, upload-time = "2025-10-10T11:12:00.49Z" }, + { url = "https://files.pythonhosted.org/packages/6f/a0/567f7ea38b6e1c62aafd58375665a547c00c608a471620c0edc364733e13/psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:bf940cd7e7fec19181fdbc29d76911741153d51cab52e5c21165f3262125685e", size = 4468234, upload-time = "2025-10-10T11:12:04.892Z" }, + { url = "https://files.pythonhosted.org/packages/30/da/4e42788fb811bbbfd7b7f045570c062f49e350e1d1f3df056c3fb5763353/psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fa0f693d3c68ae925966f0b14b8edda71696608039f4ed61b1fe9ffa468d16db", size = 4166236, upload-time = "2025-10-10T11:12:11.674Z" }, + { url = "https://files.pythonhosted.org/packages/3c/94/c1777c355bc560992af848d98216148be5f1be001af06e06fc49cbded578/psycopg2_binary-2.9.11-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a1cf393f1cdaf6a9b57c0a719a1068ba1069f022a59b8b1fe44b006745b59757", size = 3983083, upload-time = 
"2025-10-30T02:55:15.73Z" }, + { url = "https://files.pythonhosted.org/packages/bd/42/c9a21edf0e3daa7825ed04a4a8588686c6c14904344344a039556d78aa58/psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ef7a6beb4beaa62f88592ccc65df20328029d721db309cb3250b0aae0fa146c3", size = 3652281, upload-time = "2025-10-10T11:12:17.713Z" }, + { url = "https://files.pythonhosted.org/packages/12/22/dedfbcfa97917982301496b6b5e5e6c5531d1f35dd2b488b08d1ebc52482/psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:31b32c457a6025e74d233957cc9736742ac5a6cb196c6b68499f6bb51390bd6a", size = 3298010, upload-time = "2025-10-10T11:12:22.671Z" }, + { url = "https://files.pythonhosted.org/packages/66/ea/d3390e6696276078bd01b2ece417deac954dfdd552d2edc3d03204416c0c/psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:edcb3aeb11cb4bf13a2af3c53a15b3d612edeb6409047ea0b5d6a21a9d744b34", size = 3044641, upload-time = "2025-10-30T02:55:19.929Z" }, + { url = "https://files.pythonhosted.org/packages/12/9a/0402ded6cbd321da0c0ba7d34dc12b29b14f5764c2fc10750daa38e825fc/psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b6d93d7c0b61a1dd6197d208ab613eb7dcfdcca0a49c42ceb082257991de9d", size = 3347940, upload-time = "2025-10-10T11:12:26.529Z" }, + { url = "https://files.pythonhosted.org/packages/b1/d2/99b55e85832ccde77b211738ff3925a5d73ad183c0b37bcbbe5a8ff04978/psycopg2_binary-2.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:b33fabeb1fde21180479b2d4667e994de7bbf0eec22832ba5d9b5e4cf65b6c6d", size = 2714147, upload-time = "2025-10-10T11:12:29.535Z" }, +] + +[[package]] +name = "py" +version = "1.11.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/98/ff/fec109ceb715d2a6b4c4a85a61af3b40c723a961e8828319fbcb15b868dc/py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719", size = 207796, upload-time = 
"2021-11-04T17:17:01.377Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/f0/10642828a8dfb741e5f3fbaac830550a518a775c7fff6f04a007259b0548/py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378", size = 98708, upload-time = "2021-11-04T17:17:00.152Z" }, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, +] + +[[package]] +name = "pyasyncore" +version = "1.0.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/25/6e/956e2bc9b47e3310cd524036f506b779a77788c2a1eb732e544240ad346f/pyasyncore-1.0.4.tar.gz", hash = "sha256:2c7a8b9b750ba6260f1e5a061456d61320a80579c6a43d42183417da89c7d5d6", size = 15339, upload-time = "2024-02-28T08:49:47.234Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/46/aaa0999302d7a584a033ec23b6ca21a452cf9c7f6d8dce8d174ac407eb3f/pyasyncore-1.0.4-py3-none-any.whl", hash = "sha256:9e5f6dc9dc057c56370b7a5cdb4c4670fd4b0556de2913ed1f428cd6a5366895", size = 10032, upload-time = "2024-02-28T08:49:45.696Z" }, +] + +[[package]] +name = "pybase64" +version = "1.4.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/aa/b8/4ed5c7ad5ec15b08d35cc79ace6145d5c1ae426e46435f4987379439dfea/pybase64-1.4.3.tar.gz", hash = "sha256:c2ed274c9e0ba9c8f9c4083cfe265e66dd679126cd9c2027965d807352f3f053", size = 137272, upload-time = "2025-12-06T13:27:04.013Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/a7/efcaa564f091a2af7f18a83c1c4875b1437db56ba39540451dc85d56f653/pybase64-1.4.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:18d85e5ab8b986bb32d8446aca6258ed80d1bafe3603c437690b352c648f5967", size = 38167, upload-time = "2025-12-06T13:23:16.821Z" }, + { url = "https://files.pythonhosted.org/packages/db/c7/c7ad35adff2d272bf2930132db2b3eea8c44bb1b1f64eb9b2b8e57cde7b4/pybase64-1.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3f5791a3491d116d0deaf4d83268f48792998519698f8751efb191eac84320e9", size = 31673, upload-time = "2025-12-06T13:23:17.835Z" }, + { url = "https://files.pythonhosted.org/packages/43/1b/9a8cab0042b464e9a876d5c65fe5127445a2436da36fda64899b119b1a1b/pybase64-1.4.3-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:f0b3f200c3e06316f6bebabd458b4e4bcd4c2ca26af7c0c766614d91968dee27", size = 68210, upload-time = "2025-12-06T13:23:18.813Z" }, + { url = 
"https://files.pythonhosted.org/packages/62/f7/965b79ff391ad208b50e412b5d3205ccce372a2d27b7218ae86d5295b105/pybase64-1.4.3-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bb632edfd132b3eaf90c39c89aa314beec4e946e210099b57d40311f704e11d4", size = 71599, upload-time = "2025-12-06T13:23:20.195Z" }, + { url = "https://files.pythonhosted.org/packages/03/4b/a3b5175130b3810bbb8ccfa1edaadbd3afddb9992d877c8a1e2f274b476e/pybase64-1.4.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:356ef1d74648ce997f5a777cf8f1aefecc1c0b4fe6201e0ef3ec8a08170e1b54", size = 59922, upload-time = "2025-12-06T13:23:21.487Z" }, + { url = "https://files.pythonhosted.org/packages/da/5d/c38d1572027fc601b62d7a407721688b04b4d065d60ca489912d6893e6cf/pybase64-1.4.3-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.whl", hash = "sha256:c48361f90db32bacaa5518419d4eb9066ba558013aaf0c7781620279ecddaeb9", size = 56712, upload-time = "2025-12-06T13:23:22.77Z" }, + { url = "https://files.pythonhosted.org/packages/e7/d4/4e04472fef485caa8f561d904d4d69210a8f8fc1608ea15ebd9012b92655/pybase64-1.4.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:702bcaa16ae02139d881aeaef5b1c8ffb4a3fae062fe601d1e3835e10310a517", size = 59300, upload-time = "2025-12-06T13:23:24.543Z" }, + { url = "https://files.pythonhosted.org/packages/86/e7/16e29721b86734b881d09b7e23dfd7c8408ad01a4f4c7525f3b1088e25ec/pybase64-1.4.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:53d0ffe1847b16b647c6413d34d1de08942b7724273dd57e67dcbdb10c574045", size = 60278, upload-time = "2025-12-06T13:23:25.608Z" }, + { url = "https://files.pythonhosted.org/packages/b1/02/18515f211d7c046be32070709a8efeeef8a0203de4fd7521e6b56404731b/pybase64-1.4.3-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:9a1792e8b830a92736dae58f0c386062eb038dfe8004fb03ba33b6083d89cd43", size = 54817, upload-time = 
"2025-12-06T13:23:26.633Z" }, + { url = "https://files.pythonhosted.org/packages/e7/be/14e29d8e1a481dbff151324c96dd7b5d2688194bb65dc8a00ca0e1ad1e86/pybase64-1.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1d468b1b1ac5ad84875a46eaa458663c3721e8be5f155ade356406848d3701f6", size = 58611, upload-time = "2025-12-06T13:23:27.684Z" }, + { url = "https://files.pythonhosted.org/packages/b4/8a/a2588dfe24e1bbd742a554553778ab0d65fdf3d1c9a06d10b77047d142aa/pybase64-1.4.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e97b7bdbd62e71898cd542a6a9e320d9da754ff3ebd02cb802d69087ee94d468", size = 52404, upload-time = "2025-12-06T13:23:28.714Z" }, + { url = "https://files.pythonhosted.org/packages/27/fc/afcda7445bebe0cbc38cafdd7813234cdd4fc5573ff067f1abf317bb0cec/pybase64-1.4.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b33aeaa780caaa08ffda87fc584d5eab61e3d3bbb5d86ead02161dc0c20d04bc", size = 68817, upload-time = "2025-12-06T13:23:30.079Z" }, + { url = "https://files.pythonhosted.org/packages/d3/3a/87c3201e555ed71f73e961a787241a2438c2bbb2ca8809c29ddf938a3157/pybase64-1.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:1c0efcf78f11cf866bed49caa7b97552bc4855a892f9cc2372abcd3ed0056f0d", size = 57854, upload-time = "2025-12-06T13:23:31.17Z" }, + { url = "https://files.pythonhosted.org/packages/fd/7d/931c2539b31a7b375e7d595b88401eeb5bd6c5ce1059c9123f9b608aaa14/pybase64-1.4.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:66e3791f2ed725a46593f8bd2761ff37d01e2cdad065b1dceb89066f476e50c6", size = 54333, upload-time = "2025-12-06T13:23:32.422Z" }, + { url = "https://files.pythonhosted.org/packages/de/5e/537601e02cc01f27e9d75f440f1a6095b8df44fc28b1eef2cd739aea8cec/pybase64-1.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:72bb0b6bddadab26e1b069bb78e83092711a111a80a0d6b9edcb08199ad7299b", size = 56492, upload-time = "2025-12-06T13:23:33.515Z" }, + { url = 
"https://files.pythonhosted.org/packages/96/97/2a2e57acf8f5c9258d22aba52e71f8050e167b29ed2ee1113677c1b600c1/pybase64-1.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5b3365dbcbcdb0a294f0f50af0c0a16b27a232eddeeb0bceeefd844ef30d2a23", size = 70974, upload-time = "2025-12-06T13:23:36.27Z" }, + { url = "https://files.pythonhosted.org/packages/75/2e/a9e28941c6dab6f06e6d3f6783d3373044be9b0f9a9d3492c3d8d2260ac0/pybase64-1.4.3-cp312-cp312-win32.whl", hash = "sha256:7bca1ed3a5df53305c629ca94276966272eda33c0d71f862d2d3d043f1e1b91a", size = 33686, upload-time = "2025-12-06T13:23:37.848Z" }, + { url = "https://files.pythonhosted.org/packages/83/e3/507ab649d8c3512c258819c51d25c45d6e29d9ca33992593059e7b646a33/pybase64-1.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:9f2da8f56d9b891b18b4daf463a0640eae45a80af548ce435be86aa6eff3603b", size = 35833, upload-time = "2025-12-06T13:23:38.877Z" }, + { url = "https://files.pythonhosted.org/packages/bc/8a/6eba66cd549a2fc74bb4425fd61b839ba0ab3022d3c401b8a8dc2cc00c7a/pybase64-1.4.3-cp312-cp312-win_arm64.whl", hash = "sha256:0631d8a2d035de03aa9bded029b9513e1fee8ed80b7ddef6b8e9389ffc445da0", size = 31185, upload-time = "2025-12-06T13:23:39.908Z" }, + { url = "https://files.pythonhosted.org/packages/17/45/92322aec1b6979e789b5710f73c59f2172bc37c8ce835305434796824b7b/pybase64-1.4.3-graalpy312-graalpy250_312_native-macosx_10_13_x86_64.whl", hash = "sha256:2baaa092f3475f3a9c87ac5198023918ea8b6c125f4c930752ab2cbe3cd1d520", size = 38746, upload-time = "2025-12-06T13:26:25.869Z" }, + { url = "https://files.pythonhosted.org/packages/11/94/f1a07402870388fdfc2ecec0c718111189732f7d0f2d7fe1386e19e8fad0/pybase64-1.4.3-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:cde13c0764b1af07a631729f26df019070dad759981d6975527b7e8ecb465b6c", size = 32573, upload-time = "2025-12-06T13:26:27.792Z" }, + { url = 
"https://files.pythonhosted.org/packages/fa/8f/43c3bb11ca9bacf81cb0b7a71500bb65b2eda6d5fe07433c09b543de97f3/pybase64-1.4.3-graalpy312-graalpy250_312_native-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5c29a582b0ea3936d02bd6fe9bf674ab6059e6e45ab71c78404ab2c913224414", size = 43461, upload-time = "2025-12-06T13:26:28.906Z" }, + { url = "https://files.pythonhosted.org/packages/2d/4c/2a5258329200be57497d3972b5308558c6de42e3749c6cc2aa1cbe34b25a/pybase64-1.4.3-graalpy312-graalpy250_312_native-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b6b664758c804fa919b4f1257aa8cf68e95db76fc331de5f70bfc3a34655afe1", size = 36058, upload-time = "2025-12-06T13:26:30.092Z" }, + { url = "https://files.pythonhosted.org/packages/ea/6d/41faa414cde66ec023b0ca8402a8f11cb61731c3dc27c082909cbbd1f929/pybase64-1.4.3-graalpy312-graalpy250_312_native-win_amd64.whl", hash = "sha256:f7537fa22ae56a0bf51e4b0ffc075926ad91c618e1416330939f7ef366b58e3b", size = 36231, upload-time = "2025-12-06T13:26:31.656Z" }, +] + +[[package]] +name = "pycparser" +version = "2.23" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, +] + +[[package]] +name = "pycryptodome" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/8e/a6/8452177684d5e906854776276ddd34eca30d1b1e15aa1ee9cefc289a33f5/pycryptodome-3.23.0.tar.gz", hash = "sha256:447700a657182d60338bab09fdb27518f8856aecd80ae4c6bdddb67ff5da44ef", size = 4921276, upload-time = "2025-05-17T17:21:45.242Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/6c/a1f71542c969912bb0e106f64f60a56cc1f0fabecf9396f45accbe63fa68/pycryptodome-3.23.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:187058ab80b3281b1de11c2e6842a357a1f71b42cb1e15bce373f3d238135c27", size = 2495627, upload-time = "2025-05-17T17:20:47.139Z" }, + { url = "https://files.pythonhosted.org/packages/6e/4e/a066527e079fc5002390c8acdd3aca431e6ea0a50ffd7201551175b47323/pycryptodome-3.23.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:cfb5cd445280c5b0a4e6187a7ce8de5a07b5f3f897f235caa11f1f435f182843", size = 1640362, upload-time = "2025-05-17T17:20:50.392Z" }, + { url = "https://files.pythonhosted.org/packages/50/52/adaf4c8c100a8c49d2bd058e5b551f73dfd8cb89eb4911e25a0c469b6b4e/pycryptodome-3.23.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67bd81fcbe34f43ad9422ee8fd4843c8e7198dd88dd3d40e6de42ee65fbe1490", size = 2182625, upload-time = "2025-05-17T17:20:52.866Z" }, + { url = "https://files.pythonhosted.org/packages/5f/e9/a09476d436d0ff1402ac3867d933c61805ec2326c6ea557aeeac3825604e/pycryptodome-3.23.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8987bd3307a39bc03df5c8e0e3d8be0c4c3518b7f044b0f4c15d1aa78f52575", size = 2268954, upload-time = "2025-05-17T17:20:55.027Z" }, + { url = "https://files.pythonhosted.org/packages/f9/c5/ffe6474e0c551d54cab931918127c46d70cab8f114e0c2b5a3c071c2f484/pycryptodome-3.23.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa0698f65e5b570426fc31b8162ed4603b0c2841cbb9088e2b01641e3065915b", size = 2308534, upload-time = "2025-05-17T17:20:57.279Z" }, + { url = 
"https://files.pythonhosted.org/packages/18/28/e199677fc15ecf43010f2463fde4c1a53015d1fe95fb03bca2890836603a/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:53ecbafc2b55353edcebd64bf5da94a2a2cdf5090a6915bcca6eca6cc452585a", size = 2181853, upload-time = "2025-05-17T17:20:59.322Z" }, + { url = "https://files.pythonhosted.org/packages/ce/ea/4fdb09f2165ce1365c9eaefef36625583371ee514db58dc9b65d3a255c4c/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:156df9667ad9f2ad26255926524e1c136d6664b741547deb0a86a9acf5ea631f", size = 2342465, upload-time = "2025-05-17T17:21:03.83Z" }, + { url = "https://files.pythonhosted.org/packages/22/82/6edc3fc42fe9284aead511394bac167693fb2b0e0395b28b8bedaa07ef04/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:dea827b4d55ee390dc89b2afe5927d4308a8b538ae91d9c6f7a5090f397af1aa", size = 2267414, upload-time = "2025-05-17T17:21:06.72Z" }, + { url = "https://files.pythonhosted.org/packages/59/fe/aae679b64363eb78326c7fdc9d06ec3de18bac68be4b612fc1fe8902693c/pycryptodome-3.23.0-cp37-abi3-win32.whl", hash = "sha256:507dbead45474b62b2bbe318eb1c4c8ee641077532067fec9c1aa82c31f84886", size = 1768484, upload-time = "2025-05-17T17:21:08.535Z" }, + { url = "https://files.pythonhosted.org/packages/54/2f/e97a1b8294db0daaa87012c24a7bb714147c7ade7656973fd6c736b484ff/pycryptodome-3.23.0-cp37-abi3-win_amd64.whl", hash = "sha256:c75b52aacc6c0c260f204cbdd834f76edc9fb0d8e0da9fbf8352ef58202564e2", size = 1799636, upload-time = "2025-05-17T17:21:10.393Z" }, + { url = "https://files.pythonhosted.org/packages/18/3d/f9441a0d798bf2b1e645adc3265e55706aead1255ccdad3856dbdcffec14/pycryptodome-3.23.0-cp37-abi3-win_arm64.whl", hash = "sha256:11eeeb6917903876f134b56ba11abe95c0b0fd5e3330def218083c7d98bbcb3c", size = 1703675, upload-time = "2025-05-17T17:21:13.146Z" }, +] + +[[package]] +name = "pydantic" +version = "2.12.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = 
"annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, +] + +[package.optional-dependencies] +email = [ + { name = "email-validator" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, + { url = 
"https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, + { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, + { url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" }, + { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, + { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, 
upload-time = "2025-11-04T13:40:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, + { url = "https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, + { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, + { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, + { url = 
"https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, + { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, +] + +[[package]] +name = "pydantic-extra-types" +version = "2.10.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3a/10/fb64987804cde41bcc39d9cd757cd5f2bb5d97b389d81aa70238b14b8a7e/pydantic_extra_types-2.10.6.tar.gz", hash = "sha256:c63d70bf684366e6bbe1f4ee3957952ebe6973d41e7802aea0b770d06b116aeb", size = 141858, upload-time = "2025-10-08T13:47:49.483Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/93/04/5c918669096da8d1c9ec7bb716bd72e755526103a61bc5e76a3e4fb23b53/pydantic_extra_types-2.10.6-py3-none-any.whl", hash = "sha256:6106c448316d30abf721b5b9fecc65e983ef2614399a24142d689c7546cc246a", size = 40949, upload-time = "2025-10-08T13:47:48.268Z" }, +] + +[[package]] +name = "pydantic-settings" +version = "2.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/43/4b/ac7e0aae12027748076d72a8764ff1c9d82ca75a7a52622e67ed3f765c54/pydantic_settings-2.12.0.tar.gz", hash = "sha256:005538ef951e3c2a68e1c08b292b5f2e71490def8589d4221b95dab00dafcfd0", size = 194184, upload-time = "2025-11-10T14:25:47.013Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/60/5d4751ba3f4a40a6891f24eec885f51afd78d208498268c734e256fb13c4/pydantic_settings-2.12.0-py3-none-any.whl", hash = "sha256:fddb9fd99a5b18da837b29710391e945b1e30c135477f484084ee513adb93809", size = 51880, upload-time = "2025-11-10T14:25:45.546Z" }, +] + +[[package]] +name = "pydispatcher" +version = "2.0.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/db/030d0700ae90d2f9d52c2f3c1f864881e19cef8cba3b0a08759c8494c19c/PyDispatcher-2.0.7.tar.gz", hash = "sha256:b777c6ad080dc1bad74a4c29d6a46914fa6701ac70f94b0d66fbcfde62f5be31", size = 38891, upload-time = "2023-02-17T20:11:13.106Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/66/0e/9ee7bc0b48ec45d93b302fa2d787830dca4dc454d31a237faa5815995988/PyDispatcher-2.0.7-py3-none-any.whl", hash = "sha256:96543bea04115ffde08f851e1d45cacbfd1ee866ac42127d9b476dc5aefa7de0", size = 12040, upload-time = "2023-02-17T20:11:11.991Z" }, +] + +[[package]] +name = "pyfiglet" +version = "1.0.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/c8/e3/0a86276ad2c383ce08d76110a8eec2fe22e7051c4b8ba3fa163a0b08c428/pyfiglet-1.0.4.tar.gz", hash = "sha256:db9c9940ed1bf3048deff534ed52ff2dafbbc2cd7610b17bb5eca1df6d4278ef", size = 1560615, upload-time = "2025-08-15T18:32:47.302Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9f/5c/fe9f95abd5eaedfa69f31e450f7e2768bef121dbdf25bcddee2cd3087a16/pyfiglet-1.0.4-py3-none-any.whl", hash = "sha256:65b57b7a8e1dff8a67dc8e940a117238661d5e14c3e49121032bd404d9b2b39f", size = 1806118, upload-time = "2025-08-15T18:32:45.556Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pymilvus" +version = "2.6.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "grpcio" }, + { name = "orjson" }, + { name = "pandas" }, + { name = "protobuf" }, + { name = "python-dotenv" }, + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/02/04/9ace30346a8fe2d8e9a047678bb563bc63e1e181d6a583a8a205806a211b/pymilvus-2.6.5.tar.gz", hash = "sha256:08f790acbbb4888f76394daa807c0227efdd744b6d39f3130f39afe77ba17ac6", size = 1365608, upload-time = "2025-12-05T08:59:49.777Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/58/45/c5416f2d64dd8add626a90166d8389a97ebc39c107ea01c15ea57bf3a07f/pymilvus-2.6.5-py3-none-any.whl", hash = "sha256:9e1caddd96361cd41f4e0685b6bd3d99bbaea94c8284b1fef5575bcfd47d7a2f", size = 280832, upload-time = "2025-12-05T08:59:48.016Z" }, +] + +[[package]] +name = "pymysql" +version = "1.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/ae/1fe3fcd9f959efa0ebe200b8de88b5a5ce3e767e38c7ac32fb179f16a388/pymysql-1.1.2.tar.gz", hash = "sha256:4961d3e165614ae65014e361811a724e2044ad3ea3739de9903ae7c21f539f03", size = 48258, upload-time = "2025-08-24T12:55:55.146Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/4c/ad33b92b9864cbde84f259d5df035a6447f91891f5be77788e2a3892bce3/pymysql-1.1.2-py3-none-any.whl", hash = "sha256:e6b1d89711dd51f8f74b1631fe08f039e7d76cf67a42a323d3178f0f25762ed9", size = 45300, upload-time = "2025-08-24T12:55:53.394Z" }, +] + +[[package]] +name = "pyparsing" +version = "3.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/33/c1/1d9de9aeaa1b89b0186e5fe23294ff6517fce1bc69149185577cd31016b2/pyparsing-3.3.1.tar.gz", hash = "sha256:47fad0f17ac1e2cad3de3b458570fbc9b03560aa029ed5e16ee5554da9a2251c", size = 1550512, upload-time = "2025-12-23T03:14:04.391Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8b/40/2614036cdd416452f5bf98ec037f38a1afb17f327cb8e6b652d4729e0af8/pyparsing-3.3.1-py3-none-any.whl", hash = "sha256:023b5e7e5520ad96642e2c6db4cb683d3970bd640cdf7115049a6e9c3682df82", size = 121793, upload-time = "2025-12-23T03:14:02.103Z" }, +] + +[[package]] +name = "pypika" +version = "0.48.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c7/2c/94ed7b91db81d61d7096ac8f2d325ec562fc75e35f3baea8749c85b28784/PyPika-0.48.9.tar.gz", hash = 
"sha256:838836a61747e7c8380cd1b7ff638694b7a7335345d0f559b04b2cd832ad5378", size = 67259, upload-time = "2022-03-15T11:22:57.066Z" } + +[[package]] +name = "pyproject-hooks" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/82/28175b2414effca1cdac8dc99f76d660e7a4fb0ceefa4b4ab8f5f6742925/pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8", size = 19228, upload-time = "2024-09-29T09:24:13.293Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/24/12818598c362d7f300f18e74db45963dbcb85150324092410c8b49405e42/pyproject_hooks-1.2.0-py3-none-any.whl", hash = "sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913", size = 10216, upload-time = "2024-09-29T09:24:11.978Z" }, +] + +[[package]] +name = "pyreadline3" +version = "3.5.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/49/4cea918a08f02817aabae639e3d0ac046fef9f9180518a3ad394e22da148/pyreadline3-3.5.4.tar.gz", hash = "sha256:8d57d53039a1c75adba8e50dd3d992b28143480816187ea5efbd5c78e6c885b7", size = 99839, upload-time = "2024-09-19T02:40:10.062Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6", size = 83178, upload-time = "2024-09-19T02:40:08.598Z" }, ] [[package]] @@ -860,70 +2650,116 @@ wheels = [ [[package]] name = "python-dotenv" -version = "1.1.0" +version = "1.2.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/88/2c/7bb1416c5620485aa793f2de31d3df393d3686aa8a8506d11e10e13c5baf/python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5", size = 39920, upload-time = 
"2025-03-25T10:14:56.835Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f0/26/19cadc79a718c5edbec86fd4919a6b6d3f681039a2f6d66d14be94e75fb9/python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6", size = 44221, upload-time = "2025-10-26T15:12:10.434Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1e/18/98a99ad95133c6a6e2005fe89faedf294a748bd5dc803008059409ac9b1e/python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d", size = 20256, upload-time = "2025-03-25T10:14:55.034Z" }, + { url = "https://files.pythonhosted.org/packages/14/1b/a298b06749107c305e1fe0f814c6c74aea7b2f1e10989cb30f544a1b3253/python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61", size = 21230, upload-time = "2025-10-26T15:12:09.109Z" }, ] [[package]] name = "python-multipart" -version = "0.0.20" +version = "0.0.21" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158, upload-time = "2024-12-16T19:45:46.972Z" } +sdist = { url = "https://files.pythonhosted.org/packages/78/96/804520d0850c7db98e5ccb70282e29208723f0964e88ffd9d0da2f52ea09/python_multipart-0.0.21.tar.gz", hash = "sha256:7137ebd4d3bbf70ea1622998f902b97a29434a9e8dc40eb203bbcf7c2a2cba92", size = 37196, upload-time = "2025-12-17T09:24:22.446Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" }, + { url = 
"https://files.pythonhosted.org/packages/aa/76/03af049af4dcee5d27442f71b6924f01f3efb5d2bd34f23fcd563f2cc5f5/python_multipart-0.0.21-py3-none-any.whl", hash = "sha256:cf7a6713e01c87aa35387f4774e812c4361150938d20d232800f75ffcf266090", size = 24541, upload-time = "2025-12-17T09:24:21.153Z" }, ] [[package]] name = "python-rapidjson" -version = "1.20" +version = "1.23" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/47/2a/2510836a65a1fc40c923393611896c3c8ad1e2f583ed0c32cf0bb48cc378/python_rapidjson-1.20.tar.gz", hash = "sha256:115f08c86d2df7543c02605e77c84727cdabc4b08310d2f097e953efeaaa73eb", size = 238158, upload-time = "2024-08-05T17:00:29.91Z" } +sdist = { url = "https://files.pythonhosted.org/packages/26/3a/c32aee1dc385e50c1d6e78e56abdbc6aca283127f06f6ec0be1a86b2e3c1/python_rapidjson-1.23.tar.gz", hash = "sha256:0f845daeb26be147f5720a8c410308235092bb4fbb81ea408aa77203e26296fb", size = 239605, upload-time = "2025-12-07T06:14:27.51Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b1/d1/40616f40499f8f61e83135aa078a0ba7d392e7ea63c016c7cc544ecb7344/python_rapidjson-1.20-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6056fcc8caeb9b04775bf655568bba362c7670ab792c1b438671bb056db954cd", size = 230104, upload-time = "2024-08-05T17:55:27.252Z" }, - { url = "https://files.pythonhosted.org/packages/ea/2f/d28f4da4df83cfeb60fb7b84396a9c3678a0ac615012dc234d5b962fbaaf/python_rapidjson-1.20-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:225bd4cbabfe7910261cbcebb8b811d4ff98e90cdd17c233b916c6aa71a9553f", size = 211105, upload-time = "2024-08-05T17:55:28.869Z" }, - { url = "https://files.pythonhosted.org/packages/b3/60/ebc521afbdb626bb571a815378831f685213cb6b98ffe08176fe3191c5a3/python_rapidjson-1.20-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:026077b663acf93a3f2b1adb87282e611a30214b8ae8001b7e4863a3b978e646", size = 1650309, upload-time = "2024-08-05T17:55:30.917Z" }, - 
{ url = "https://files.pythonhosted.org/packages/19/da/4c375b90c54091e93a600fca06a9f3b8456b0e09050e862e998fc22b6385/python_rapidjson-1.20-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:884e1dd4c0770ed424737941af4d5dc9014995f9c33595f151af13f83ce282c3", size = 1700043, upload-time = "2024-08-05T17:55:33.244Z" }, - { url = "https://files.pythonhosted.org/packages/bc/6e/2718413e7bc300523c5d4eaa25418059d8b17effa9aef2f2ae370493b861/python_rapidjson-1.20-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f55531c8197cb7a21a5ef0ffa46f2b8fc8c5fe7c6fd08bdbd2063ae65d2ff65", size = 1700523, upload-time = "2024-08-05T17:55:35.751Z" }, - { url = "https://files.pythonhosted.org/packages/32/fe/d96e996f9c5140d3ce93d440f871a1b336f1c14fae27b64d4872fc58d45d/python_rapidjson-1.20-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c60121d155562dc694c05ed7df4e39e42ee1d3adff2a060c64a004498e6451f7", size = 1598383, upload-time = "2024-08-05T17:55:37.243Z" }, - { url = "https://files.pythonhosted.org/packages/46/32/ef3a381641b803e1b67c9b9c360d161b650620605768652e704fb35ad2b9/python_rapidjson-1.20-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3a6620eed0b04196f37fab7048c1d672d03391bb29d7f09ee8fee8dea33f11f4", size = 2454134, upload-time = "2024-08-05T17:55:39.04Z" }, - { url = "https://files.pythonhosted.org/packages/2f/50/771826d3f217b7c597f14df0dfa943d9e6f2f14749d974de4402f56ce39a/python_rapidjson-1.20-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ddb63eff401ce7cf20cdd5e21942fc23fbe0e1dc1d96d7ae838645fb1f74fb47", size = 2585576, upload-time = "2024-08-05T17:55:40.689Z" }, - { url = "https://files.pythonhosted.org/packages/64/95/f3e7ed53c9ab27a99c876c42b7d1994312e6fd2c2d8131ce849bd4275be8/python_rapidjson-1.20-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:05e28c3dbb4a0d74ec13af9668ef2b9f302edf83cf7ce1d8316a95364720eec0", size = 2599382, upload-time = 
"2024-08-05T17:55:43.111Z" }, - { url = "https://files.pythonhosted.org/packages/bc/4c/34778932d0145fdc7087274cd4c0fa421a96acbc96bf9860cbdf3e389dcd/python_rapidjson-1.20-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b733978ecd84fc5df9a778ce821dc1f3113f7bfc2493cac0bb17efb4ae0bb8fa", size = 2537066, upload-time = "2024-08-05T17:55:45.738Z" }, - { url = "https://files.pythonhosted.org/packages/50/16/dfef47ec507d5a5d00281b8db8526d5c36b715afeeae0ceeef4030f1640f/python_rapidjson-1.20-cp312-cp312-win32.whl", hash = "sha256:d87041448cec00e2db5d858625a76dc1b59eef6691a039acff6d92ad8581cfc1", size = 128358, upload-time = "2024-08-05T17:55:48.108Z" }, - { url = "https://files.pythonhosted.org/packages/bc/97/42a550a79ab90ab37fcd8b519cd71bba4b96b85679218100d63b437770c0/python_rapidjson-1.20-cp312-cp312-win_amd64.whl", hash = "sha256:5d3be149ce5475f9605f01240487541057792abad94d3fd0cd56af363cf5a4dc", size = 149067, upload-time = "2024-08-05T17:55:49.834Z" }, - { url = "https://files.pythonhosted.org/packages/18/04/47d9d10c3fa6e57af9462792088187605a07d88ad6f6f2e193fb01eff0fc/python_rapidjson-1.20-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:daee815b4c20ca6e4dbc6bde373dd3f65b53813d775f1c94b765b33b402513a7", size = 229315, upload-time = "2024-08-05T17:55:51.263Z" }, - { url = "https://files.pythonhosted.org/packages/9a/3a/0c4e0af51d7356d9efdef1bf1785d9d9f9e0789a7d2844cc3e9b35ef383f/python_rapidjson-1.20-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:083df379c769b30f9bc40041c91fd9d8f7bb8ca2b3c7170258842aced2098e05", size = 211111, upload-time = "2024-08-05T17:55:52.707Z" }, - { url = "https://files.pythonhosted.org/packages/83/e1/e253de9a774d021f9a6947f845628fae8237f441c63198e8a72e5906d31f/python_rapidjson-1.20-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9399ad75a2e3377f9e6208caabe73eb9354cd01b732407475ccadcd42c577df", size = 1650131, upload-time = "2024-08-05T17:55:54.302Z" }, - { url = 
"https://files.pythonhosted.org/packages/3e/93/8f723c7f7be055086d6bec2ba9e5ef13e749c3fb3ad5a3dc1d740acee889/python_rapidjson-1.20-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:599ab208ccf6172d6cfac1abe048c837e62612f91f97d198e32773c45346a0b4", size = 1699873, upload-time = "2024-08-05T17:55:55.967Z" }, - { url = "https://files.pythonhosted.org/packages/7d/2e/eb7255601b81a5b70f2bff05caab136e191b66825c16db3e7db1bdaa8314/python_rapidjson-1.20-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf3c0e2a5b97b0d07311f15f0dce4434e43dec865c3794ad1b10d968460fd665", size = 1700484, upload-time = "2024-08-05T17:55:57.846Z" }, - { url = "https://files.pythonhosted.org/packages/90/54/23d8b595dd4fdbdaa6c5f723a4df7a7be78aa702aa0b6dac6c964e6e6d30/python_rapidjson-1.20-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8064b8edb57ddd9e3ffa539cf2ec2f03515751fb0698b40ba5cb66a2123af19", size = 1598344, upload-time = "2024-08-05T17:55:59.586Z" }, - { url = "https://files.pythonhosted.org/packages/3d/3a/3628e199a826e7bc598633ce895516981602ab1d8fce76359005f90ca488/python_rapidjson-1.20-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bc79d7f00f7538e027960ca6bcd1e03ed99fcf660d4d882d1c22f641155d0db0", size = 2454206, upload-time = "2024-08-05T17:56:01.556Z" }, - { url = "https://files.pythonhosted.org/packages/ed/19/eef8629f73b1af21fa778d140e68e72076fe5746357426d6716a0c411dd2/python_rapidjson-1.20-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:87aa0b01b8c20984844f1440b8ff6bdb32de911a1750fed344b9daed33b4b52b", size = 2585553, upload-time = "2024-08-05T17:56:03.637Z" }, - { url = "https://files.pythonhosted.org/packages/d8/9d/217e56c74a65cfaf4441b26b6206b924b41fb339f98776a74e60dd287b46/python_rapidjson-1.20-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4099cb9eae8a0ce19c09e02729eb6d69d5180424f13a2641a6c407d053e47a82", size = 2599513, upload-time = 
"2024-08-05T17:56:05.795Z" }, - { url = "https://files.pythonhosted.org/packages/54/f6/4d40189f14e4fa5526a91aad9944864c8a4eebc0257e0314a331f3c64170/python_rapidjson-1.20-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4c680cd2b4de760ff6875de71fe6a87bd610aa116593d62e4f81a563be86ae18", size = 2537192, upload-time = "2024-08-05T17:56:07.53Z" }, - { url = "https://files.pythonhosted.org/packages/ee/30/f3f40abfd8d7f0586b88ccfcd747f2e227fe589c16fbb485b1e238d8e641/python_rapidjson-1.20-cp313-cp313-win32.whl", hash = "sha256:9e431a7afc77aa874fed537c9f6bf5fcecaef124ebeae2a2379d3b9e9adce74b", size = 128362, upload-time = "2024-08-05T17:56:09.332Z" }, - { url = "https://files.pythonhosted.org/packages/94/df/7126352e55cb72a5ca99630bd44ffb11bbf61ee35f4e1f34d203a77597c5/python_rapidjson-1.20-cp313-cp313-win_amd64.whl", hash = "sha256:7444bc7e6a04c03d6ed748b5dab0798fa2b3f2b303be8c38d3af405b2cac6d63", size = 149072, upload-time = "2024-08-05T17:56:10.625Z" }, + { url = "https://files.pythonhosted.org/packages/08/e0/a78486cfb25a8c65d5e2a947aaa000bfd211b4705dc4e0657a42c6385cc5/python_rapidjson-1.23-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:56e557fb6a7d7babfeb8ebaa4d096d4ce127477ecf46fe7de7f1edf2e1d8e4d6", size = 216508, upload-time = "2025-12-07T07:19:12.614Z" }, + { url = "https://files.pythonhosted.org/packages/6d/f2/b8d9a47cf55e25d76865d7f1691b2b94b38061c5f3fa4b385848a362366e/python_rapidjson-1.23-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d8e107121f5c1e98cb4f0e5fde443e0f66b45eadc3269bc2416e31261535f444", size = 213921, upload-time = "2025-12-07T07:19:13.908Z" }, + { url = "https://files.pythonhosted.org/packages/8a/ae/700b6f039fa799c3690193424185b1a2f1a49b035dd8cf81b73406dfbfca/python_rapidjson-1.23-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fc45ef1f725b3a9a27cdedcf9997f1f8c5a523ac03882d3925c6f764b33e5e1b", size = 1722258, upload-time = "2025-12-07T07:19:15.249Z" }, + { url = 
"https://files.pythonhosted.org/packages/95/89/b4d2308a065d9a5ff3afc5c93c21358b5d82f944bbed4e54847231e24f81/python_rapidjson-1.23-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f87de7b994d65da2327fffdc5d3d7166782e3ca99c76c0560c8a7f1e109a5b54", size = 1780680, upload-time = "2025-12-07T07:19:16.71Z" }, + { url = "https://files.pythonhosted.org/packages/61/89/7b0047dfaa014cc456b29cf66913143bd0541225defaacf1727eee13291e/python_rapidjson-1.23-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6067810f0fd57713ec733b0b6ae265ef169e13b2ce04a4938b1807cddd8b4db4", size = 1760351, upload-time = "2025-12-07T07:19:17.946Z" }, + { url = "https://files.pythonhosted.org/packages/70/60/a2dfb056a3ad6ca07c049c9376cfa509648765e805d9588c0f48bb998c33/python_rapidjson-1.23-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:83306643cf31c0833b226d4317e8738b1b5ed4371e310f3c552be994c01a3df0", size = 2570107, upload-time = "2025-12-07T07:19:19.17Z" }, + { url = "https://files.pythonhosted.org/packages/b8/a6/e8873f34a07a524f4cb87a8934c783207674d5587533a50d0f2c55064d7b/python_rapidjson-1.23-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:13797fdcd43e558b81d3344c637bf878878fd6dede84409769d6910f8f6a9024", size = 2696763, upload-time = "2025-12-07T07:19:21.01Z" }, + { url = "https://files.pythonhosted.org/packages/23/cb/ad2a16d6b20a457e8acd745dca416f19cf0de738311d213c544112260cc8/python_rapidjson-1.23-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad674edb9dfe8181fb704a14149e5eb30ae179a92021484ebe8935b8d0f88495", size = 2675144, upload-time = "2025-12-07T07:19:22.609Z" }, + { url = "https://files.pythonhosted.org/packages/65/27/943fef83837f002d990274b82d5193d066aeef128c2ba6c009d549d0e5ad/python_rapidjson-1.23-cp312-cp312-win32.whl", hash = "sha256:0c64958048ce714ccc42c659ef954812ed6de79fe4800322b3926ca46f60ffd9", size = 130858, upload-time = "2025-12-07T07:19:23.887Z" }, + { url = 
"https://files.pythonhosted.org/packages/89/cd/ef6c1bc784c3a081fabcf867c1b3affcb18ba1ffd9d71aa036f96a2ef979/python_rapidjson-1.23-cp312-cp312-win_amd64.whl", hash = "sha256:cbb0a67a5330d28279a5c3b68068e901deedcd21ade0ec23be1bcc250948ae62", size = 151270, upload-time = "2025-12-07T07:19:25.057Z" }, ] [[package]] -name = "pywin32" -version = "310" +name = "pytz" +version = "2025.2" source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f8/bf/abbd3cdfb8fbc7fb3d4d38d320f2441b1e7cbe29be4f23797b4a2b5d8aac/pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3", size = 320884, upload-time = "2025-03-25T02:25:00.538Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6b/ec/4fdbe47932f671d6e348474ea35ed94227fb5df56a7c30cbbb42cd396ed0/pywin32-310-cp312-cp312-win32.whl", hash = "sha256:8a75a5cc3893e83a108c05d82198880704c44bbaee4d06e442e471d3c9ea4f3d", size = 8796239, upload-time = "2025-03-17T00:55:58.807Z" }, - { url = "https://files.pythonhosted.org/packages/e3/e5/b0627f8bb84e06991bea89ad8153a9e50ace40b2e1195d68e9dff6b03d0f/pywin32-310-cp312-cp312-win_amd64.whl", hash = "sha256:bf5c397c9a9a19a6f62f3fb821fbf36cac08f03770056711f765ec1503972060", size = 9503839, upload-time = "2025-03-17T00:56:00.8Z" }, - { url = "https://files.pythonhosted.org/packages/1f/32/9ccf53748df72301a89713936645a664ec001abd35ecc8578beda593d37d/pywin32-310-cp312-cp312-win_arm64.whl", hash = "sha256:2349cc906eae872d0663d4d6290d13b90621eaf78964bb1578632ff20e152966", size = 8459470, upload-time = "2025-03-17T00:56:02.601Z" }, - { url = "https://files.pythonhosted.org/packages/1c/09/9c1b978ffc4ae53999e89c19c77ba882d9fce476729f23ef55211ea1c034/pywin32-310-cp313-cp313-win32.whl", hash = "sha256:5d241a659c496ada3253cd01cfaa779b048e90ce4b2b38cd44168ad555ce74ab", size = 8794384, upload-time = "2025-03-17T00:56:04.383Z" }, - { url = 
"https://files.pythonhosted.org/packages/45/3c/b4640f740ffebadd5d34df35fecba0e1cfef8fde9f3e594df91c28ad9b50/pywin32-310-cp313-cp313-win_amd64.whl", hash = "sha256:667827eb3a90208ddbdcc9e860c81bde63a135710e21e4cb3348968e4bd5249e", size = 9503039, upload-time = "2025-03-17T00:56:06.207Z" }, - { url = "https://files.pythonhosted.org/packages/b4/f4/f785020090fb050e7fb6d34b780f2231f302609dc964672f72bfaeb59a28/pywin32-310-cp313-cp313-win_arm64.whl", hash = "sha256:e308f831de771482b7cf692a1f308f8fca701b2d8f9dde6cc440c7da17e47b33", size = 8458152, upload-time = "2025-03-17T00:56:07.819Z" }, + { url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" }, +] + +[[package]] +name = "pyviz-comms" +version = "3.0.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "param" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/ee/2b5367b911bab506662abffe6f342101a9b3edacee91ff9afe62db5fe9a7/pyviz_comms-3.0.6.tar.gz", hash = "sha256:73d66b620390d97959b2c4d8a2c0778d41fe20581be4717f01e46b8fae8c5695", size = 197772, upload-time = "2025-06-20T16:50:30.97Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/5a/f8c0868199bbb231a02616286ce8a4ccb85f5387b9215510297dcfedd214/pyviz_comms-3.0.6-py3-none-any.whl", hash = "sha256:4eba6238cd4a7f4add2d11879ce55411785b7d38a7c5dba42c7a0826ca53e6c2", size = 84275, upload-time = "2025-06-20T16:50:28.826Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = 
"2025-09-25T21:33:16.546Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/33/422b98d2195232ca1826284a76852ad5a86fe23e31b009c9886b2d0fb8b2/pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196", size = 182063, upload-time = "2025-09-25T21:32:11.445Z" }, + { url = "https://files.pythonhosted.org/packages/89/a0/6cf41a19a1f2f3feab0e9c0b74134aa2ce6849093d5517a0c550fe37a648/pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0", size = 173973, upload-time = "2025-09-25T21:32:12.492Z" }, + { url = "https://files.pythonhosted.org/packages/ed/23/7a778b6bd0b9a8039df8b1b1d80e2e2ad78aa04171592c8a5c43a56a6af4/pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28", size = 775116, upload-time = "2025-09-25T21:32:13.652Z" }, + { url = "https://files.pythonhosted.org/packages/65/30/d7353c338e12baef4ecc1b09e877c1970bd3382789c159b4f89d6a70dc09/pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c", size = 844011, upload-time = "2025-09-25T21:32:15.21Z" }, + { url = "https://files.pythonhosted.org/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc", size = 807870, upload-time = "2025-09-25T21:32:16.431Z" }, + { url = "https://files.pythonhosted.org/packages/05/c0/b3be26a015601b822b97d9149ff8cb5ead58c66f981e04fedf4e762f4bd4/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e", size = 761089, 
upload-time = "2025-09-25T21:32:17.56Z" }, + { url = "https://files.pythonhosted.org/packages/be/8e/98435a21d1d4b46590d5459a22d88128103f8da4c2d4cb8f14f2a96504e1/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea", size = 790181, upload-time = "2025-09-25T21:32:18.834Z" }, + { url = "https://files.pythonhosted.org/packages/74/93/7baea19427dcfbe1e5a372d81473250b379f04b1bd3c4c5ff825e2327202/pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5", size = 137658, upload-time = "2025-09-25T21:32:20.209Z" }, + { url = "https://files.pythonhosted.org/packages/86/bf/899e81e4cce32febab4fb42bb97dcdf66bc135272882d1987881a4b519e9/pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b", size = 154003, upload-time = "2025-09-25T21:32:21.167Z" }, + { url = "https://files.pythonhosted.org/packages/1a/08/67bd04656199bbb51dbed1439b7f27601dfb576fb864099c7ef0c3e55531/pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd", size = 140344, upload-time = "2025-09-25T21:32:22.617Z" }, +] + +[[package]] +name = "redis" +version = "7.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/c8/983d5c6579a411d8a99bc5823cc5712768859b5ce2c8afe1a65b37832c81/redis-7.1.0.tar.gz", hash = "sha256:b1cc3cfa5a2cb9c2ab3ba700864fb0ad75617b41f01352ce5779dabf6d5f9c3c", size = 4796669, upload-time = "2025-11-19T15:54:39.961Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/f0/8956f8a86b20d7bb9d6ac0187cf4cd54d8065bc9a1a09eb8011d4d326596/redis-7.1.0-py3-none-any.whl", hash = "sha256:23c52b208f92b56103e17c5d06bdc1a6c2c0b3106583985a76a18f83b265de2b", size = 354159, upload-time = "2025-11-19T15:54:38.064Z" }, +] + +[[package]] +name = "referencing" +version = 
"0.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "rpds-py" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" }, +] + +[[package]] +name = "regex" +version = "2025.11.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cc/a9/546676f25e573a4cf00fe8e119b78a37b6a8fe2dc95cda877b30889c9c45/regex-2025.11.3.tar.gz", hash = "sha256:1fedc720f9bb2494ce31a58a1631f9c82df6a09b49c19517ea5cc280b4541e01", size = 414669, upload-time = "2025-11-03T21:34:22.089Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/59/9b/7c29be7903c318488983e7d97abcf8ebd3830e4c956c4c540005fcfb0462/regex-2025.11.3-cp312-cp312-win32.whl", hash = "sha256:3839967cf4dc4b985e1570fd8d91078f0c519f30491c60f9ac42a8db039be204", size = 266194, upload-time = "2025-11-03T21:31:51.53Z" }, + { url = "https://files.pythonhosted.org/packages/1a/67/3b92df89f179d7c367be654ab5626ae311cb28f7d5c237b6bb976cd5fbbb/regex-2025.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:e721d1b46e25c481dc5ded6f4b3f66c897c58d2e8cfdf77bbced84339108b0b9", size = 277069, upload-time = "2025-11-03T21:31:53.151Z" }, + { url = "https://files.pythonhosted.org/packages/d7/55/85ba4c066fe5094d35b249c3ce8df0ba623cfd35afb22d6764f23a52a1c5/regex-2025.11.3-cp312-cp312-win_arm64.whl", hash = 
"sha256:64350685ff08b1d3a6fff33f45a9ca183dc1d58bbfe4981604e70ec9801bbc26", size = 270330, upload-time = "2025-11-03T21:31:54.514Z" }, ] [[package]] name = "requests" -version = "2.32.4" +version = "2.32.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "certifi" }, @@ -931,9 +2767,218 @@ dependencies = [ { name = "idna" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e1/0a/929373653770d8a0d7ea76c37de6e41f11eb07559b103b1c02cafb3f7cf8/requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422", size = 135258, upload-time = "2025-06-09T16:43:07.34Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" }, + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/f2/05f29bc3913aea15eb670be136045bf5c5bbf4b99ecb839da9b422bb2c85/requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9", size = 55650, 
upload-time = "2024-03-22T20:32:29.939Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/5d/63d4ae3b9daea098d5d6f5da83984853c1bbacd5dc826764b249fe119d24/requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36", size = 24179, upload-time = "2024-03-22T20:32:28.055Z" }, +] + +[[package]] +name = "requests-toolbelt" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/61/d7545dafb7ac2230c70d38d31cbfe4cc64f7144dc41f6e4e4b78ecd9f5bb/requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6", size = 206888, upload-time = "2023-05-01T04:11:33.229Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3f/51/d4db610ef29373b879047326cbf6fa98b6c1969d6f6dc423279de2b1be2c/requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06", size = 54481, upload-time = "2023-05-01T04:11:28.427Z" }, +] + +[[package]] +name = "retry" +version = "0.9.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "decorator" }, + { name = "py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9d/72/75d0b85443fbc8d9f38d08d2b1b67cc184ce35280e4a3813cda2f445f3a4/retry-0.9.2.tar.gz", hash = "sha256:f8bfa8b99b69c4506d6f5bd3b0aabf77f98cdb17f3c9fc3f5ca820033336fba4", size = 6448, upload-time = "2016-05-11T13:58:51.541Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/0d/53aea75710af4528a25ed6837d71d117602b01946b307a3912cb3cfcbcba/retry-0.9.2-py2.py3-none-any.whl", hash = "sha256:ccddf89761fa2c726ab29391837d4327f819ea14d244c232a1d24c67a2f98606", size = 7986, upload-time = "2016-05-11T13:58:39.925Z" }, +] + +[[package]] +name = "rich" +version = "14.2.0" +source = { registry = "https://pypi.org/simple" } 
+dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fb/d2/8920e102050a0de7bfabeb4c4614a49248cf8d5d7a8d01885fbb24dc767a/rich-14.2.0.tar.gz", hash = "sha256:73ff50c7c0c1c77c8243079283f4edb376f0f6442433aecb8ce7e6d0b92d1fe4", size = 219990, upload-time = "2025-10-09T14:16:53.064Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/7a/b0178788f8dc6cafce37a212c99565fa1fe7872c70c6c9c1e1a372d9d88f/rich-14.2.0-py3-none-any.whl", hash = "sha256:76bc51fe2e57d2b1be1f96c524b890b816e334ab4c1e45888799bfaab0021edd", size = 243393, upload-time = "2025-10-09T14:16:51.245Z" }, +] + +[[package]] +name = "rich-toolkit" +version = "0.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "rich" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/97/09/3f9b8d9daaf235195c626f21e03604c05b987404ee3bcacee0c1f67f2a8e/rich_toolkit-0.17.1.tar.gz", hash = "sha256:5af54df8d1dd9c8530e462e1bdcaed625c9b49f5a55b035aa0ba1c17bdb87c9a", size = 187925, upload-time = "2025-12-17T10:49:22.583Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/7b/15e55fa8a76d0d41bf34d965af78acdaf80a315907adb30de8b63c272694/rich_toolkit-0.17.1-py3-none-any.whl", hash = "sha256:96d24bb921ecd225ffce7c526a9149e74006410c05e6d405bd74ffd54d5631ed", size = 31412, upload-time = "2025-12-17T10:49:21.793Z" }, +] + +[[package]] +name = "rignore" +version = "0.7.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e5/f5/8bed2310abe4ae04b67a38374a4d311dd85220f5d8da56f47ae9361be0b0/rignore-0.7.6.tar.gz", hash = "sha256:00d3546cd793c30cb17921ce674d2c8f3a4b00501cb0e3dd0e82217dbeba2671", size = 57140, upload-time = "2025-11-05T21:41:21.968Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/0b/0e/012556ef3047a2628842b44e753bb15f4dc46806780ff090f1e8fe4bf1eb/rignore-0.7.6-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:03e82348cb7234f8d9b2834f854400ddbbd04c0f8f35495119e66adbd37827a8", size = 883488, upload-time = "2025-11-05T20:42:41.359Z" }, + { url = "https://files.pythonhosted.org/packages/93/b0/d4f1f3fe9eb3f8e382d45ce5b0547ea01c4b7e0b4b4eb87bcd66a1d2b888/rignore-0.7.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b9e624f6be6116ea682e76c5feb71ea91255c67c86cb75befe774365b2931961", size = 820411, upload-time = "2025-11-05T20:42:24.782Z" }, + { url = "https://files.pythonhosted.org/packages/4a/c8/dea564b36dedac8de21c18e1851789545bc52a0c22ece9843444d5608a6a/rignore-0.7.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bda49950d405aa8d0ebe26af807c4e662dd281d926530f03f29690a2e07d649a", size = 897821, upload-time = "2025-11-05T20:40:52.613Z" }, + { url = "https://files.pythonhosted.org/packages/b3/2b/ee96db17ac1835e024c5d0742eefb7e46de60020385ac883dd3d1cde2c1f/rignore-0.7.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5fd5ab3840b8c16851d327ed06e9b8be6459702a53e5ab1fc4073b684b3789e", size = 873963, upload-time = "2025-11-05T20:41:07.49Z" }, + { url = "https://files.pythonhosted.org/packages/a5/8c/ad5a57bbb9d14d5c7e5960f712a8a0b902472ea3f4a2138cbf70d1777b75/rignore-0.7.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ced2a248352636a5c77504cb755dc02c2eef9a820a44d3f33061ce1bb8a7f2d2", size = 1169216, upload-time = "2025-11-05T20:41:23.73Z" }, + { url = "https://files.pythonhosted.org/packages/80/e6/5b00bc2a6bc1701e6878fca798cf5d9125eb3113193e33078b6fc0d99123/rignore-0.7.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a04a3b73b75ddc12c9c9b21efcdaab33ca3832941d6f1d67bffd860941cd448a", size = 942942, upload-time = "2025-11-05T20:41:39.393Z" }, + { url = 
"https://files.pythonhosted.org/packages/85/e5/7f99bd0cc9818a91d0e8b9acc65b792e35750e3bdccd15a7ee75e64efca4/rignore-0.7.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d24321efac92140b7ec910ac7c53ab0f0c86a41133d2bb4b0e6a7c94967f44dd", size = 959787, upload-time = "2025-11-05T20:42:09.765Z" }, + { url = "https://files.pythonhosted.org/packages/55/54/2ffea79a7c1eabcede1926347ebc2a81bc6b81f447d05b52af9af14948b9/rignore-0.7.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:73c7aa109d41e593785c55fdaa89ad80b10330affa9f9d3e3a51fa695f739b20", size = 984245, upload-time = "2025-11-05T20:41:54.062Z" }, + { url = "https://files.pythonhosted.org/packages/41/f7/e80f55dfe0f35787fa482aa18689b9c8251e045076c35477deb0007b3277/rignore-0.7.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1734dc49d1e9501b07852ef44421f84d9f378da9fbeda729e77db71f49cac28b", size = 1078647, upload-time = "2025-11-05T21:40:13.463Z" }, + { url = "https://files.pythonhosted.org/packages/d4/cf/2c64f0b6725149f7c6e7e5a909d14354889b4beaadddaa5fff023ec71084/rignore-0.7.6-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5719ea14ea2b652c0c0894be5dfde954e1853a80dea27dd2fbaa749618d837f5", size = 1139186, upload-time = "2025-11-05T21:40:31.27Z" }, + { url = "https://files.pythonhosted.org/packages/75/95/a86c84909ccc24af0d094b50d54697951e576c252a4d9f21b47b52af9598/rignore-0.7.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:8e23424fc7ce35726854f639cb7968151a792c0c3d9d082f7f67e0c362cfecca", size = 1117604, upload-time = "2025-11-05T21:40:48.07Z" }, + { url = "https://files.pythonhosted.org/packages/7f/5e/13b249613fd5d18d58662490ab910a9f0be758981d1797789913adb4e918/rignore-0.7.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3efdcf1dd84d45f3e2bd2f93303d9be103888f56dfa7c3349b5bf4f0657ec696", size = 1127725, upload-time = "2025-11-05T21:41:05.804Z" }, + { url = 
"https://files.pythonhosted.org/packages/c7/28/fa5dcd1e2e16982c359128664e3785f202d3eca9b22dd0b2f91c4b3d242f/rignore-0.7.6-cp312-cp312-win32.whl", hash = "sha256:ccca9d1a8b5234c76b71546fc3c134533b013f40495f394a65614a81f7387046", size = 646145, upload-time = "2025-11-05T21:41:51.096Z" }, + { url = "https://files.pythonhosted.org/packages/26/87/69387fb5dd81a0f771936381431780b8cf66fcd2cfe9495e1aaf41548931/rignore-0.7.6-cp312-cp312-win_amd64.whl", hash = "sha256:c96a285e4a8bfec0652e0bfcf42b1aabcdda1e7625f5006d188e3b1c87fdb543", size = 726090, upload-time = "2025-11-05T21:41:36.485Z" }, + { url = "https://files.pythonhosted.org/packages/24/5f/e8418108dcda8087fb198a6f81caadbcda9fd115d61154bf0df4d6d3619b/rignore-0.7.6-cp312-cp312-win_arm64.whl", hash = "sha256:a64a750e7a8277a323f01ca50b7784a764845f6cce2fe38831cb93f0508d0051", size = 656317, upload-time = "2025-11-05T21:41:25.305Z" }, +] + +[[package]] +name = "rpds-py" +version = "0.30.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/af/3f2f423103f1113b36230496629986e0ef7e199d2aa8392452b484b38ced/rpds_py-0.30.0.tar.gz", hash = "sha256:dd8ff7cf90014af0c0f787eea34794ebf6415242ee1d6fa91eaba725cc441e84", size = 69469, upload-time = "2025-11-30T20:24:38.837Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/e7/98a2f4ac921d82f33e03f3835f5bf3a4a40aa1bfdc57975e74a97b2b4bdd/rpds_py-0.30.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a161f20d9a43006833cd7068375a94d035714d73a172b681d8881820600abfad", size = 375086, upload-time = "2025-11-30T20:22:17.93Z" }, + { url = "https://files.pythonhosted.org/packages/4d/a1/bca7fd3d452b272e13335db8d6b0b3ecde0f90ad6f16f3328c6fb150c889/rpds_py-0.30.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6abc8880d9d036ecaafe709079969f56e876fcf107f7a8e9920ba6d5a3878d05", size = 359053, upload-time = "2025-11-30T20:22:19.297Z" }, + { url = 
"https://files.pythonhosted.org/packages/65/1c/ae157e83a6357eceff62ba7e52113e3ec4834a84cfe07fa4b0757a7d105f/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca28829ae5f5d569bb62a79512c842a03a12576375d5ece7d2cadf8abe96ec28", size = 390763, upload-time = "2025-11-30T20:22:21.661Z" }, + { url = "https://files.pythonhosted.org/packages/d4/36/eb2eb8515e2ad24c0bd43c3ee9cd74c33f7ca6430755ccdb240fd3144c44/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1010ed9524c73b94d15919ca4d41d8780980e1765babf85f9a2f90d247153dd", size = 408951, upload-time = "2025-11-30T20:22:23.408Z" }, + { url = "https://files.pythonhosted.org/packages/d6/65/ad8dc1784a331fabbd740ef6f71ce2198c7ed0890dab595adb9ea2d775a1/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8d1736cfb49381ba528cd5baa46f82fdc65c06e843dab24dd70b63d09121b3f", size = 514622, upload-time = "2025-11-30T20:22:25.16Z" }, + { url = "https://files.pythonhosted.org/packages/63/8e/0cfa7ae158e15e143fe03993b5bcd743a59f541f5952e1546b1ac1b5fd45/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d948b135c4693daff7bc2dcfc4ec57237a29bd37e60c2fabf5aff2bbacf3e2f1", size = 414492, upload-time = "2025-11-30T20:22:26.505Z" }, + { url = "https://files.pythonhosted.org/packages/60/1b/6f8f29f3f995c7ffdde46a626ddccd7c63aefc0efae881dc13b6e5d5bb16/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47f236970bccb2233267d89173d3ad2703cd36a0e2a6e92d0560d333871a3d23", size = 394080, upload-time = "2025-11-30T20:22:27.934Z" }, + { url = "https://files.pythonhosted.org/packages/6d/d5/a266341051a7a3ca2f4b750a3aa4abc986378431fc2da508c5034d081b70/rpds_py-0.30.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:2e6ecb5a5bcacf59c3f912155044479af1d0b6681280048b338b28e364aca1f6", size = 408680, upload-time = "2025-11-30T20:22:29.341Z" }, + { url = 
"https://files.pythonhosted.org/packages/10/3b/71b725851df9ab7a7a4e33cf36d241933da66040d195a84781f49c50490c/rpds_py-0.30.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a8fa71a2e078c527c3e9dc9fc5a98c9db40bcc8a92b4e8858e36d329f8684b51", size = 423589, upload-time = "2025-11-30T20:22:31.469Z" }, + { url = "https://files.pythonhosted.org/packages/00/2b/e59e58c544dc9bd8bd8384ecdb8ea91f6727f0e37a7131baeff8d6f51661/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73c67f2db7bc334e518d097c6d1e6fed021bbc9b7d678d6cc433478365d1d5f5", size = 573289, upload-time = "2025-11-30T20:22:32.997Z" }, + { url = "https://files.pythonhosted.org/packages/da/3e/a18e6f5b460893172a7d6a680e86d3b6bc87a54c1f0b03446a3c8c7b588f/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5ba103fb455be00f3b1c2076c9d4264bfcb037c976167a6047ed82f23153f02e", size = 599737, upload-time = "2025-11-30T20:22:34.419Z" }, + { url = "https://files.pythonhosted.org/packages/5c/e2/714694e4b87b85a18e2c243614974413c60aa107fd815b8cbc42b873d1d7/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee9c752c0364588353e627da8a7e808a66873672bcb5f52890c33fd965b394", size = 563120, upload-time = "2025-11-30T20:22:35.903Z" }, + { url = "https://files.pythonhosted.org/packages/6f/ab/d5d5e3bcedb0a77f4f613706b750e50a5a3ba1c15ccd3665ecc636c968fd/rpds_py-0.30.0-cp312-cp312-win32.whl", hash = "sha256:1ab5b83dbcf55acc8b08fc62b796ef672c457b17dbd7820a11d6c52c06839bdf", size = 223782, upload-time = "2025-11-30T20:22:37.271Z" }, + { url = "https://files.pythonhosted.org/packages/39/3b/f786af9957306fdc38a74cef405b7b93180f481fb48453a114bb6465744a/rpds_py-0.30.0-cp312-cp312-win_amd64.whl", hash = "sha256:a090322ca841abd453d43456ac34db46e8b05fd9b3b4ac0c78bcde8b089f959b", size = 240463, upload-time = "2025-11-30T20:22:39.021Z" }, + { url = 
"https://files.pythonhosted.org/packages/f3/d2/b91dc748126c1559042cfe41990deb92c4ee3e2b415f6b5234969ffaf0cc/rpds_py-0.30.0-cp312-cp312-win_arm64.whl", hash = "sha256:669b1805bd639dd2989b281be2cfd951c6121b65e729d9b843e9639ef1fd555e", size = 230868, upload-time = "2025-11-30T20:22:40.493Z" }, +] + +[[package]] +name = "rsa" +version = "4.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, +] + +[[package]] +name = "scikit-image" +version = "0.26.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "imageio" }, + { name = "lazy-loader" }, + { name = "networkx" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pillow" }, + { name = "scipy" }, + { name = "tifffile" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/b4/2528bb43c67d48053a7a649a9666432dc307d66ba02e3a6d5c40f46655df/scikit_image-0.26.0.tar.gz", hash = "sha256:f5f970ab04efad85c24714321fcc91613fcb64ef2a892a13167df2f3e59199fa", size = 22729739, upload-time = "2025-12-20T17:12:21.824Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/e8/e13757982264b33a1621628f86b587e9a73a13f5256dad49b19ba7dc9083/scikit_image-0.26.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d454b93a6fa770ac5ae2d33570f8e7a321bb80d29511ce4b6b78058ebe176e8c", size = 12376452, upload-time = "2025-12-20T17:10:52.796Z" }, + { url = 
"https://files.pythonhosted.org/packages/e3/be/f8dd17d0510f9911f9f17ba301f7455328bf13dae416560126d428de9568/scikit_image-0.26.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3409e89d66eff5734cd2b672d1c48d2759360057e714e1d92a11df82c87cba37", size = 12061567, upload-time = "2025-12-20T17:10:55.207Z" }, + { url = "https://files.pythonhosted.org/packages/b3/2b/c70120a6880579fb42b91567ad79feb4772f7be72e8d52fec403a3dde0c6/scikit_image-0.26.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c717490cec9e276afb0438dd165b7c3072d6c416709cc0f9f5a4c1070d23a44", size = 13084214, upload-time = "2025-12-20T17:10:57.468Z" }, + { url = "https://files.pythonhosted.org/packages/f4/a2/70401a107d6d7466d64b466927e6b96fcefa99d57494b972608e2f8be50f/scikit_image-0.26.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7df650e79031634ac90b11e64a9eedaf5a5e06fcd09bcd03a34be01745744466", size = 13561683, upload-time = "2025-12-20T17:10:59.49Z" }, + { url = "https://files.pythonhosted.org/packages/13/a5/48bdfd92794c5002d664e0910a349d0a1504671ef5ad358150f21643c79a/scikit_image-0.26.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cefd85033e66d4ea35b525bb0937d7f42d4cdcfed2d1888e1570d5ce450d3932", size = 14112147, upload-time = "2025-12-20T17:11:02.083Z" }, + { url = "https://files.pythonhosted.org/packages/ee/b5/ac71694da92f5def5953ca99f18a10fe98eac2dd0a34079389b70b4d0394/scikit_image-0.26.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3f5bf622d7c0435884e1e141ebbe4b2804e16b2dd23ae4c6183e2ea99233be70", size = 14661625, upload-time = "2025-12-20T17:11:04.528Z" }, + { url = "https://files.pythonhosted.org/packages/23/4d/a3cc1e96f080e253dad2251bfae7587cf2b7912bcd76fd43fd366ff35a87/scikit_image-0.26.0-cp312-cp312-win_amd64.whl", hash = "sha256:abed017474593cd3056ae0fe948d07d0747b27a085e92df5474f4955dd65aec0", size = 11911059, upload-time = "2025-12-20T17:11:06.61Z" }, + { url = 
"https://files.pythonhosted.org/packages/35/8a/d1b8055f584acc937478abf4550d122936f420352422a1a625eef2c605d8/scikit_image-0.26.0-cp312-cp312-win_arm64.whl", hash = "sha256:4d57e39ef67a95d26860c8caf9b14b8fb130f83b34c6656a77f191fa6d1d04d8", size = 11348740, upload-time = "2025-12-20T17:11:09.118Z" }, +] + +[[package]] +name = "scipy" +version = "1.16.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0a/ca/d8ace4f98322d01abcd52d381134344bf7b431eba7ed8b42bdea5a3c2ac9/scipy-1.16.3.tar.gz", hash = "sha256:01e87659402762f43bd2fee13370553a17ada367d42e7487800bf2916535aecb", size = 30597883, upload-time = "2025-10-28T17:38:54.068Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/41/5bf55c3f386b1643812f3a5674edf74b26184378ef0f3e7c7a09a7e2ca7f/scipy-1.16.3-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:81fc5827606858cf71446a5e98715ba0e11f0dbc83d71c7409d05486592a45d6", size = 36659043, upload-time = "2025-10-28T17:32:40.285Z" }, + { url = "https://files.pythonhosted.org/packages/1e/0f/65582071948cfc45d43e9870bf7ca5f0e0684e165d7c9ef4e50d783073eb/scipy-1.16.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:c97176013d404c7346bf57874eaac5187d969293bf40497140b0a2b2b7482e07", size = 28898986, upload-time = "2025-10-28T17:32:45.325Z" }, + { url = "https://files.pythonhosted.org/packages/96/5e/36bf3f0ac298187d1ceadde9051177d6a4fe4d507e8f59067dc9dd39e650/scipy-1.16.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:2b71d93c8a9936046866acebc915e2af2e292b883ed6e2cbe5c34beb094b82d9", size = 20889814, upload-time = "2025-10-28T17:32:49.277Z" }, + { url = "https://files.pythonhosted.org/packages/80/35/178d9d0c35394d5d5211bbff7ac4f2986c5488b59506fef9e1de13ea28d3/scipy-1.16.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3d4a07a8e785d80289dfe66b7c27d8634a773020742ec7187b85ccc4b0e7b686", size = 23565795, upload-time = "2025-10-28T17:32:53.337Z" }, + { url 
= "https://files.pythonhosted.org/packages/fa/46/d1146ff536d034d02f83c8afc3c4bab2eddb634624d6529a8512f3afc9da/scipy-1.16.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0553371015692a898e1aa858fed67a3576c34edefa6b7ebdb4e9dde49ce5c203", size = 33349476, upload-time = "2025-10-28T17:32:58.353Z" }, + { url = "https://files.pythonhosted.org/packages/79/2e/415119c9ab3e62249e18c2b082c07aff907a273741b3f8160414b0e9193c/scipy-1.16.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:72d1717fd3b5e6ec747327ce9bda32d5463f472c9dce9f54499e81fbd50245a1", size = 35676692, upload-time = "2025-10-28T17:33:03.88Z" }, + { url = "https://files.pythonhosted.org/packages/27/82/df26e44da78bf8d2aeaf7566082260cfa15955a5a6e96e6a29935b64132f/scipy-1.16.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1fb2472e72e24d1530debe6ae078db70fb1605350c88a3d14bc401d6306dbffe", size = 36019345, upload-time = "2025-10-28T17:33:09.773Z" }, + { url = "https://files.pythonhosted.org/packages/82/31/006cbb4b648ba379a95c87262c2855cd0d09453e500937f78b30f02fa1cd/scipy-1.16.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c5192722cffe15f9329a3948c4b1db789fbb1f05c97899187dcf009b283aea70", size = 38678975, upload-time = "2025-10-28T17:33:15.809Z" }, + { url = "https://files.pythonhosted.org/packages/c2/7f/acbd28c97e990b421af7d6d6cd416358c9c293fc958b8529e0bd5d2a2a19/scipy-1.16.3-cp312-cp312-win_amd64.whl", hash = "sha256:56edc65510d1331dae01ef9b658d428e33ed48b4f77b1d51caf479a0253f96dc", size = 38555926, upload-time = "2025-10-28T17:33:21.388Z" }, + { url = "https://files.pythonhosted.org/packages/ce/69/c5c7807fd007dad4f48e0a5f2153038dc96e8725d3345b9ee31b2b7bed46/scipy-1.16.3-cp312-cp312-win_arm64.whl", hash = "sha256:a8a26c78ef223d3e30920ef759e25625a0ecdd0d60e5a8818b7513c3e5384cf2", size = 25463014, upload-time = "2025-10-28T17:33:25.975Z" }, +] + +[[package]] +name = "scipy-stubs" +version = "1.16.3.3" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "optype", extra = ["numpy"] }, +] +sdist = { url = "https://files.pythonhosted.org/packages/08/91/1700d2a1a9f64f19bb019a547e510b99a6af1fef49641a0bce86bc85fb8e/scipy_stubs-1.16.3.3.tar.gz", hash = "sha256:af47578875d5557567225a16ec1b9b38a48c4c4377d92396413ebd65406c44ee", size = 361468, upload-time = "2025-12-08T13:45:38.37Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/e2/3b8826f281f59301e3284989b19cfc56fdccf799134c1befedd38482a23a/scipy_stubs-1.16.3.3-py3-none-any.whl", hash = "sha256:f6316b36cd0fb272c994ae5b10c4a73c644a7e156ed8d32bcd9c35303d0e1b7e", size = 561750, upload-time = "2025-12-08T13:45:36.568Z" }, +] + +[[package]] +name = "seaborn" +version = "0.13.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "matplotlib" }, + { name = "numpy" }, + { name = "pandas" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/86/59/a451d7420a77ab0b98f7affa3a1d78a313d2f7281a57afb1a34bae8ab412/seaborn-0.13.2.tar.gz", hash = "sha256:93e60a40988f4d65e9f4885df477e2fdaff6b73a9ded434c1ab356dd57eefff7", size = 1457696, upload-time = "2024-01-25T13:21:52.551Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/11/00d3c3dfc25ad54e731d91449895a79e4bf2384dc3ac01809010ba88f6d5/seaborn-0.13.2-py3-none-any.whl", hash = "sha256:636f8336facf092165e27924f223d3c62ca560b1f2bb5dff7ab7fad265361987", size = 294914, upload-time = "2024-01-25T13:21:49.598Z" }, +] + +[[package]] +name = "sentry-sdk" +version = "2.48.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/40/f0/0e9dc590513d5e742d7799e2038df3a05167cba084c6ca4f3cdd75b55164/sentry_sdk-2.48.0.tar.gz", hash = "sha256:5213190977ff7fdff8a58b722fb807f8d5524a80488626ebeda1b5676c0c1473", size = 384828, upload-time = "2025-12-16T14:55:41.722Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/4d/19/8d77f9992e5cbfcaa9133c3bf63b4fbbb051248802e1e803fed5c552fbb2/sentry_sdk-2.48.0-py2.py3-none-any.whl", hash = "sha256:6b12ac256769d41825d9b7518444e57fa35b5642df4c7c5e322af4d2c8721172", size = 414555, upload-time = "2025-12-16T14:55:40.152Z" }, ] [[package]] @@ -945,6 +2990,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, ] +[[package]] +name = "shellingham" +version = "1.5.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310, upload-time = "2023-10-24T04:13:40.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" }, +] + [[package]] name = "six" version = "1.17.0" @@ -954,6 +3008,186 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, ] +[[package]] +name = "soupsieve" +version = "2.8.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/89/23/adf3796d740536d63a6fbda113d07e60c734b6ed5d3058d1e47fc0495e47/soupsieve-2.8.1.tar.gz", hash = 
"sha256:4cf733bc50fa805f5df4b8ef4740fc0e0fa6218cf3006269afd3f9d6d80fd350", size = 117856, upload-time = "2025-12-18T13:50:34.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/f3/b67d6ea49ca9154453b6d70b34ea22f3996b9fa55da105a79d8732227adc/soupsieve-2.8.1-py3-none-any.whl", hash = "sha256:a11fe2a6f3d76ab3cf2de04eb339c1be5b506a8a47f2ceb6d139803177f85434", size = 36710, upload-time = "2025-12-18T13:50:33.267Z" }, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.45" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "greenlet", marker = "platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/be/f9/5e4491e5ccf42f5d9cfc663741d261b3e6e1683ae7812114e7636409fcc6/sqlalchemy-2.0.45.tar.gz", hash = "sha256:1632a4bda8d2d25703fdad6363058d882541bdaaee0e5e3ddfa0cd3229efce88", size = 9869912, upload-time = "2025-12-09T21:05:16.737Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2d/c7/1900b56ce19bff1c26f39a4ce427faec7716c81ac792bfac8b6a9f3dca93/sqlalchemy-2.0.45-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b3ee2aac15169fb0d45822983631466d60b762085bc4535cd39e66bea362df5f", size = 3333760, upload-time = "2025-12-09T22:11:02.66Z" }, + { url = "https://files.pythonhosted.org/packages/0a/93/3be94d96bb442d0d9a60e55a6bb6e0958dd3457751c6f8502e56ef95fed0/sqlalchemy-2.0.45-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba547ac0b361ab4f1608afbc8432db669bd0819b3e12e29fb5fa9529a8bba81d", size = 3348268, upload-time = "2025-12-09T22:13:49.054Z" }, + { url = 
"https://files.pythonhosted.org/packages/48/4b/f88ded696e61513595e4a9778f9d3f2bf7332cce4eb0c7cedaabddd6687b/sqlalchemy-2.0.45-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:215f0528b914e5c75ef2559f69dca86878a3beeb0c1be7279d77f18e8d180ed4", size = 3278144, upload-time = "2025-12-09T22:11:04.14Z" }, + { url = "https://files.pythonhosted.org/packages/ed/6a/310ecb5657221f3e1bd5288ed83aa554923fb5da48d760a9f7622afeb065/sqlalchemy-2.0.45-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:107029bf4f43d076d4011f1afb74f7c3e2ea029ec82eb23d8527d5e909e97aa6", size = 3313907, upload-time = "2025-12-09T22:13:50.598Z" }, + { url = "https://files.pythonhosted.org/packages/5c/39/69c0b4051079addd57c84a5bfb34920d87456dd4c90cf7ee0df6efafc8ff/sqlalchemy-2.0.45-cp312-cp312-win32.whl", hash = "sha256:0c9f6ada57b58420a2c0277ff853abe40b9e9449f8d7d231763c6bc30f5c4953", size = 2112182, upload-time = "2025-12-09T21:39:30.824Z" }, + { url = "https://files.pythonhosted.org/packages/f7/4e/510db49dd89fc3a6e994bee51848c94c48c4a00dc905e8d0133c251f41a7/sqlalchemy-2.0.45-cp312-cp312-win_amd64.whl", hash = "sha256:8defe5737c6d2179c7997242d6473587c3beb52e557f5ef0187277009f73e5e1", size = 2139200, upload-time = "2025-12-09T21:39:32.321Z" }, + { url = "https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl", hash = "sha256:5225a288e4c8cc2308dbdd874edad6e7d0fd38eac1e9e5f23503425c8eee20d0", size = 1936672, upload-time = "2025-12-09T21:54:52.608Z" }, +] + +[[package]] +name = "sqlparse" +version = "0.5.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/90/76/437d71068094df0726366574cf3432a4ed754217b436eb7429415cf2d480/sqlparse-0.5.5.tar.gz", hash = "sha256:e20d4a9b0b8585fdf63b10d30066c7c94c5d7a7ec47c889a2d83a3caa93ff28e", size = 120815, upload-time = "2025-12-19T07:17:45.073Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/49/4b/359f28a903c13438ef59ebeee215fb25da53066db67b305c125f1c6d2a25/sqlparse-0.5.5-py3-none-any.whl", hash = "sha256:12a08b3bf3eec877c519589833aed092e2444e68240a3577e8e26148acc7b1ba", size = 46138, upload-time = "2025-12-19T07:17:46.573Z" }, +] + +[[package]] +name = "starlette" +version = "0.50.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ba/b8/73a0e6a6e079a9d9cfa64113d771e421640b6f679a52eeb9b32f72d871a1/starlette-0.50.0.tar.gz", hash = "sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca", size = 2646985, upload-time = "2025-11-01T15:25:27.516Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/52/1064f510b141bd54025f9b55105e26d1fa970b9be67ad766380a3c9b74b0/starlette-0.50.0-py3-none-any.whl", hash = "sha256:9e5391843ec9b6e472eed1365a78c8098cfceb7a74bfd4d6b1c0c0095efb3bca", size = 74033, upload-time = "2025-11-01T15:25:25.461Z" }, +] + +[[package]] +name = "sympy" +version = "1.14.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mpmath" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/83/d3/803453b36afefb7c2bb238361cd4ae6125a569b4db67cd9e79846ba2d68c/sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517", size = 7793921, upload-time = "2025-04-27T18:05:01.611Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" }, +] + +[[package]] +name = "tenacity" +version = "9.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/0a/d4/2b0cd0fe285e14b36db076e78c93766ff1d529d70408bd1d2a5a84f1d929/tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb", size = 48036, upload-time = "2025-04-02T08:25:09.966Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" }, +] + +[[package]] +name = "termcolor" +version = "3.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/46/79/cf31d7a93a8fdc6aa0fbb665be84426a8c5a557d9240b6239e9e11e35fc5/termcolor-3.3.0.tar.gz", hash = "sha256:348871ca648ec6a9a983a13ab626c0acce02f515b9e1983332b17af7979521c5", size = 14434, upload-time = "2025-12-29T12:55:21.882Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/d1/8bb87d21e9aeb323cc03034f5eaf2c8f69841e40e4853c2627edf8111ed3/termcolor-3.3.0-py3-none-any.whl", hash = "sha256:cf642efadaf0a8ebbbf4bc7a31cec2f9b5f21a9f726f4ccbb08192c9c26f43a5", size = 7734, upload-time = "2025-12-29T12:55:20.718Z" }, +] + +[[package]] +name = "tifffile" +version = "2025.12.20" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f8/a6/85e8ecfd7cb4167f8bd17136b2d42cba296fbc08a247bba70d5747e2046a/tifffile-2025.12.20.tar.gz", hash = "sha256:cb8a4fee327d15b3e3eeac80bbdd8a53b323c80473330bcfb99418ee4c1c827f", size = 373364, upload-time = "2025-12-21T06:23:54.241Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1b/fe/e59859aa1134fac065d36864752daf13215c98b379cb5d93f954dc0ec830/tifffile-2025.12.20-py3-none-any.whl", hash = "sha256:bc0345a20675149353cfcb3f1c48d0a3654231ee26bd46beebaab4d2168feeb6", size = 232031, 
upload-time = "2025-12-21T06:23:53.003Z" }, +] + +[[package]] +name = "tokenizers" +version = "0.22.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "huggingface-hub" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1c/46/fb6854cec3278fbfa4a75b50232c77622bc517ac886156e6afbfa4d8fc6e/tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9", size = 363123, upload-time = "2025-09-19T09:49:23.424Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/33/f4b2d94ada7ab297328fc671fed209368ddb82f965ec2224eb1892674c3a/tokenizers-0.22.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:59fdb013df17455e5f950b4b834a7b3ee2e0271e6378ccb33aa74d178b513c73", size = 3069318, upload-time = "2025-09-19T09:49:11.848Z" }, + { url = "https://files.pythonhosted.org/packages/1c/58/2aa8c874d02b974990e89ff95826a4852a8b2a273c7d1b4411cdd45a4565/tokenizers-0.22.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:8d4e484f7b0827021ac5f9f71d4794aaef62b979ab7608593da22b1d2e3c4edc", size = 2926478, upload-time = "2025-09-19T09:49:09.759Z" }, + { url = "https://files.pythonhosted.org/packages/1e/3b/55e64befa1e7bfea963cf4b787b2cea1011362c4193f5477047532ce127e/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d2962dd28bc67c1f205ab180578a78eef89ac60ca7ef7cbe9635a46a56422a", size = 3256994, upload-time = "2025-09-19T09:48:56.701Z" }, + { url = "https://files.pythonhosted.org/packages/71/0b/fbfecf42f67d9b7b80fde4aabb2b3110a97fac6585c9470b5bff103a80cb/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38201f15cdb1f8a6843e6563e6e79f4abd053394992b9bbdf5213ea3469b4ae7", size = 3153141, upload-time = "2025-09-19T09:48:59.749Z" }, + { url = 
"https://files.pythonhosted.org/packages/17/a9/b38f4e74e0817af8f8ef925507c63c6ae8171e3c4cb2d5d4624bf58fca69/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1cbe5454c9a15df1b3443c726063d930c16f047a3cc724b9e6e1a91140e5a21", size = 3508049, upload-time = "2025-09-19T09:49:05.868Z" }, + { url = "https://files.pythonhosted.org/packages/d2/48/dd2b3dac46bb9134a88e35d72e1aa4869579eacc1a27238f1577270773ff/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7d094ae6312d69cc2a872b54b91b309f4f6fbce871ef28eb27b52a98e4d0214", size = 3710730, upload-time = "2025-09-19T09:49:01.832Z" }, + { url = "https://files.pythonhosted.org/packages/93/0e/ccabc8d16ae4ba84a55d41345207c1e2ea88784651a5a487547d80851398/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd7594a56656ace95cdd6df4cca2e4059d294c5cfb1679c57824b605556cb2f", size = 3412560, upload-time = "2025-09-19T09:49:03.867Z" }, + { url = "https://files.pythonhosted.org/packages/d0/c6/dc3a0db5a6766416c32c034286d7c2d406da1f498e4de04ab1b8959edd00/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2ef6063d7a84994129732b47e7915e8710f27f99f3a3260b8a38fc7ccd083f4", size = 3250221, upload-time = "2025-09-19T09:49:07.664Z" }, + { url = "https://files.pythonhosted.org/packages/d7/a6/2c8486eef79671601ff57b093889a345dd3d576713ef047776015dc66de7/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ba0a64f450b9ef412c98f6bcd2a50c6df6e2443b560024a09fa6a03189726879", size = 9345569, upload-time = "2025-09-19T09:49:14.214Z" }, + { url = "https://files.pythonhosted.org/packages/6b/16/32ce667f14c35537f5f605fe9bea3e415ea1b0a646389d2295ec348d5657/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:331d6d149fa9c7d632cde4490fb8bbb12337fa3a0232e77892be656464f4b446", size = 9271599, upload-time = "2025-09-19T09:49:16.639Z" }, + { url = 
"https://files.pythonhosted.org/packages/51/7c/a5f7898a3f6baa3fc2685c705e04c98c1094c523051c805cdd9306b8f87e/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:607989f2ea68a46cb1dfbaf3e3aabdf3f21d8748312dbeb6263d1b3b66c5010a", size = 9533862, upload-time = "2025-09-19T09:49:19.146Z" }, + { url = "https://files.pythonhosted.org/packages/36/65/7e75caea90bc73c1dd8d40438adf1a7bc26af3b8d0a6705ea190462506e1/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a0f307d490295717726598ef6fa4f24af9d484809223bbc253b201c740a06390", size = 9681250, upload-time = "2025-09-19T09:49:21.501Z" }, + { url = "https://files.pythonhosted.org/packages/30/2c/959dddef581b46e6209da82df3b78471e96260e2bc463f89d23b1bf0e52a/tokenizers-0.22.1-cp39-abi3-win32.whl", hash = "sha256:b5120eed1442765cd90b903bb6cfef781fd8fe64e34ccaecbae4c619b7b12a82", size = 2472003, upload-time = "2025-09-19T09:49:27.089Z" }, + { url = "https://files.pythonhosted.org/packages/b3/46/e33a8c93907b631a99377ef4c5f817ab453d0b34f93529421f42ff559671/tokenizers-0.22.1-cp39-abi3-win_amd64.whl", hash = "sha256:65fd6e3fb11ca1e78a6a93602490f134d1fdeb13bcef99389d5102ea318ed138", size = 2674684, upload-time = "2025-09-19T09:49:24.953Z" }, +] + +[[package]] +name = "tool" +version = "0.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "argh" }, + { name = "pydispatcher" }, + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/ec/2b258e54a13bc53a4352e538276b4b9ffdf0d08b3f2972ee062e71dc55a0/tool-0.8.0.tar.gz", hash = "sha256:5c596600abac4dc93d7854b0640fcc7577b7387f5b5f13bc873aed130aa8e881", size = 51557, upload-time = "2011-11-25T14:50:47.079Z" } + +[[package]] +name = "torch" +version = "2.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "filelock" }, + { name = "fsspec" }, + { name = "jinja2" }, + { name = "networkx" }, + { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and 
sys_platform == 'linux'" }, + { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cufile-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusparselt-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nvshmem-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "setuptools" }, + { name = "sympy" }, + { name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "typing-extensions" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/27/07c645c7673e73e53ded71705045d6cb5bae94c4b021b03aa8d03eee90ab/torch-2.9.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:da5f6f4d7f4940a173e5572791af238cb0b9e21b1aab592bd8b26da4c99f1cd6", size = 104126592, upload-time = 
"2025-11-12T15:20:41.62Z" }, + { url = "https://files.pythonhosted.org/packages/19/17/e377a460603132b00760511299fceba4102bd95db1a0ee788da21298ccff/torch-2.9.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:27331cd902fb4322252657f3902adf1c4f6acad9dcad81d8df3ae14c7c4f07c4", size = 899742281, upload-time = "2025-11-12T15:22:17.602Z" }, + { url = "https://files.pythonhosted.org/packages/b1/1a/64f5769025db846a82567fa5b7d21dba4558a7234ee631712ee4771c436c/torch-2.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:81a285002d7b8cfd3fdf1b98aa8df138d41f1a8334fd9ea37511517cedf43083", size = 110940568, upload-time = "2025-11-12T15:21:18.689Z" }, + { url = "https://files.pythonhosted.org/packages/6e/ab/07739fd776618e5882661d04c43f5b5586323e2f6a2d7d84aac20d8f20bd/torch-2.9.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:c0d25d1d8e531b8343bea0ed811d5d528958f1dcbd37e7245bc686273177ad7e", size = 74479191, upload-time = "2025-11-12T15:21:25.816Z" }, +] + +[[package]] +name = "torchvision" +version = "0.24.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "pillow" }, + { name = "torch" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/f0/af/18e2c6b9538a045f60718a0c5a058908ccb24f88fde8e6f0fc12d5ff7bd3/torchvision-0.24.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e48bf6a8ec95872eb45763f06499f87bd2fb246b9b96cb00aae260fda2f96193", size = 1891433, upload-time = "2025-11-12T15:25:03.232Z" }, + { url = "https://files.pythonhosted.org/packages/9d/43/600e5cfb0643d10d633124f5982d7abc2170dfd7ce985584ff16edab3e76/torchvision-0.24.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:7fb7590c737ebe3e1c077ad60c0e5e2e56bb26e7bccc3b9d04dbfc34fd09f050", size = 2386737, upload-time = "2025-11-12T15:25:08.288Z" }, + { url = "https://files.pythonhosted.org/packages/93/b1/db2941526ecddd84884132e2742a55c9311296a6a38627f9e2627f5ac889/torchvision-0.24.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = 
"sha256:66a98471fc18cad9064123106d810a75f57f0838eee20edc56233fd8484b0cc7", size = 8049868, upload-time = "2025-11-12T15:25:13.058Z" }, + { url = "https://files.pythonhosted.org/packages/69/98/16e583f59f86cd59949f59d52bfa8fc286f86341a229a9d15cbe7a694f0c/torchvision-0.24.1-cp312-cp312-win_amd64.whl", hash = "sha256:4aa6cb806eb8541e92c9b313e96192c6b826e9eb0042720e2fa250d021079952", size = 4302006, upload-time = "2025-11-12T15:25:16.184Z" }, +] + [[package]] name = "tqdm" version = "4.67.1" @@ -971,37 +3205,117 @@ name = "trinity-client-aida" version = "0.1.0" source = { virtual = "." } dependencies = [ + { name = "agentaction" }, + { name = "aio-pika" }, { name = "apscheduler" }, + { name = "bs4" }, + { name = "callbacks" }, { name = "celery" }, - { name = "geventhttpclient" }, - { name = "google-search-results" }, + { name = "celery-types" }, + { name = "chromadb" }, + { name = "dashscope" }, + { name = "dominate" }, + { name = "dotenv" }, + { name = "fastapi", extra = ["standard"] }, + { name = "image" }, + { name = "langchain" }, + { name = "langchain-community" }, + { name = "load" }, + { name = "load-dotenv" }, + { name = "loguru" }, + { name = "minio" }, + { name = "mmcv" }, { name = "moviepy" }, { name = "numpy" }, + { name = "ollama" }, + { name = "opencv-python" }, + { name = "pandas" }, { name = "pandas-stubs" }, - { name = "pika-stubs" }, + { name = "pika" }, + { name = "pillow" }, + { name = "pyasyncore" }, + { name = "pydantic" }, + { name = "pydantic-core" }, + { name = "pydantic-settings" }, + { name = "pymilvus" }, + { name = "pymysql" }, { name = "python-multipart" }, + { name = "pyviz-comms" }, + { name = "redis" }, + { name = "retry" }, + { name = "scikit-image" }, + { name = "scipy" }, + { name = "scipy-stubs" }, + { name = "seaborn" }, + { name = "tool" }, + { name = "torch" }, + { name = "torchvision" }, { name = "tritonclient", extra = ["all"] }, - { name = "types-urllib3" }, + { name = "uvicorn" }, ] [package.metadata] requires-dist = [ - { 
name = "apscheduler", specifier = ">=3.11.0" }, - { name = "celery", specifier = ">=5.5.3" }, - { name = "geventhttpclient", specifier = ">=2.3.4" }, - { name = "google-search-results", specifier = ">=2.4.2" }, - { name = "moviepy", specifier = ">=2.2.1" }, - { name = "numpy", specifier = "==1.26.4" }, - { name = "pandas-stubs", specifier = "==2.2.3.250527" }, - { name = "pika-stubs", specifier = "==0.1.3" }, - { name = "python-multipart", specifier = ">=0.0.20" }, - { name = "tritonclient", extras = ["all"], specifier = ">=2.58.0" }, - { name = "types-urllib3", specifier = "==1.26.25.14" }, + { name = "agentaction" }, + { name = "aio-pika", specifier = ">=9.5.8" }, + { name = "apscheduler", specifier = ">=3.11.1" }, + { name = "bs4", specifier = ">=0.0.2" }, + { name = "callbacks", specifier = ">=0.3.0" }, + { name = "celery", specifier = ">=5.6.0" }, + { name = "celery-types", specifier = ">=0.23.0" }, + { name = "chromadb", specifier = ">=1.3.7" }, + { name = "dashscope", specifier = ">=1.25.5" }, + { name = "dominate", specifier = ">=2.9.1" }, + { name = "dotenv", specifier = ">=0.9.9" }, + { name = "fastapi", extras = ["standard"], specifier = ">=0.125.0" }, + { name = "image", specifier = ">=1.5.33" }, + { name = "langchain", specifier = ">=1.2.0" }, + { name = "langchain-community", specifier = ">=0.4.1" }, + { name = "load", specifier = ">=1.0.14" }, + { name = "load-dotenv", specifier = ">=0.1.0" }, + { name = "loguru", specifier = ">=0.7.3" }, + { name = "minio", specifier = ">=7.2.20" }, + { name = "mmcv", specifier = ">=2.2.0" }, + { name = "moviepy", specifier = "==1.0.3" }, + { name = "numpy", specifier = "<2" }, + { name = "ollama", specifier = ">=0.6.1" }, + { name = "opencv-python", specifier = ">=4.11.0.86" }, + { name = "pandas", specifier = ">=2.3.3" }, + { name = "pandas-stubs", specifier = "~=2.3.3" }, + { name = "pika", specifier = ">=1.3.2" }, + { name = "pillow", specifier = ">=12.0.0" }, + { name = "pyasyncore", specifier = ">=1.0.4" }, + 
{ name = "pydantic", specifier = ">=2.12.5" }, + { name = "pydantic-core", specifier = ">=2.41.5" }, + { name = "pydantic-settings", specifier = ">=2.12.0" }, + { name = "pymilvus", specifier = ">=2.6.5" }, + { name = "pymysql", specifier = ">=1.1.2" }, + { name = "python-multipart", specifier = ">=0.0.21" }, + { name = "pyviz-comms", specifier = ">=3.0.6" }, + { name = "redis", specifier = ">=7.1.0" }, + { name = "retry", specifier = ">=0.9.2" }, + { name = "scikit-image", specifier = ">=0.26.0" }, + { name = "scipy", specifier = ">=1.16.3" }, + { name = "scipy-stubs", specifier = "~=1.16.3" }, + { name = "seaborn", specifier = ">=0.13.2" }, + { name = "tool", specifier = ">=0.8.0" }, + { name = "torch", specifier = ">=2.9.1" }, + { name = "torchvision", specifier = ">=0.24.1" }, + { name = "tritonclient", extras = ["all"], specifier = ">=2.63.0" }, + { name = "uvicorn", specifier = ">=0.38.0" }, +] + +[[package]] +name = "triton" +version = "3.5.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f2/50/9a8358d3ef58162c0a415d173cfb45b67de60176e1024f71fbc4d24c0b6d/triton-3.5.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d2c6b915a03888ab931a9fd3e55ba36785e1fe70cbea0b40c6ef93b20fc85232", size = 170470207, upload-time = "2025-11-11T17:41:00.253Z" }, ] [[package]] name = "tritonclient" -version = "2.58.0" +version = "2.64.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy" }, @@ -1009,9 +3323,9 @@ dependencies = [ { name = "urllib3" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/cf/c8/7ef73066476d54e505ebed7435293469a2358f3a173106d2ff9eee70b91f/tritonclient-2.58.0-py3-none-any.whl", hash = "sha256:f456c3d982cc4f0eaaac49e1175a0e86f20fa810d2afc44c0fae3587a97ba67c", size = 98240, upload-time = "2025-05-31T22:07:25.032Z" }, - { url = 
"https://files.pythonhosted.org/packages/24/d5/c4ae120fba6621c88c270878e5bffd81c88cf870dd82011e35c2f625cd35/tritonclient-2.58.0-py3-none-manylinux1_x86_64.whl", hash = "sha256:671bf5c6fe441191904978f947ae7db72e4f82b9894301d3c62219a5a2f0c4ee", size = 14450319, upload-time = "2025-05-31T22:07:42.837Z" }, - { url = "https://files.pythonhosted.org/packages/54/8a/9f10a452a0dcc1156434d93624f1d88351ac0547b990e8b5220c89839de3/tritonclient-2.58.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0e25fc64ffaca89ec9b76f8a016a8f0e7391c11b3db334daef1dcc77ce1ca493", size = 13629753, upload-time = "2025-05-31T22:08:07.676Z" }, + { url = "https://files.pythonhosted.org/packages/55/cd/57180039cbd31cbcfa79bf1062c818431331ccc7d61603e0a0775e928a74/tritonclient-2.64.0-py3-none-any.whl", hash = "sha256:f31569ab491d7815f1b8c28d07f489adf817a68b98548d5d933c9845a2879e8b", size = 98299, upload-time = "2025-12-24T02:08:37.195Z" }, + { url = "https://files.pythonhosted.org/packages/47/8a/a04f810e40b874980ca8e907e18faff819777c128dda5005c407314e8e54/tritonclient-2.64.0-py3-none-manylinux1_x86_64.whl", hash = "sha256:4d1653658ba59b380b732969e67842b2c935b147abd1e89458fd64a9335b1fc7", size = 111851, upload-time = "2025-12-24T02:08:17.006Z" }, + { url = "https://files.pythonhosted.org/packages/37/82/5cd615a687ed7c52b8a7563d8ca8a126f3077b78c3e6c5e2c3d9e6cc2542/tritonclient-2.64.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:d3a52ce099840b576aeea6c1e2e7d7a9b109f3c468e52c6212d80d5b49972688", size = 111851, upload-time = "2025-12-24T02:07:40.313Z" }, ] [package.optional-dependencies] @@ -1022,35 +3336,89 @@ all = [ { name = "grpcio" }, { name = "numpy" }, { name = "packaging" }, + { name = "perf-analyzer" }, { name = "protobuf" }, { name = "python-rapidjson" }, ] [[package]] -name = "types-pytz" -version = "2025.2.0.20250516" +name = "typer" +version = "0.21.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/bd/72/b0e711fd90409f5a76c75349055d3eb19992c110f0d2d6aabbd6cfbc14bf/types_pytz-2025.2.0.20250516.tar.gz", hash = "sha256:e1216306f8c0d5da6dafd6492e72eb080c9a166171fa80dd7a1990fd8be7a7b3", size = 10940, upload-time = "2025-05-16T03:07:01.91Z" } +dependencies = [ + { name = "click" }, + { name = "rich" }, + { name = "shellingham" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/30/ff9ede605e3bd086b4dd842499814e128500621f7951ca1e5ce84bbf61b1/typer-0.21.0.tar.gz", hash = "sha256:c87c0d2b6eee3b49c5c64649ec92425492c14488096dfbc8a0c2799b2f6f9c53", size = 106781, upload-time = "2025-12-25T09:54:53.651Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c1/ba/e205cd11c1c7183b23c97e4bcd1de7bc0633e2e867601c32ecfc6ad42675/types_pytz-2025.2.0.20250516-py3-none-any.whl", hash = "sha256:e0e0c8a57e2791c19f718ed99ab2ba623856b11620cb6b637e5f62ce285a7451", size = 10136, upload-time = "2025-05-16T03:07:01.075Z" }, + { url = "https://files.pythonhosted.org/packages/e1/e4/5ebc1899d31d2b1601b32d21cfb4bba022ae6fce323d365f0448031b1660/typer-0.21.0-py3-none-any.whl", hash = "sha256:c79c01ca6b30af9fd48284058a7056ba0d3bf5cf10d0ff3d0c5b11b68c258ac6", size = 47109, upload-time = "2025-12-25T09:54:51.918Z" }, ] [[package]] -name = "types-urllib3" -version = "1.26.25.14" +name = "typer-slim" +version = "0.21.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/73/de/b9d7a68ad39092368fb21dd6194b362b98a1daeea5dcfef5e1adb5031c7e/types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f", size = 11239, upload-time = "2023-07-20T15:19:31.307Z" } +dependencies = [ + { name = "click" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f9/3b/2f60ce16f578b1db5b8816d37d6a4d9786b33b76407fc8c13b0b86312c31/typer_slim-0.21.0.tar.gz", hash = 
"sha256:f2dbd150cfa0fead2242e21fa9f654dfc64773763ddf07c6be9a49ad34f79557", size = 106841, upload-time = "2025-12-25T09:54:55.998Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/11/7b/3fc711b2efea5e85a7a0bbfe269ea944aa767bbba5ec52f9ee45d362ccf3/types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e", size = 15377, upload-time = "2023-07-20T15:19:30.379Z" }, + { url = "https://files.pythonhosted.org/packages/b4/84/e97abf10e4a699194ff07fd586ec7f4cf867d9d04bead559a65f9e7aff84/typer_slim-0.21.0-py3-none-any.whl", hash = "sha256:92aee2188ac6fc2b2924bd75bb61a340b78bd8cd51fd9735533ce5a856812c8e", size = 47174, upload-time = "2025-12-25T09:54:54.609Z" }, +] + +[[package]] +name = "types-pytz" +version = "2025.2.0.20251108" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/40/ff/c047ddc68c803b46470a357454ef76f4acd8c1088f5cc4891cdd909bfcf6/types_pytz-2025.2.0.20251108.tar.gz", hash = "sha256:fca87917836ae843f07129567b74c1929f1870610681b4c92cb86a3df5817bdb", size = 10961, upload-time = "2025-11-08T02:55:57.001Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl", hash = "sha256:0f1c9792cab4eb0e46c52f8845c8f77cf1e313cb3d68bf826aa867fe4717d91c", size = 10116, upload-time = "2025-11-08T02:55:56.194Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "typing-inspect" +version = "0.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/74/1789779d91f1961fa9438e9a8710cdae6bd138c80d7303996933d117264a/typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78", size = 13825, upload-time = "2023-05-24T20:25:47.612Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/65/f3/107a22063bf27bdccf2024833d3445f4eea42b2e598abfbd46f6a63b6cb0/typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f", size = 8827, upload-time = "2023-05-24T20:25:45.287Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, ] [[package]] name = "tzdata" -version = "2025.2" +version = "2025.3" source = { registry = "https://pypi.org/simple" } 
-sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5e/a7/c202b344c5ca7daf398f3b8a477eeb205cf3b6f32e7ec3a6bac0629ca975/tzdata-2025.3.tar.gz", hash = "sha256:de39c2ca5dc7b0344f2eba86f49d614019d29f060fc4ebc8a417896a620b56a7", size = 196772, upload-time = "2025-12-13T17:45:35.667Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" }, + { url = "https://files.pythonhosted.org/packages/c7/b0/003792df09decd6849a5e39c28b513c06e84436a54440380862b5aeff25d/tzdata-2025.3-py2.py3-none-any.whl", hash = "sha256:06a47e5700f3081aab02b2e513160914ff0694bce9947d6b76ebd6bf57cfc5d1", size = 348521, upload-time = "2025-12-13T17:45:33.889Z" }, ] [[package]] @@ -1067,11 +3435,71 @@ wheels = [ [[package]] name = "urllib3" -version = "2.5.0" +version = "2.3.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } +sdist = { url = "https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268, upload-time = "2024-12-22T07:47:30.032Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, + { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369, upload-time = "2024-12-22T07:47:28.074Z" }, +] + +[[package]] +name = "uuid-utils" +version = "0.12.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/0e/512fb221e4970c2f75ca9dae412d320b7d9ddc9f2b15e04ea8e44710396c/uuid_utils-0.12.0.tar.gz", hash = "sha256:252bd3d311b5d6b7f5dfce7a5857e27bb4458f222586bb439463231e5a9cbd64", size = 20889, upload-time = "2025-12-01T17:29:55.494Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/43/de5cd49a57b6293b911b6a9a62fc03e55db9f964da7d5882d9edbee1e9d2/uuid_utils-0.12.0-cp39-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:3b9b30707659292f207b98f294b0e081f6d77e1fbc760ba5b41331a39045f514", size = 603197, upload-time = "2025-12-01T17:29:30.104Z" }, + { url = "https://files.pythonhosted.org/packages/02/fa/5fd1d8c9234e44f0c223910808cde0de43bb69f7df1349e49b1afa7f2baa/uuid_utils-0.12.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:add3d820c7ec14ed37317375bea30249699c5d08ff4ae4dbee9fc9bce3bfbf65", size = 305168, upload-time = "2025-12-01T17:29:31.384Z" }, + { url = "https://files.pythonhosted.org/packages/c8/c6/8633ac9942bf9dc97a897b5154e5dcffa58816ec4dd780b3b12b559ff05c/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b8fce83ecb3b16af29c7809669056c4b6e7cc912cab8c6d07361645de12dd79", size = 340580, upload-time = "2025-12-01T17:29:32.362Z" }, + { url 
= "https://files.pythonhosted.org/packages/f3/88/8a61307b04b4da1c576373003e6d857a04dade52ab035151d62cb84d5cb5/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec921769afcb905035d785582b0791d02304a7850fbd6ce924c1a8976380dfc6", size = 346771, upload-time = "2025-12-01T17:29:33.708Z" }, + { url = "https://files.pythonhosted.org/packages/1c/fb/aab2dcf94b991e62aa167457c7825b9b01055b884b888af926562864398c/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f3b060330f5899a92d5c723547dc6a95adef42433e9748f14c66859a7396664", size = 474781, upload-time = "2025-12-01T17:29:35.237Z" }, + { url = "https://files.pythonhosted.org/packages/5a/7a/dbd5e49c91d6c86dba57158bbfa0e559e1ddf377bb46dcfd58aea4f0d567/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:908dfef7f0bfcf98d406e5dc570c25d2f2473e49b376de41792b6e96c1d5d291", size = 343685, upload-time = "2025-12-01T17:29:36.677Z" }, + { url = "https://files.pythonhosted.org/packages/1a/19/8c4b1d9f450159733b8be421a4e1fb03533709b80ed3546800102d085572/uuid_utils-0.12.0-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4c6a24148926bd0ca63e8a2dabf4cc9dc329a62325b3ad6578ecd60fbf926506", size = 366482, upload-time = "2025-12-01T17:29:37.979Z" }, + { url = "https://files.pythonhosted.org/packages/82/43/c79a6e45687647f80a159c8ba34346f287b065452cc419d07d2212d38420/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:64a91e632669f059ef605f1771d28490b1d310c26198e46f754e8846dddf12f4", size = 523132, upload-time = "2025-12-01T17:29:39.293Z" }, + { url = "https://files.pythonhosted.org/packages/5a/a2/b2d75a621260a40c438aa88593827dfea596d18316520a99e839f7a5fb9d/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:93c082212470bb4603ca3975916c205a9d7ef1443c0acde8fbd1e0f5b36673c7", size = 614218, upload-time = "2025-12-01T17:29:40.315Z" }, + { url = 
"https://files.pythonhosted.org/packages/13/6b/ba071101626edd5a6dabf8525c9a1537ff3d885dbc210540574a03901fef/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:431b1fb7283ba974811b22abd365f2726f8f821ab33f0f715be389640e18d039", size = 546241, upload-time = "2025-12-01T17:29:41.656Z" }, + { url = "https://files.pythonhosted.org/packages/01/12/9a942b81c0923268e6d85bf98d8f0a61fcbcd5e432fef94fdf4ce2ef8748/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2ffd7838c40149100299fa37cbd8bab5ee382372e8e65a148002a37d380df7c8", size = 511842, upload-time = "2025-12-01T17:29:43.107Z" }, + { url = "https://files.pythonhosted.org/packages/a9/a7/c326f5163dd48b79368b87d8a05f5da4668dd228a3f5ca9d79d5fee2fc40/uuid_utils-0.12.0-cp39-abi3-win32.whl", hash = "sha256:487f17c0fee6cbc1d8b90fe811874174a9b1b5683bf2251549e302906a50fed3", size = 179088, upload-time = "2025-12-01T17:29:44.492Z" }, + { url = "https://files.pythonhosted.org/packages/38/92/41c8734dd97213ee1d5ae435cf4499705dc4f2751e3b957fd12376f61784/uuid_utils-0.12.0-cp39-abi3-win_amd64.whl", hash = "sha256:9598e7c9da40357ae8fffc5d6938b1a7017f09a1acbcc95e14af8c65d48c655a", size = 183003, upload-time = "2025-12-01T17:29:45.47Z" }, + { url = "https://files.pythonhosted.org/packages/c9/f9/52ab0359618987331a1f739af837d26168a4b16281c9c3ab46519940c628/uuid_utils-0.12.0-cp39-abi3-win_arm64.whl", hash = "sha256:c9bea7c5b2aa6f57937ebebeee4d4ef2baad10f86f1b97b58a3f6f34c14b4e84", size = 182975, upload-time = "2025-12-01T17:29:46.444Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.40.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c3/d1/8f3c683c9561a4e6689dd3b1d345c815f10f86acd044ee1fb9a4dcd0b8c5/uvicorn-0.40.0.tar.gz", hash = "sha256:839676675e87e73694518b5574fd0f24c9d97b46bea16df7b8c05ea1a51071ea", size = 81761, upload-time = "2025-12-21T14:16:22.45Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/3d/d8/2083a1daa7439a66f3a48589a57d576aa117726762618f6bb09fe3798796/uvicorn-0.40.0-py3-none-any.whl", hash = "sha256:c6c8f55bc8bf13eb6fa9ff87ad62308bbbc33d0b67f84293151efe87e0d5f2ee", size = 68502, upload-time = "2025-12-21T14:16:21.041Z" }, +] + +[package.optional-dependencies] +standard = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "httptools" }, + { name = "python-dotenv" }, + { name = "pyyaml" }, + { name = "uvloop", marker = "platform_python_implementation != 'PyPy' and sys_platform != 'cygwin' and sys_platform != 'win32'" }, + { name = "watchfiles" }, + { name = "websockets" }, +] + +[[package]] +name = "uvloop" +version = "0.22.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/06/f0/18d39dbd1971d6d62c4629cc7fa67f74821b0dc1f5a77af43719de7936a7/uvloop-0.22.1.tar.gz", hash = "sha256:6c84bae345b9147082b17371e3dd5d42775bddce91f885499017f4607fdaf39f", size = 2443250, upload-time = "2025-10-16T22:17:19.342Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3d/ff/7f72e8170be527b4977b033239a83a68d5c881cc4775fca255c677f7ac5d/uvloop-0.22.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:fe94b4564e865d968414598eea1a6de60adba0c040ba4ed05ac1300de402cd42", size = 1359936, upload-time = "2025-10-16T22:16:29.436Z" }, + { url = "https://files.pythonhosted.org/packages/c3/c6/e5d433f88fd54d81ef4be58b2b7b0cea13c442454a1db703a1eea0db1a59/uvloop-0.22.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:51eb9bd88391483410daad430813d982010f9c9c89512321f5b60e2cddbdddd6", size = 752769, upload-time = "2025-10-16T22:16:30.493Z" }, + { url = "https://files.pythonhosted.org/packages/24/68/a6ac446820273e71aa762fa21cdcc09861edd3536ff47c5cd3b7afb10eeb/uvloop-0.22.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:700e674a166ca5778255e0e1dc4e9d79ab2acc57b9171b79e65feba7184b3370", size 
= 4317413, upload-time = "2025-10-16T22:16:31.644Z" }, + { url = "https://files.pythonhosted.org/packages/5f/6f/e62b4dfc7ad6518e7eff2516f680d02a0f6eb62c0c212e152ca708a0085e/uvloop-0.22.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b5b1ac819a3f946d3b2ee07f09149578ae76066d70b44df3fa990add49a82e4", size = 4426307, upload-time = "2025-10-16T22:16:32.917Z" }, + { url = "https://files.pythonhosted.org/packages/90/60/97362554ac21e20e81bcef1150cb2a7e4ffdaf8ea1e5b2e8bf7a053caa18/uvloop-0.22.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e047cc068570bac9866237739607d1313b9253c3051ad84738cbb095be0537b2", size = 4131970, upload-time = "2025-10-16T22:16:34.015Z" }, + { url = "https://files.pythonhosted.org/packages/99/39/6b3f7d234ba3964c428a6e40006340f53ba37993f46ed6e111c6e9141d18/uvloop-0.22.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:512fec6815e2dd45161054592441ef76c830eddaad55c8aa30952e6fe1ed07c0", size = 4296343, upload-time = "2025-10-16T22:16:35.149Z" }, ] [[package]] @@ -1084,110 +3512,194 @@ wheels = [ ] [[package]] -name = "wcwidth" -version = "0.2.13" +name = "watchfiles" +version = "1.1.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301, upload-time = "2024-01-06T02:10:57.829Z" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c2/c9/8869df9b2a2d6c59d79220a4db37679e74f807c559ffe5265e08b227a210/watchfiles-1.1.1.tar.gz", hash = "sha256:a173cb5c16c4f40ab19cecf48a534c409f7ea983ab8fed0741304a1c0a31b3f2", size = 94440, upload-time = "2025-10-14T15:06:21.08Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166, upload-time = "2024-01-06T02:10:55.763Z" }, + { url = "https://files.pythonhosted.org/packages/74/d5/f039e7e3c639d9b1d09b07ea412a6806d38123f0508e5f9b48a87b0a76cc/watchfiles-1.1.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:8c89f9f2f740a6b7dcc753140dd5e1ab9215966f7a3530d0c0705c83b401bd7d", size = 404745, upload-time = "2025-10-14T15:04:46.731Z" }, + { url = "https://files.pythonhosted.org/packages/a5/96/a881a13aa1349827490dab2d363c8039527060cfcc2c92cc6d13d1b1049e/watchfiles-1.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bd404be08018c37350f0d6e34676bd1e2889990117a2b90070b3007f172d0610", size = 391769, upload-time = "2025-10-14T15:04:48.003Z" }, + { url = "https://files.pythonhosted.org/packages/4b/5b/d3b460364aeb8da471c1989238ea0e56bec24b6042a68046adf3d9ddb01c/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8526e8f916bb5b9a0a777c8317c23ce65de259422bba5b31325a6fa6029d33af", size = 449374, upload-time = "2025-10-14T15:04:49.179Z" }, + { url = "https://files.pythonhosted.org/packages/b9/44/5769cb62d4ed055cb17417c0a109a92f007114a4e07f30812a73a4efdb11/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2edc3553362b1c38d9f06242416a5d8e9fe235c204a4072e988ce2e5bb1f69f6", size = 459485, upload-time = "2025-10-14T15:04:50.155Z" }, + { url = "https://files.pythonhosted.org/packages/19/0c/286b6301ded2eccd4ffd0041a1b726afda999926cf720aab63adb68a1e36/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30f7da3fb3f2844259cba4720c3fc7138eb0f7b659c38f3bfa65084c7fc7abce", size = 488813, upload-time = "2025-10-14T15:04:51.059Z" }, + { url = 
"https://files.pythonhosted.org/packages/c7/2b/8530ed41112dd4a22f4dcfdb5ccf6a1baad1ff6eed8dc5a5f09e7e8c41c7/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8979280bdafff686ba5e4d8f97840f929a87ed9cdf133cbbd42f7766774d2aa", size = 594816, upload-time = "2025-10-14T15:04:52.031Z" }, + { url = "https://files.pythonhosted.org/packages/ce/d2/f5f9fb49489f184f18470d4f99f4e862a4b3e9ac2865688eb2099e3d837a/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dcc5c24523771db3a294c77d94771abcfcb82a0e0ee8efd910c37c59ec1b31bb", size = 475186, upload-time = "2025-10-14T15:04:53.064Z" }, + { url = "https://files.pythonhosted.org/packages/cf/68/5707da262a119fb06fbe214d82dd1fe4a6f4af32d2d14de368d0349eb52a/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db5d7ae38ff20153d542460752ff397fcf5c96090c1230803713cf3147a6803", size = 456812, upload-time = "2025-10-14T15:04:55.174Z" }, + { url = "https://files.pythonhosted.org/packages/66/ab/3cbb8756323e8f9b6f9acb9ef4ec26d42b2109bce830cc1f3468df20511d/watchfiles-1.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:28475ddbde92df1874b6c5c8aaeb24ad5be47a11f87cde5a28ef3835932e3e94", size = 630196, upload-time = "2025-10-14T15:04:56.22Z" }, + { url = "https://files.pythonhosted.org/packages/78/46/7152ec29b8335f80167928944a94955015a345440f524d2dfe63fc2f437b/watchfiles-1.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:36193ed342f5b9842edd3532729a2ad55c4160ffcfa3700e0d54be496b70dd43", size = 622657, upload-time = "2025-10-14T15:04:57.521Z" }, + { url = "https://files.pythonhosted.org/packages/0a/bf/95895e78dd75efe9a7f31733607f384b42eb5feb54bd2eb6ed57cc2e94f4/watchfiles-1.1.1-cp312-cp312-win32.whl", hash = "sha256:859e43a1951717cc8de7f4c77674a6d389b106361585951d9e69572823f311d9", size = 272042, upload-time = "2025-10-14T15:04:59.046Z" }, + { url = 
"https://files.pythonhosted.org/packages/87/0a/90eb755f568de2688cb220171c4191df932232c20946966c27a59c400850/watchfiles-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:91d4c9a823a8c987cce8fa2690923b069966dabb196dd8d137ea2cede885fde9", size = 288410, upload-time = "2025-10-14T15:05:00.081Z" }, + { url = "https://files.pythonhosted.org/packages/36/76/f322701530586922fbd6723c4f91ace21364924822a8772c549483abed13/watchfiles-1.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:a625815d4a2bdca61953dbba5a39d60164451ef34c88d751f6c368c3ea73d404", size = 278209, upload-time = "2025-10-14T15:05:01.168Z" }, +] + +[[package]] +name = "wcwidth" +version = "0.2.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/30/6b0809f4510673dc723187aeaf24c7f5459922d01e2f794277a3dfb90345/wcwidth-0.2.14.tar.gz", hash = "sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605", size = 102293, upload-time = "2025-09-22T16:29:53.023Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" }, +] + +[[package]] +name = "websocket-client" +version = "1.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2c/41/aa4bf9664e4cda14c3b39865b12251e8e7d239f4cd0e3cc1b6c2ccde25c1/websocket_client-1.9.0.tar.gz", hash = "sha256:9e813624b6eb619999a97dc7958469217c3176312b3a16a4bd1bc7e08a46ec98", size = 70576, upload-time = "2025-10-07T21:16:36.495Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/34/db/b10e48aa8fff7407e67470363eac595018441cf32d5e1001567a7aeba5d2/websocket_client-1.9.0-py3-none-any.whl", hash = "sha256:af248a825037ef591efbf6ed20cc5faa03d3b47b9e5a2230a529eeee1c1fc3ef", size = 82616, upload-time = 
"2025-10-07T21:16:34.951Z" }, +] + +[[package]] +name = "websockets" +version = "15.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016, upload-time = "2025-03-05T20:03:41.606Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/6b/4545a0d843594f5d0771e86463606a3988b5a09ca5123136f8a76580dd63/websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3", size = 175437, upload-time = "2025-03-05T20:02:16.706Z" }, + { url = "https://files.pythonhosted.org/packages/f4/71/809a0f5f6a06522af902e0f2ea2757f71ead94610010cf570ab5c98e99ed/websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665", size = 173096, upload-time = "2025-03-05T20:02:18.832Z" }, + { url = "https://files.pythonhosted.org/packages/3d/69/1a681dd6f02180916f116894181eab8b2e25b31e484c5d0eae637ec01f7c/websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2", size = 173332, upload-time = "2025-03-05T20:02:20.187Z" }, + { url = "https://files.pythonhosted.org/packages/a6/02/0073b3952f5bce97eafbb35757f8d0d54812b6174ed8dd952aa08429bcc3/websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215", size = 183152, upload-time = "2025-03-05T20:02:22.286Z" }, + { url = "https://files.pythonhosted.org/packages/74/45/c205c8480eafd114b428284840da0b1be9ffd0e4f87338dc95dc6ff961a1/websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5", size = 182096, upload-time = "2025-03-05T20:02:24.368Z" }, + { url = "https://files.pythonhosted.org/packages/14/8f/aa61f528fba38578ec553c145857a181384c72b98156f858ca5c8e82d9d3/websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65", size = 182523, upload-time = "2025-03-05T20:02:25.669Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6d/0267396610add5bc0d0d3e77f546d4cd287200804fe02323797de77dbce9/websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe", size = 182790, upload-time = "2025-03-05T20:02:26.99Z" }, + { url = "https://files.pythonhosted.org/packages/02/05/c68c5adbf679cf610ae2f74a9b871ae84564462955d991178f95a1ddb7dd/websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4", size = 182165, upload-time = "2025-03-05T20:02:30.291Z" }, + { url = "https://files.pythonhosted.org/packages/29/93/bb672df7b2f5faac89761cb5fa34f5cec45a4026c383a4b5761c6cea5c16/websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597", size = 182160, upload-time = "2025-03-05T20:02:31.634Z" }, + { url = "https://files.pythonhosted.org/packages/ff/83/de1f7709376dc3ca9b7eeb4b9a07b4526b14876b6d372a4dc62312bebee0/websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9", size = 176395, upload-time = "2025-03-05T20:02:33.017Z" }, + { url = "https://files.pythonhosted.org/packages/7d/71/abf2ebc3bbfa40f391ce1428c7168fb20582d0ff57019b69ea20fa698043/websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7", size = 176841, upload-time = "2025-03-05T20:02:34.498Z" }, + { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" }, +] + +[[package]] +name = "win32-setctime" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b3/8f/705086c9d734d3b663af0e9bb3d4de6578d08f46b1b101c2442fd9aecaa2/win32_setctime-1.2.0.tar.gz", hash = "sha256:ae1fdf948f5640aae05c511ade119313fb6a30d7eabe25fef9764dca5873c4c0", size = 4867, upload-time = "2024-12-07T15:28:28.314Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e1/07/c6fe3ad3e685340704d314d765b7912993bcb8dc198f0e7a89382d37974b/win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390", size = 4083, upload-time = "2024-12-07T15:28:26.465Z" }, +] + +[[package]] +name = "xxhash" +version = "3.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/02/84/30869e01909fb37a6cc7e18688ee8bf1e42d57e7e0777636bd47524c43c7/xxhash-3.6.0.tar.gz", hash = "sha256:f0162a78b13a0d7617b2845b90c763339d1f1d82bb04a4b07f4ab535cc5e05d6", size = 85160, upload-time = "2025-10-02T14:37:08.097Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/07/d9412f3d7d462347e4511181dea65e47e0d0e16e26fbee2ea86a2aefb657/xxhash-3.6.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:01362c4331775398e7bb34e3ab403bc9ee9f7c497bc7dee6272114055277dd3c", size = 32744, upload-time = "2025-10-02T14:34:34.622Z" }, + { url = 
"https://files.pythonhosted.org/packages/79/35/0429ee11d035fc33abe32dca1b2b69e8c18d236547b9a9b72c1929189b9a/xxhash-3.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b7b2df81a23f8cb99656378e72501b2cb41b1827c0f5a86f87d6b06b69f9f204", size = 30816, upload-time = "2025-10-02T14:34:36.043Z" }, + { url = "https://files.pythonhosted.org/packages/b7/f2/57eb99aa0f7d98624c0932c5b9a170e1806406cdbcdb510546634a1359e0/xxhash-3.6.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:dc94790144e66b14f67b10ac8ed75b39ca47536bf8800eb7c24b50271ea0c490", size = 194035, upload-time = "2025-10-02T14:34:37.354Z" }, + { url = "https://files.pythonhosted.org/packages/4c/ed/6224ba353690d73af7a3f1c7cdb1fc1b002e38f783cb991ae338e1eb3d79/xxhash-3.6.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93f107c673bccf0d592cdba077dedaf52fe7f42dcd7676eba1f6d6f0c3efffd2", size = 212914, upload-time = "2025-10-02T14:34:38.6Z" }, + { url = "https://files.pythonhosted.org/packages/38/86/fb6b6130d8dd6b8942cc17ab4d90e223653a89aa32ad2776f8af7064ed13/xxhash-3.6.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aa5ee3444c25b69813663c9f8067dcfaa2e126dc55e8dddf40f4d1c25d7effa", size = 212163, upload-time = "2025-10-02T14:34:39.872Z" }, + { url = "https://files.pythonhosted.org/packages/ee/dc/e84875682b0593e884ad73b2d40767b5790d417bde603cceb6878901d647/xxhash-3.6.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f7f99123f0e1194fa59cc69ad46dbae2e07becec5df50a0509a808f90a0f03f0", size = 445411, upload-time = "2025-10-02T14:34:41.569Z" }, + { url = "https://files.pythonhosted.org/packages/11/4f/426f91b96701ec2f37bb2b8cec664eff4f658a11f3fa9d94f0a887ea6d2b/xxhash-3.6.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:49e03e6fe2cac4a1bc64952dd250cf0dbc5ef4ebb7b8d96bce82e2de163c82a2", size = 193883, upload-time = "2025-10-02T14:34:43.249Z" }, + { url = "https://files.pythonhosted.org/packages/53/5a/ddbb83eee8e28b778eacfc5a85c969673e4023cdeedcfcef61f36731610b/xxhash-3.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bd17fede52a17a4f9a7bc4472a5867cb0b160deeb431795c0e4abe158bc784e9", size = 210392, upload-time = "2025-10-02T14:34:45.042Z" }, + { url = "https://files.pythonhosted.org/packages/1e/c2/ff69efd07c8c074ccdf0a4f36fcdd3d27363665bcdf4ba399abebe643465/xxhash-3.6.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6fb5f5476bef678f69db04f2bd1efbed3030d2aba305b0fc1773645f187d6a4e", size = 197898, upload-time = "2025-10-02T14:34:46.302Z" }, + { url = "https://files.pythonhosted.org/packages/58/ca/faa05ac19b3b622c7c9317ac3e23954187516298a091eb02c976d0d3dd45/xxhash-3.6.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:843b52f6d88071f87eba1631b684fcb4b2068cd2180a0224122fe4ef011a9374", size = 210655, upload-time = "2025-10-02T14:34:47.571Z" }, + { url = "https://files.pythonhosted.org/packages/d4/7a/06aa7482345480cc0cb597f5c875b11a82c3953f534394f620b0be2f700c/xxhash-3.6.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7d14a6cfaf03b1b6f5f9790f76880601ccc7896aff7ab9cd8978a939c1eb7e0d", size = 414001, upload-time = "2025-10-02T14:34:49.273Z" }, + { url = "https://files.pythonhosted.org/packages/23/07/63ffb386cd47029aa2916b3d2f454e6cc5b9f5c5ada3790377d5430084e7/xxhash-3.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:418daf3db71e1413cfe211c2f9a528456936645c17f46b5204705581a45390ae", size = 191431, upload-time = "2025-10-02T14:34:50.798Z" }, + { url = "https://files.pythonhosted.org/packages/0f/93/14fde614cadb4ddf5e7cebf8918b7e8fac5ae7861c1875964f17e678205c/xxhash-3.6.0-cp312-cp312-win32.whl", hash = "sha256:50fc255f39428a27299c20e280d6193d8b63b8ef8028995323bf834a026b4fbb", size = 30617, upload-time = "2025-10-02T14:34:51.954Z" }, + { url = 
"https://files.pythonhosted.org/packages/13/5d/0d125536cbe7565a83d06e43783389ecae0c0f2ed037b48ede185de477c0/xxhash-3.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:c0f2ab8c715630565ab8991b536ecded9416d615538be8ecddce43ccf26cbc7c", size = 31534, upload-time = "2025-10-02T14:34:53.276Z" }, + { url = "https://files.pythonhosted.org/packages/54/85/6ec269b0952ec7e36ba019125982cf11d91256a778c7c3f98a4c5043d283/xxhash-3.6.0-cp312-cp312-win_arm64.whl", hash = "sha256:eae5c13f3bc455a3bbb68bdc513912dc7356de7e2280363ea235f71f54064829", size = 27876, upload-time = "2025-10-02T14:34:54.371Z" }, +] + +[[package]] +name = "yapf" +version = "0.43.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "platformdirs" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/23/97/b6f296d1e9cc1ec25c7604178b48532fa5901f721bcf1b8d8148b13e5588/yapf-0.43.0.tar.gz", hash = "sha256:00d3aa24bfedff9420b2e0d5d9f5ab6d9d4268e72afbf59bb3fa542781d5218e", size = 254907, upload-time = "2024-11-14T00:11:41.584Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/37/81/6acd6601f61e31cfb8729d3da6d5df966f80f374b78eff83760714487338/yapf-0.43.0-py3-none-any.whl", hash = "sha256:224faffbc39c428cb095818cf6ef5511fdab6f7430a10783fdfb292ccf2852ca", size = 256158, upload-time = "2024-11-14T00:11:39.37Z" }, ] [[package]] name = "yarl" -version = "1.20.1" +version = "1.22.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "idna" }, { name = "multidict" }, { name = "propcache" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3c/fb/efaa23fa4e45537b827620f04cf8f3cd658b76642205162e072703a5b963/yarl-1.20.1.tar.gz", hash = "sha256:d017a4997ee50c91fd5466cef416231bb82177b93b029906cefc542ce14c35ac", size = 186428, upload-time = "2025-06-10T00:46:09.923Z" } +sdist = { url = "https://files.pythonhosted.org/packages/57/63/0c6ebca57330cd313f6102b16dd57ffaf3ec4c83403dcb45dbd15c6f3ea1/yarl-1.22.0.tar.gz", hash = 
"sha256:bebf8557577d4401ba8bd9ff33906f1376c877aa78d1fe216ad01b4d6745af71", size = 187169, upload-time = "2025-10-06T14:12:55.963Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5f/9a/cb7fad7d73c69f296eda6815e4a2c7ed53fc70c2f136479a91c8e5fbdb6d/yarl-1.20.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdcc4cd244e58593a4379fe60fdee5ac0331f8eb70320a24d591a3be197b94a9", size = 133667, upload-time = "2025-06-10T00:43:44.369Z" }, - { url = "https://files.pythonhosted.org/packages/67/38/688577a1cb1e656e3971fb66a3492501c5a5df56d99722e57c98249e5b8a/yarl-1.20.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b29a2c385a5f5b9c7d9347e5812b6f7ab267193c62d282a540b4fc528c8a9d2a", size = 91025, upload-time = "2025-06-10T00:43:46.295Z" }, - { url = "https://files.pythonhosted.org/packages/50/ec/72991ae51febeb11a42813fc259f0d4c8e0507f2b74b5514618d8b640365/yarl-1.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1112ae8154186dfe2de4732197f59c05a83dc814849a5ced892b708033f40dc2", size = 89709, upload-time = "2025-06-10T00:43:48.22Z" }, - { url = "https://files.pythonhosted.org/packages/99/da/4d798025490e89426e9f976702e5f9482005c548c579bdae792a4c37769e/yarl-1.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90bbd29c4fe234233f7fa2b9b121fb63c321830e5d05b45153a2ca68f7d310ee", size = 352287, upload-time = "2025-06-10T00:43:49.924Z" }, - { url = "https://files.pythonhosted.org/packages/1a/26/54a15c6a567aac1c61b18aa0f4b8aa2e285a52d547d1be8bf48abe2b3991/yarl-1.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:680e19c7ce3710ac4cd964e90dad99bf9b5029372ba0c7cbfcd55e54d90ea819", size = 345429, upload-time = "2025-06-10T00:43:51.7Z" }, - { url = "https://files.pythonhosted.org/packages/d6/95/9dcf2386cb875b234353b93ec43e40219e14900e046bf6ac118f94b1e353/yarl-1.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:4a979218c1fdb4246a05efc2cc23859d47c89af463a90b99b7c56094daf25a16", size = 365429, upload-time = "2025-06-10T00:43:53.494Z" }, - { url = "https://files.pythonhosted.org/packages/91/b2/33a8750f6a4bc224242a635f5f2cff6d6ad5ba651f6edcccf721992c21a0/yarl-1.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255b468adf57b4a7b65d8aad5b5138dce6a0752c139965711bdcb81bc370e1b6", size = 363862, upload-time = "2025-06-10T00:43:55.766Z" }, - { url = "https://files.pythonhosted.org/packages/98/28/3ab7acc5b51f4434b181b0cee8f1f4b77a65919700a355fb3617f9488874/yarl-1.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a97d67108e79cfe22e2b430d80d7571ae57d19f17cda8bb967057ca8a7bf5bfd", size = 355616, upload-time = "2025-06-10T00:43:58.056Z" }, - { url = "https://files.pythonhosted.org/packages/36/a3/f666894aa947a371724ec7cd2e5daa78ee8a777b21509b4252dd7bd15e29/yarl-1.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8570d998db4ddbfb9a590b185a0a33dbf8aafb831d07a5257b4ec9948df9cb0a", size = 339954, upload-time = "2025-06-10T00:43:59.773Z" }, - { url = "https://files.pythonhosted.org/packages/f1/81/5f466427e09773c04219d3450d7a1256138a010b6c9f0af2d48565e9ad13/yarl-1.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:97c75596019baae7c71ccf1d8cc4738bc08134060d0adfcbe5642f778d1dca38", size = 365575, upload-time = "2025-06-10T00:44:02.051Z" }, - { url = "https://files.pythonhosted.org/packages/2e/e3/e4b0ad8403e97e6c9972dd587388940a032f030ebec196ab81a3b8e94d31/yarl-1.20.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1c48912653e63aef91ff988c5432832692ac5a1d8f0fb8a33091520b5bbe19ef", size = 365061, upload-time = "2025-06-10T00:44:04.196Z" }, - { url = "https://files.pythonhosted.org/packages/ac/99/b8a142e79eb86c926f9f06452eb13ecb1bb5713bd01dc0038faf5452e544/yarl-1.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:4c3ae28f3ae1563c50f3d37f064ddb1511ecc1d5584e88c6b7c63cf7702a6d5f", size = 364142, upload-time = "2025-06-10T00:44:06.527Z" }, - { url = "https://files.pythonhosted.org/packages/34/f2/08ed34a4a506d82a1a3e5bab99ccd930a040f9b6449e9fd050320e45845c/yarl-1.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c5e9642f27036283550f5f57dc6156c51084b458570b9d0d96100c8bebb186a8", size = 381894, upload-time = "2025-06-10T00:44:08.379Z" }, - { url = "https://files.pythonhosted.org/packages/92/f8/9a3fbf0968eac704f681726eff595dce9b49c8a25cd92bf83df209668285/yarl-1.20.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2c26b0c49220d5799f7b22c6838409ee9bc58ee5c95361a4d7831f03cc225b5a", size = 383378, upload-time = "2025-06-10T00:44:10.51Z" }, - { url = "https://files.pythonhosted.org/packages/af/85/9363f77bdfa1e4d690957cd39d192c4cacd1c58965df0470a4905253b54f/yarl-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:564ab3d517e3d01c408c67f2e5247aad4019dcf1969982aba3974b4093279004", size = 374069, upload-time = "2025-06-10T00:44:12.834Z" }, - { url = "https://files.pythonhosted.org/packages/35/99/9918c8739ba271dcd935400cff8b32e3cd319eaf02fcd023d5dcd487a7c8/yarl-1.20.1-cp312-cp312-win32.whl", hash = "sha256:daea0d313868da1cf2fac6b2d3a25c6e3a9e879483244be38c8e6a41f1d876a5", size = 81249, upload-time = "2025-06-10T00:44:14.731Z" }, - { url = "https://files.pythonhosted.org/packages/eb/83/5d9092950565481b413b31a23e75dd3418ff0a277d6e0abf3729d4d1ce25/yarl-1.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:48ea7d7f9be0487339828a4de0360d7ce0efc06524a48e1810f945c45b813698", size = 86710, upload-time = "2025-06-10T00:44:16.716Z" }, - { url = "https://files.pythonhosted.org/packages/8a/e1/2411b6d7f769a07687acee88a062af5833cf1966b7266f3d8dfb3d3dc7d3/yarl-1.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0b5ff0fbb7c9f1b1b5ab53330acbfc5247893069e7716840c8e7d5bb7355038a", size = 131811, upload-time = "2025-06-10T00:44:18.933Z" }, - { url = 
"https://files.pythonhosted.org/packages/b2/27/584394e1cb76fb771371770eccad35de400e7b434ce3142c2dd27392c968/yarl-1.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:14f326acd845c2b2e2eb38fb1346c94f7f3b01a4f5c788f8144f9b630bfff9a3", size = 90078, upload-time = "2025-06-10T00:44:20.635Z" }, - { url = "https://files.pythonhosted.org/packages/bf/9a/3246ae92d4049099f52d9b0fe3486e3b500e29b7ea872d0f152966fc209d/yarl-1.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f60e4ad5db23f0b96e49c018596707c3ae89f5d0bd97f0ad3684bcbad899f1e7", size = 88748, upload-time = "2025-06-10T00:44:22.34Z" }, - { url = "https://files.pythonhosted.org/packages/a3/25/35afe384e31115a1a801fbcf84012d7a066d89035befae7c5d4284df1e03/yarl-1.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49bdd1b8e00ce57e68ba51916e4bb04461746e794e7c4d4bbc42ba2f18297691", size = 349595, upload-time = "2025-06-10T00:44:24.314Z" }, - { url = "https://files.pythonhosted.org/packages/28/2d/8aca6cb2cabc8f12efcb82749b9cefecbccfc7b0384e56cd71058ccee433/yarl-1.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:66252d780b45189975abfed839616e8fd2dbacbdc262105ad7742c6ae58f3e31", size = 342616, upload-time = "2025-06-10T00:44:26.167Z" }, - { url = "https://files.pythonhosted.org/packages/0b/e9/1312633d16b31acf0098d30440ca855e3492d66623dafb8e25b03d00c3da/yarl-1.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59174e7332f5d153d8f7452a102b103e2e74035ad085f404df2e40e663a22b28", size = 361324, upload-time = "2025-06-10T00:44:27.915Z" }, - { url = "https://files.pythonhosted.org/packages/bc/a0/688cc99463f12f7669eec7c8acc71ef56a1521b99eab7cd3abb75af887b0/yarl-1.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3968ec7d92a0c0f9ac34d5ecfd03869ec0cab0697c91a45db3fbbd95fe1b653", size = 359676, upload-time = "2025-06-10T00:44:30.041Z" }, - { url = 
"https://files.pythonhosted.org/packages/af/44/46407d7f7a56e9a85a4c207724c9f2c545c060380718eea9088f222ba697/yarl-1.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1a4fbb50e14396ba3d375f68bfe02215d8e7bc3ec49da8341fe3157f59d2ff5", size = 352614, upload-time = "2025-06-10T00:44:32.171Z" }, - { url = "https://files.pythonhosted.org/packages/b1/91/31163295e82b8d5485d31d9cf7754d973d41915cadce070491778d9c9825/yarl-1.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11a62c839c3a8eac2410e951301309426f368388ff2f33799052787035793b02", size = 336766, upload-time = "2025-06-10T00:44:34.494Z" }, - { url = "https://files.pythonhosted.org/packages/b4/8e/c41a5bc482121f51c083c4c2bcd16b9e01e1cf8729e380273a952513a21f/yarl-1.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:041eaa14f73ff5a8986b4388ac6bb43a77f2ea09bf1913df7a35d4646db69e53", size = 364615, upload-time = "2025-06-10T00:44:36.856Z" }, - { url = "https://files.pythonhosted.org/packages/e3/5b/61a3b054238d33d70ea06ebba7e58597891b71c699e247df35cc984ab393/yarl-1.20.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:377fae2fef158e8fd9d60b4c8751387b8d1fb121d3d0b8e9b0be07d1b41e83dc", size = 360982, upload-time = "2025-06-10T00:44:39.141Z" }, - { url = "https://files.pythonhosted.org/packages/df/a3/6a72fb83f8d478cb201d14927bc8040af901811a88e0ff2da7842dd0ed19/yarl-1.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1c92f4390e407513f619d49319023664643d3339bd5e5a56a3bebe01bc67ec04", size = 369792, upload-time = "2025-06-10T00:44:40.934Z" }, - { url = "https://files.pythonhosted.org/packages/7c/af/4cc3c36dfc7c077f8dedb561eb21f69e1e9f2456b91b593882b0b18c19dc/yarl-1.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d25ddcf954df1754ab0f86bb696af765c5bfaba39b74095f27eececa049ef9a4", size = 382049, upload-time = "2025-06-10T00:44:42.854Z" }, - { url = 
"https://files.pythonhosted.org/packages/19/3a/e54e2c4752160115183a66dc9ee75a153f81f3ab2ba4bf79c3c53b33de34/yarl-1.20.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:909313577e9619dcff8c31a0ea2aa0a2a828341d92673015456b3ae492e7317b", size = 384774, upload-time = "2025-06-10T00:44:45.275Z" }, - { url = "https://files.pythonhosted.org/packages/9c/20/200ae86dabfca89060ec6447649f219b4cbd94531e425e50d57e5f5ac330/yarl-1.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:793fd0580cb9664548c6b83c63b43c477212c0260891ddf86809e1c06c8b08f1", size = 374252, upload-time = "2025-06-10T00:44:47.31Z" }, - { url = "https://files.pythonhosted.org/packages/83/75/11ee332f2f516b3d094e89448da73d557687f7d137d5a0f48c40ff211487/yarl-1.20.1-cp313-cp313-win32.whl", hash = "sha256:468f6e40285de5a5b3c44981ca3a319a4b208ccc07d526b20b12aeedcfa654b7", size = 81198, upload-time = "2025-06-10T00:44:49.164Z" }, - { url = "https://files.pythonhosted.org/packages/ba/ba/39b1ecbf51620b40ab402b0fc817f0ff750f6d92712b44689c2c215be89d/yarl-1.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:495b4ef2fea40596bfc0affe3837411d6aa3371abcf31aac0ccc4bdd64d4ef5c", size = 86346, upload-time = "2025-06-10T00:44:51.182Z" }, - { url = "https://files.pythonhosted.org/packages/43/c7/669c52519dca4c95153c8ad96dd123c79f354a376346b198f438e56ffeb4/yarl-1.20.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:f60233b98423aab21d249a30eb27c389c14929f47be8430efa7dbd91493a729d", size = 138826, upload-time = "2025-06-10T00:44:52.883Z" }, - { url = "https://files.pythonhosted.org/packages/6a/42/fc0053719b44f6ad04a75d7f05e0e9674d45ef62f2d9ad2c1163e5c05827/yarl-1.20.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:6f3eff4cc3f03d650d8755c6eefc844edde99d641d0dcf4da3ab27141a5f8ddf", size = 93217, upload-time = "2025-06-10T00:44:54.658Z" }, - { url = "https://files.pythonhosted.org/packages/4f/7f/fa59c4c27e2a076bba0d959386e26eba77eb52ea4a0aac48e3515c186b4c/yarl-1.20.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = 
"sha256:69ff8439d8ba832d6bed88af2c2b3445977eba9a4588b787b32945871c2444e3", size = 92700, upload-time = "2025-06-10T00:44:56.784Z" }, - { url = "https://files.pythonhosted.org/packages/2f/d4/062b2f48e7c93481e88eff97a6312dca15ea200e959f23e96d8ab898c5b8/yarl-1.20.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cf34efa60eb81dd2645a2e13e00bb98b76c35ab5061a3989c7a70f78c85006d", size = 347644, upload-time = "2025-06-10T00:44:59.071Z" }, - { url = "https://files.pythonhosted.org/packages/89/47/78b7f40d13c8f62b499cc702fdf69e090455518ae544c00a3bf4afc9fc77/yarl-1.20.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8e0fe9364ad0fddab2688ce72cb7a8e61ea42eff3c7caeeb83874a5d479c896c", size = 323452, upload-time = "2025-06-10T00:45:01.605Z" }, - { url = "https://files.pythonhosted.org/packages/eb/2b/490d3b2dc66f52987d4ee0d3090a147ea67732ce6b4d61e362c1846d0d32/yarl-1.20.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f64fbf81878ba914562c672024089e3401974a39767747691c65080a67b18c1", size = 346378, upload-time = "2025-06-10T00:45:03.946Z" }, - { url = "https://files.pythonhosted.org/packages/66/ad/775da9c8a94ce925d1537f939a4f17d782efef1f973039d821cbe4bcc211/yarl-1.20.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6342d643bf9a1de97e512e45e4b9560a043347e779a173250824f8b254bd5ce", size = 353261, upload-time = "2025-06-10T00:45:05.992Z" }, - { url = "https://files.pythonhosted.org/packages/4b/23/0ed0922b47a4f5c6eb9065d5ff1e459747226ddce5c6a4c111e728c9f701/yarl-1.20.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56dac5f452ed25eef0f6e3c6a066c6ab68971d96a9fb441791cad0efba6140d3", size = 335987, upload-time = "2025-06-10T00:45:08.227Z" }, - { url = 
"https://files.pythonhosted.org/packages/3e/49/bc728a7fe7d0e9336e2b78f0958a2d6b288ba89f25a1762407a222bf53c3/yarl-1.20.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7d7f497126d65e2cad8dc5f97d34c27b19199b6414a40cb36b52f41b79014be", size = 329361, upload-time = "2025-06-10T00:45:10.11Z" }, - { url = "https://files.pythonhosted.org/packages/93/8f/b811b9d1f617c83c907e7082a76e2b92b655400e61730cd61a1f67178393/yarl-1.20.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:67e708dfb8e78d8a19169818eeb5c7a80717562de9051bf2413aca8e3696bf16", size = 346460, upload-time = "2025-06-10T00:45:12.055Z" }, - { url = "https://files.pythonhosted.org/packages/70/fd/af94f04f275f95da2c3b8b5e1d49e3e79f1ed8b6ceb0f1664cbd902773ff/yarl-1.20.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:595c07bc79af2494365cc96ddeb772f76272364ef7c80fb892ef9d0649586513", size = 334486, upload-time = "2025-06-10T00:45:13.995Z" }, - { url = "https://files.pythonhosted.org/packages/84/65/04c62e82704e7dd0a9b3f61dbaa8447f8507655fd16c51da0637b39b2910/yarl-1.20.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7bdd2f80f4a7df852ab9ab49484a4dee8030023aa536df41f2d922fd57bf023f", size = 342219, upload-time = "2025-06-10T00:45:16.479Z" }, - { url = "https://files.pythonhosted.org/packages/91/95/459ca62eb958381b342d94ab9a4b6aec1ddec1f7057c487e926f03c06d30/yarl-1.20.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:c03bfebc4ae8d862f853a9757199677ab74ec25424d0ebd68a0027e9c639a390", size = 350693, upload-time = "2025-06-10T00:45:18.399Z" }, - { url = "https://files.pythonhosted.org/packages/a6/00/d393e82dd955ad20617abc546a8f1aee40534d599ff555ea053d0ec9bf03/yarl-1.20.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:344d1103e9c1523f32a5ed704d576172d2cabed3122ea90b1d4e11fe17c66458", size = 355803, upload-time = "2025-06-10T00:45:20.677Z" }, - { url = 
"https://files.pythonhosted.org/packages/9e/ed/c5fb04869b99b717985e244fd93029c7a8e8febdfcffa06093e32d7d44e7/yarl-1.20.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:88cab98aa4e13e1ade8c141daeedd300a4603b7132819c484841bb7af3edce9e", size = 341709, upload-time = "2025-06-10T00:45:23.221Z" }, - { url = "https://files.pythonhosted.org/packages/24/fd/725b8e73ac2a50e78a4534ac43c6addf5c1c2d65380dd48a9169cc6739a9/yarl-1.20.1-cp313-cp313t-win32.whl", hash = "sha256:b121ff6a7cbd4abc28985b6028235491941b9fe8fe226e6fdc539c977ea1739d", size = 86591, upload-time = "2025-06-10T00:45:25.793Z" }, - { url = "https://files.pythonhosted.org/packages/94/c3/b2e9f38bc3e11191981d57ea08cab2166e74ea770024a646617c9cddd9f6/yarl-1.20.1-cp313-cp313t-win_amd64.whl", hash = "sha256:541d050a355bbbc27e55d906bc91cb6fe42f96c01413dd0f4ed5a5240513874f", size = 93003, upload-time = "2025-06-10T00:45:27.752Z" }, - { url = "https://files.pythonhosted.org/packages/b4/2d/2345fce04cfd4bee161bf1e7d9cdc702e3e16109021035dbb24db654a622/yarl-1.20.1-py3-none-any.whl", hash = "sha256:83b8eb083fe4683c6115795d9fc1cfaf2cbbefb19b3a1cb68f6527460f483a77", size = 46542, upload-time = "2025-06-10T00:46:07.521Z" }, + { url = "https://files.pythonhosted.org/packages/75/ff/46736024fee3429b80a165a732e38e5d5a238721e634ab41b040d49f8738/yarl-1.22.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e340382d1afa5d32b892b3ff062436d592ec3d692aeea3bef3a5cfe11bbf8c6f", size = 142000, upload-time = "2025-10-06T14:09:44.631Z" }, + { url = "https://files.pythonhosted.org/packages/5a/9a/b312ed670df903145598914770eb12de1bac44599549b3360acc96878df8/yarl-1.22.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f1e09112a2c31ffe8d80be1b0988fa6a18c5d5cad92a9ffbb1c04c91bfe52ad2", size = 94338, upload-time = "2025-10-06T14:09:46.372Z" }, + { url = "https://files.pythonhosted.org/packages/ba/f5/0601483296f09c3c65e303d60c070a5c19fcdbc72daa061e96170785bc7d/yarl-1.22.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:939fe60db294c786f6b7c2d2e121576628468f65453d86b0fe36cb52f987bd74", size = 94909, upload-time = "2025-10-06T14:09:48.648Z" }, + { url = "https://files.pythonhosted.org/packages/60/41/9a1fe0b73dbcefce72e46cf149b0e0a67612d60bfc90fb59c2b2efdfbd86/yarl-1.22.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e1651bf8e0398574646744c1885a41198eba53dc8a9312b954073f845c90a8df", size = 372940, upload-time = "2025-10-06T14:09:50.089Z" }, + { url = "https://files.pythonhosted.org/packages/17/7a/795cb6dfee561961c30b800f0ed616b923a2ec6258b5def2a00bf8231334/yarl-1.22.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b8a0588521a26bf92a57a1705b77b8b59044cdceccac7151bd8d229e66b8dedb", size = 345825, upload-time = "2025-10-06T14:09:52.142Z" }, + { url = "https://files.pythonhosted.org/packages/d7/93/a58f4d596d2be2ae7bab1a5846c4d270b894958845753b2c606d666744d3/yarl-1.22.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:42188e6a615c1a75bcaa6e150c3fe8f3e8680471a6b10150c5f7e83f47cc34d2", size = 386705, upload-time = "2025-10-06T14:09:54.128Z" }, + { url = "https://files.pythonhosted.org/packages/61/92/682279d0e099d0e14d7fd2e176bd04f48de1484f56546a3e1313cd6c8e7c/yarl-1.22.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f6d2cb59377d99718913ad9a151030d6f83ef420a2b8f521d94609ecc106ee82", size = 396518, upload-time = "2025-10-06T14:09:55.762Z" }, + { url = "https://files.pythonhosted.org/packages/db/0f/0d52c98b8a885aeda831224b78f3be7ec2e1aa4a62091f9f9188c3c65b56/yarl-1.22.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50678a3b71c751d58d7908edc96d332af328839eea883bb554a43f539101277a", size = 377267, upload-time = "2025-10-06T14:09:57.958Z" }, + { url = 
"https://files.pythonhosted.org/packages/22/42/d2685e35908cbeaa6532c1fc73e89e7f2efb5d8a7df3959ea8e37177c5a3/yarl-1.22.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e8fbaa7cec507aa24ea27a01456e8dd4b6fab829059b69844bd348f2d467124", size = 365797, upload-time = "2025-10-06T14:09:59.527Z" }, + { url = "https://files.pythonhosted.org/packages/a2/83/cf8c7bcc6355631762f7d8bdab920ad09b82efa6b722999dfb05afa6cfac/yarl-1.22.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:433885ab5431bc3d3d4f2f9bd15bfa1614c522b0f1405d62c4f926ccd69d04fa", size = 365535, upload-time = "2025-10-06T14:10:01.139Z" }, + { url = "https://files.pythonhosted.org/packages/25/e1/5302ff9b28f0c59cac913b91fe3f16c59a033887e57ce9ca5d41a3a94737/yarl-1.22.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:b790b39c7e9a4192dc2e201a282109ed2985a1ddbd5ac08dc56d0e121400a8f7", size = 382324, upload-time = "2025-10-06T14:10:02.756Z" }, + { url = "https://files.pythonhosted.org/packages/bf/cd/4617eb60f032f19ae3a688dc990d8f0d89ee0ea378b61cac81ede3e52fae/yarl-1.22.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:31f0b53913220599446872d757257be5898019c85e7971599065bc55065dc99d", size = 383803, upload-time = "2025-10-06T14:10:04.552Z" }, + { url = "https://files.pythonhosted.org/packages/59/65/afc6e62bb506a319ea67b694551dab4a7e6fb7bf604e9bd9f3e11d575fec/yarl-1.22.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a49370e8f711daec68d09b821a34e1167792ee2d24d405cbc2387be4f158b520", size = 374220, upload-time = "2025-10-06T14:10:06.489Z" }, + { url = "https://files.pythonhosted.org/packages/e7/3d/68bf18d50dc674b942daec86a9ba922d3113d8399b0e52b9897530442da2/yarl-1.22.0-cp312-cp312-win32.whl", hash = "sha256:70dfd4f241c04bd9239d53b17f11e6ab672b9f1420364af63e8531198e3f5fe8", size = 81589, upload-time = "2025-10-06T14:10:09.254Z" }, + { url = "https://files.pythonhosted.org/packages/c8/9a/6ad1a9b37c2f72874f93e691b2e7ecb6137fb2b899983125db4204e47575/yarl-1.22.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:8884d8b332a5e9b88e23f60bb166890009429391864c685e17bd73a9eda9105c", size = 87213, upload-time = "2025-10-06T14:10:11.369Z" }, + { url = "https://files.pythonhosted.org/packages/44/c5/c21b562d1680a77634d748e30c653c3ca918beb35555cff24986fff54598/yarl-1.22.0-cp312-cp312-win_arm64.whl", hash = "sha256:ea70f61a47f3cc93bdf8b2f368ed359ef02a01ca6393916bc8ff877427181e74", size = 81330, upload-time = "2025-10-06T14:10:13.112Z" }, + { url = "https://files.pythonhosted.org/packages/73/ae/b48f95715333080afb75a4504487cbe142cae1268afc482d06692d605ae6/yarl-1.22.0-py3-none-any.whl", hash = "sha256:1380560bdba02b6b6c90de54133c81c9f2a453dee9912fe58c1dcced1edb7cff", size = 46814, upload-time = "2025-10-06T14:12:53.872Z" }, +] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, ] [[package]] name = "zope-event" -version = "5.0" +version = "6.1" source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "setuptools" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/46/c2/427f1867bb96555d1d34342f1dd97f8c420966ab564d58d18469a1db8736/zope.event-5.0.tar.gz", hash = "sha256:bac440d8d9891b4068e2b5a2c5e2c9765a9df762944bda6955f96bb9b91e67cd", size = 17350, upload-time = "2023-06-23T06:28:35.709Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/46/33/d3eeac228fc14de76615612ee208be2d8a5b5b0fada36bf9b62d6b40600c/zope_event-6.1.tar.gz", hash = "sha256:6052a3e0cb8565d3d4ef1a3a7809336ac519bc4fe38398cb8d466db09adef4f0", size = 18739, upload-time = "2025-11-07T08:05:49.934Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fe/42/f8dbc2b9ad59e927940325a22d6d3931d630c3644dae7e2369ef5d9ba230/zope.event-5.0-py3-none-any.whl", hash = "sha256:2832e95014f4db26c47a13fdaef84cef2f4df37e66b59d8f1f4a8f319a632c26", size = 6824, upload-time = "2023-06-23T06:28:32.652Z" }, + { url = "https://files.pythonhosted.org/packages/c2/b0/956902e5e1302f8c5d124e219c6bf214e2649f92ad5fce85b05c039a04c9/zope_event-6.1-py3-none-any.whl", hash = "sha256:0ca78b6391b694272b23ec1335c0294cc471065ed10f7f606858fc54566c25a0", size = 6414, upload-time = "2025-11-07T08:05:48.874Z" }, ] [[package]] name = "zope-interface" -version = "7.2" +version = "8.1.1" source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "setuptools" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/30/93/9210e7606be57a2dfc6277ac97dcc864fd8d39f142ca194fdc186d596fda/zope.interface-7.2.tar.gz", hash = "sha256:8b49f1a3d1ee4cdaf5b32d2e738362c7f5e40ac8b46dd7d1a65e82a4872728fe", size = 252960, upload-time = "2024-11-28T08:45:39.224Z" } +sdist = { url = "https://files.pythonhosted.org/packages/71/c9/5ec8679a04d37c797d343f650c51ad67d178f0001c363e44b6ac5f97a9da/zope_interface-8.1.1.tar.gz", hash = "sha256:51b10e6e8e238d719636a401f44f1e366146912407b58453936b781a19be19ec", size = 254748, upload-time = "2025-11-15T08:32:52.404Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/68/0b/c7516bc3bad144c2496f355e35bd699443b82e9437aa02d9867653203b4a/zope.interface-7.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:086ee2f51eaef1e4a52bd7d3111a0404081dadae87f84c0ad4ce2649d4f708b7", size = 208959, upload-time = "2024-11-28T08:47:47.788Z" }, - { url = 
"https://files.pythonhosted.org/packages/a2/e9/1463036df1f78ff8c45a02642a7bf6931ae4a38a4acd6a8e07c128e387a7/zope.interface-7.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:21328fcc9d5b80768bf051faa35ab98fb979080c18e6f84ab3f27ce703bce465", size = 209357, upload-time = "2024-11-28T08:47:50.897Z" }, - { url = "https://files.pythonhosted.org/packages/07/a8/106ca4c2add440728e382f1b16c7d886563602487bdd90004788d45eb310/zope.interface-7.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6dd02ec01f4468da0f234da9d9c8545c5412fef80bc590cc51d8dd084138a89", size = 264235, upload-time = "2024-11-28T09:18:15.56Z" }, - { url = "https://files.pythonhosted.org/packages/fc/ca/57286866285f4b8a4634c12ca1957c24bdac06eae28fd4a3a578e30cf906/zope.interface-7.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e7da17f53e25d1a3bde5da4601e026adc9e8071f9f6f936d0fe3fe84ace6d54", size = 259253, upload-time = "2024-11-28T08:48:29.025Z" }, - { url = "https://files.pythonhosted.org/packages/96/08/2103587ebc989b455cf05e858e7fbdfeedfc3373358320e9c513428290b1/zope.interface-7.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cab15ff4832580aa440dc9790b8a6128abd0b88b7ee4dd56abacbc52f212209d", size = 264702, upload-time = "2024-11-28T08:48:37.363Z" }, - { url = "https://files.pythonhosted.org/packages/5f/c7/3c67562e03b3752ba4ab6b23355f15a58ac2d023a6ef763caaca430f91f2/zope.interface-7.2-cp312-cp312-win_amd64.whl", hash = "sha256:29caad142a2355ce7cfea48725aa8bcf0067e2b5cc63fcf5cd9f97ad12d6afb5", size = 212466, upload-time = "2024-11-28T08:49:14.397Z" }, - { url = "https://files.pythonhosted.org/packages/c6/3b/e309d731712c1a1866d61b5356a069dd44e5b01e394b6cb49848fa2efbff/zope.interface-7.2-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:3e0350b51e88658d5ad126c6a57502b19d5f559f6cb0a628e3dc90442b53dd98", size = 208961, upload-time = 
"2024-11-28T08:48:29.865Z" }, - { url = "https://files.pythonhosted.org/packages/49/65/78e7cebca6be07c8fc4032bfbb123e500d60efdf7b86727bb8a071992108/zope.interface-7.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15398c000c094b8855d7d74f4fdc9e73aa02d4d0d5c775acdef98cdb1119768d", size = 209356, upload-time = "2024-11-28T08:48:33.297Z" }, - { url = "https://files.pythonhosted.org/packages/11/b1/627384b745310d082d29e3695db5f5a9188186676912c14b61a78bbc6afe/zope.interface-7.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:802176a9f99bd8cc276dcd3b8512808716492f6f557c11196d42e26c01a69a4c", size = 264196, upload-time = "2024-11-28T09:18:17.584Z" }, - { url = "https://files.pythonhosted.org/packages/b8/f6/54548df6dc73e30ac6c8a7ff1da73ac9007ba38f866397091d5a82237bd3/zope.interface-7.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb23f58a446a7f09db85eda09521a498e109f137b85fb278edb2e34841055398", size = 259237, upload-time = "2024-11-28T08:48:31.71Z" }, - { url = "https://files.pythonhosted.org/packages/b6/66/ac05b741c2129fdf668b85631d2268421c5cd1a9ff99be1674371139d665/zope.interface-7.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a71a5b541078d0ebe373a81a3b7e71432c61d12e660f1d67896ca62d9628045b", size = 264696, upload-time = "2024-11-28T08:48:41.161Z" }, - { url = "https://files.pythonhosted.org/packages/0a/2f/1bccc6f4cc882662162a1158cda1a7f616add2ffe322b28c99cb031b4ffc/zope.interface-7.2-cp313-cp313-win_amd64.whl", hash = "sha256:4893395d5dd2ba655c38ceb13014fd65667740f09fa5bb01caa1e6284e48c0cd", size = 212472, upload-time = "2024-11-28T08:49:56.587Z" }, + { url = "https://files.pythonhosted.org/packages/08/3d/f5b8dd2512f33bfab4faba71f66f6873603d625212206dd36f12403ae4ca/zope_interface-8.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a16715808408db7252b8c1597ed9008bdad7bf378ed48eb9b0595fad4170e49d", 
size = 208660, upload-time = "2025-11-15T08:36:53.579Z" }, + { url = "https://files.pythonhosted.org/packages/e5/41/c331adea9b11e05ff9ac4eb7d3032b24c36a3654ae9f2bf4ef2997048211/zope_interface-8.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce6b58752acc3352c4aa0b55bbeae2a941d61537e6afdad2467a624219025aae", size = 208851, upload-time = "2025-11-15T08:36:54.854Z" }, + { url = "https://files.pythonhosted.org/packages/25/00/7a8019c3bb8b119c5f50f0a4869183a4b699ca004a7f87ce98382e6b364c/zope_interface-8.1.1-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:807778883d07177713136479de7fd566f9056a13aef63b686f0ab4807c6be259", size = 259292, upload-time = "2025-11-15T08:36:56.409Z" }, + { url = "https://files.pythonhosted.org/packages/1a/fc/b70e963bf89345edffdd5d16b61e789fdc09365972b603e13785360fea6f/zope_interface-8.1.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50e5eb3b504a7d63dc25211b9298071d5b10a3eb754d6bf2f8ef06cb49f807ab", size = 264741, upload-time = "2025-11-15T08:36:57.675Z" }, + { url = "https://files.pythonhosted.org/packages/96/fe/7d0b5c0692b283901b34847f2b2f50d805bfff4b31de4021ac9dfb516d2a/zope_interface-8.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eee6f93b2512ec9466cf30c37548fd3ed7bc4436ab29cd5943d7a0b561f14f0f", size = 264281, upload-time = "2025-11-15T08:36:58.968Z" }, + { url = "https://files.pythonhosted.org/packages/2b/2c/a7cebede1cf2757be158bcb151fe533fa951038cfc5007c7597f9f86804b/zope_interface-8.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:80edee6116d569883c58ff8efcecac3b737733d646802036dc337aa839a5f06b", size = 212327, upload-time = "2025-11-15T08:37:00.4Z" }, +] + +[[package]] +name = "zstandard" +version = "0.25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/fd/aa/3e0508d5a5dd96529cdc5a97011299056e14c6505b678fd58938792794b1/zstandard-0.25.0.tar.gz", hash = "sha256:7713e1179d162cf5c7906da876ec2ccb9c3a9dcbdffef0cc7f70c3667a205f0b", size = 711513, upload-time = "2025-09-14T22:15:54.002Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/82/fc/f26eb6ef91ae723a03e16eddb198abcfce2bc5a42e224d44cc8b6765e57e/zstandard-0.25.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7b3c3a3ab9daa3eed242d6ecceead93aebbb8f5f84318d82cee643e019c4b73b", size = 795738, upload-time = "2025-09-14T22:16:56.237Z" }, + { url = "https://files.pythonhosted.org/packages/aa/1c/d920d64b22f8dd028a8b90e2d756e431a5d86194caa78e3819c7bf53b4b3/zstandard-0.25.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:913cbd31a400febff93b564a23e17c3ed2d56c064006f54efec210d586171c00", size = 640436, upload-time = "2025-09-14T22:16:57.774Z" }, + { url = "https://files.pythonhosted.org/packages/53/6c/288c3f0bd9fcfe9ca41e2c2fbfd17b2097f6af57b62a81161941f09afa76/zstandard-0.25.0-cp312-cp312-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:011d388c76b11a0c165374ce660ce2c8efa8e5d87f34996aa80f9c0816698b64", size = 5343019, upload-time = "2025-09-14T22:16:59.302Z" }, + { url = "https://files.pythonhosted.org/packages/1e/15/efef5a2f204a64bdb5571e6161d49f7ef0fffdbca953a615efbec045f60f/zstandard-0.25.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6dffecc361d079bb48d7caef5d673c88c8988d3d33fb74ab95b7ee6da42652ea", size = 5063012, upload-time = "2025-09-14T22:17:01.156Z" }, + { url = "https://files.pythonhosted.org/packages/b7/37/a6ce629ffdb43959e92e87ebdaeebb5ac81c944b6a75c9c47e300f85abdf/zstandard-0.25.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:7149623bba7fdf7e7f24312953bcf73cae103db8cae49f8154dd1eadc8a29ecb", size = 5394148, upload-time = "2025-09-14T22:17:03.091Z" }, + { url = 
"https://files.pythonhosted.org/packages/e3/79/2bf870b3abeb5c070fe2d670a5a8d1057a8270f125ef7676d29ea900f496/zstandard-0.25.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:6a573a35693e03cf1d67799fd01b50ff578515a8aeadd4595d2a7fa9f3ec002a", size = 5451652, upload-time = "2025-09-14T22:17:04.979Z" }, + { url = "https://files.pythonhosted.org/packages/53/60/7be26e610767316c028a2cbedb9a3beabdbe33e2182c373f71a1c0b88f36/zstandard-0.25.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5a56ba0db2d244117ed744dfa8f6f5b366e14148e00de44723413b2f3938a902", size = 5546993, upload-time = "2025-09-14T22:17:06.781Z" }, + { url = "https://files.pythonhosted.org/packages/85/c7/3483ad9ff0662623f3648479b0380d2de5510abf00990468c286c6b04017/zstandard-0.25.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:10ef2a79ab8e2974e2075fb984e5b9806c64134810fac21576f0668e7ea19f8f", size = 5046806, upload-time = "2025-09-14T22:17:08.415Z" }, + { url = "https://files.pythonhosted.org/packages/08/b3/206883dd25b8d1591a1caa44b54c2aad84badccf2f1de9e2d60a446f9a25/zstandard-0.25.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aaf21ba8fb76d102b696781bddaa0954b782536446083ae3fdaa6f16b25a1c4b", size = 5576659, upload-time = "2025-09-14T22:17:10.164Z" }, + { url = "https://files.pythonhosted.org/packages/9d/31/76c0779101453e6c117b0ff22565865c54f48f8bd807df2b00c2c404b8e0/zstandard-0.25.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1869da9571d5e94a85a5e8d57e4e8807b175c9e4a6294e3b66fa4efb074d90f6", size = 4953933, upload-time = "2025-09-14T22:17:11.857Z" }, + { url = "https://files.pythonhosted.org/packages/18/e1/97680c664a1bf9a247a280a053d98e251424af51f1b196c6d52f117c9720/zstandard-0.25.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:809c5bcb2c67cd0ed81e9229d227d4ca28f82d0f778fc5fea624a9def3963f91", size = 5268008, upload-time = "2025-09-14T22:17:13.627Z" }, + { url = 
"https://files.pythonhosted.org/packages/1e/73/316e4010de585ac798e154e88fd81bb16afc5c5cb1a72eeb16dd37e8024a/zstandard-0.25.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f27662e4f7dbf9f9c12391cb37b4c4c3cb90ffbd3b1fb9284dadbbb8935fa708", size = 5433517, upload-time = "2025-09-14T22:17:16.103Z" }, + { url = "https://files.pythonhosted.org/packages/5b/60/dd0f8cfa8129c5a0ce3ea6b7f70be5b33d2618013a161e1ff26c2b39787c/zstandard-0.25.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:99c0c846e6e61718715a3c9437ccc625de26593fea60189567f0118dc9db7512", size = 5814292, upload-time = "2025-09-14T22:17:17.827Z" }, + { url = "https://files.pythonhosted.org/packages/fc/5f/75aafd4b9d11b5407b641b8e41a57864097663699f23e9ad4dbb91dc6bfe/zstandard-0.25.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:474d2596a2dbc241a556e965fb76002c1ce655445e4e3bf38e5477d413165ffa", size = 5360237, upload-time = "2025-09-14T22:17:19.954Z" }, + { url = "https://files.pythonhosted.org/packages/ff/8d/0309daffea4fcac7981021dbf21cdb2e3427a9e76bafbcdbdf5392ff99a4/zstandard-0.25.0-cp312-cp312-win32.whl", hash = "sha256:23ebc8f17a03133b4426bcc04aabd68f8236eb78c3760f12783385171b0fd8bd", size = 436922, upload-time = "2025-09-14T22:17:24.398Z" }, + { url = "https://files.pythonhosted.org/packages/79/3b/fa54d9015f945330510cb5d0b0501e8253c127cca7ebe8ba46a965df18c5/zstandard-0.25.0-cp312-cp312-win_amd64.whl", hash = "sha256:ffef5a74088f1e09947aecf91011136665152e0b4b359c42be3373897fb39b01", size = 506276, upload-time = "2025-09-14T22:17:21.429Z" }, + { url = "https://files.pythonhosted.org/packages/ea/6b/8b51697e5319b1f9ac71087b0af9a40d8a6288ff8025c36486e0c12abcc4/zstandard-0.25.0-cp312-cp312-win_arm64.whl", hash = "sha256:181eb40e0b6a29b3cd2849f825e0fa34397f649170673d385f3598ae17cca2e9", size = 462679, upload-time = "2025-09-14T22:17:23.147Z" }, ]