diff --git a/.dockerignore b/.dockerignore index 0b6bf22..6406e77 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,2 +1,6 @@ seg_cache -test \ No newline at end of file +test +.venv +__pycache__/ +*.pyc +.git/ \ No newline at end of file diff --git a/README.md b/README.md index 6085b85..b444530 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,6 @@ $ conda activate trinity_client_aida $ pip install -r requirements.txt $ conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia -y - $ pip install mmcv==1.4.2 -f https://download.openmmlab.com/mmcv/dist/cu117/torch1.13/index.html 1. 启动服务器 diff --git a/app/service/attribute/service_att_recognition.py b/app/service/attribute/service_att_recognition.py index c007184..ff3d169 100644 --- a/app/service/attribute/service_att_recognition.py +++ b/app/service/attribute/service_att_recognition.py @@ -3,7 +3,6 @@ from pprint import pprint import cv2 -import mmcv import numpy as np import pandas as pd import torch @@ -12,6 +11,7 @@ from minio import Minio from app.core.config import settings, DESIGN_MODEL_URL from app.schemas.attribute_retrieve import AttributeRecognitionModel +from app.service.utils.image_normalize import my_imnormalize from app.service.utils.new_oss_client import oss_get_image minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) @@ -109,10 +109,9 @@ class AttributeRecognition: @staticmethod def preprocess(img): - img = mmcv.imread(img) img_scale = (224, 224) img = cv2.resize(img, img_scale) - img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) + img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0) return preprocessed_img diff --git a/app/service/attribute/service_category_recognition.py 
b/app/service/attribute/service_category_recognition.py index 5a04ba2..f3bba9b 100644 --- a/app/service/attribute/service_category_recognition.py +++ b/app/service/attribute/service_category_recognition.py @@ -10,7 +10,6 @@ from minio import Minio from skimage import transform import cv2 -import mmcv import numpy as np import pandas as pd import tritonclient.http as httpclient @@ -18,6 +17,7 @@ import torch from app.core.config import settings, DESIGN_MODEL_URL from app.schemas.attribute_retrieve import CategoryRecognitionModel +from app.service.utils.image_normalize import my_imnormalize from app.service.utils.new_oss_client import oss_get_image minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) @@ -39,11 +39,10 @@ class CategoryRecognition: @staticmethod def preprocess(img): - img = mmcv.imread(img) # ori_shape = img.shape[:2] img_scale = (224, 224) img = cv2.resize(img, img_scale) - img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) + img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0) return preprocessed_img diff --git a/app/service/brand_dna/service.py b/app/service/brand_dna/service.py index 350f395..6736df5 100644 --- a/app/service/brand_dna/service.py +++ b/app/service/brand_dna/service.py @@ -1,7 +1,6 @@ import logging import cv2 -import mmcv import numpy as np import pandas as pd import torch @@ -14,6 +13,7 @@ from app.core.config import settings from app.schemas.brand_dna import BrandDnaModel from app.service.attribute.config import const from app.service.utils.generate_uuid import generate_uuid +from app.service.utils.image_normalize import my_imnormalize from app.service.utils.new_oss_client import oss_upload_image, oss_get_image minio_client = 
Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE) @@ -202,7 +202,7 @@ class BrandDna: # 服装分割预处理 @staticmethod def seg_product_preprocess(image): - img = mmcv.imread(image) + img = image ori_shape = img.shape[:2] img_scale_w, img_scale_h = ori_shape if ori_shape[0] > 1024: @@ -211,9 +211,9 @@ class BrandDna: img_scale_h = 1024 # 如果图片size任意一边 大于 1024, 则会resize 成1024 if ori_shape != (img_scale_w, img_scale_h): - # mmcv.imresize(img, img_scale_h, img_scale_w) # 老代码 引以为戒!哈哈哈~ h和w写反了 + # my_imnormalize(img, img_scale_h, img_scale_w) # 老代码 引以为戒!哈哈哈~ h和w写反了 img = cv2.resize(img, (img_scale_h, img_scale_w)) - img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) + img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0) return preprocessed_img, ori_shape @@ -227,11 +227,10 @@ class BrandDna: # 类别检测模型预处理 @staticmethod def category_preprocess(img): - img = mmcv.imread(img) # ori_shape = img.shape[:2] img_scale = (224, 224) img = cv2.resize(img, img_scale) - img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) + img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0) return preprocessed_img diff --git a/app/service/design_batch/utils/design_ensemble.py b/app/service/design_batch/utils/design_ensemble.py index 193da0e..2f107ef 100644 --- a/app/service/design_batch/utils/design_ensemble.py +++ b/app/service/design_batch/utils/design_ensemble.py @@ -10,13 +10,13 @@ import logging import cv2 -import mmcv import numpy as np import torch import torch.nn.functional as F import tritonclient.http as httpclient from 
app.core.config import DESIGN_MODEL_URL, DESIGN_MODEL_NAME +from app.service.utils.image_normalize import my_imnormalize """ keypoint @@ -25,13 +25,13 @@ from app.core.config import DESIGN_MODEL_URL, DESIGN_MODEL_NAME def keypoint_preprocess(img_path): - img = mmcv.imread(img_path) + img = img_path img_scale = (256, 256) h, w = img.shape[:2] img = cv2.resize(img, img_scale) w_scale = img_scale[0] / w h_scale = img_scale[1] / h - img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) + img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0) return preprocessed_img, (w_scale, h_scale) @@ -74,7 +74,7 @@ def keypoint_postprocess(output, scale_factor): # KNet def seg_preprocess(img_path): - img = mmcv.imread(img_path) + img = img_path ori_shape = img.shape[:2] img_scale_w, img_scale_h = ori_shape if ori_shape[0] > 1024: @@ -83,9 +83,9 @@ def seg_preprocess(img_path): img_scale_h = 1024 # 如果图片size任意一边 大于 1024, 则会resize 成1024 if ori_shape != (img_scale_w, img_scale_h): - # mmcv.imresize(img, img_scale_h, img_scale_w) # 老代码 引以为戒!哈哈哈~ h和w写反了 + # my_imnormalize(img, img_scale_h, img_scale_w) # 老代码 引以为戒!哈哈哈~ h和w写反了 img = cv2.resize(img, (img_scale_h, img_scale_w)) - img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) + img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0) return preprocessed_img, ori_shape diff --git a/app/service/design_fast/utils/design_ensemble.py b/app/service/design_fast/utils/design_ensemble.py index 9aa674c..0b7b88f 100644 --- a/app/service/design_fast/utils/design_ensemble.py +++ b/app/service/design_fast/utils/design_ensemble.py @@ -10,12 +10,12 @@ import 
logging import cv2 -import mmcv import numpy as np import torch import tritonclient.http as httpclient from app.core.config import DESIGN_MODEL_URL, DESIGN_MODEL_NAME +from app.service.utils.image_normalize import my_imnormalize """ keypoint @@ -24,14 +24,14 @@ from app.core.config import DESIGN_MODEL_URL, DESIGN_MODEL_NAME def keypoint_preprocess(img_path): - img = mmcv.imread(img_path) + img = img_path img = cv2.copyMakeBorder(img, 25, 25, 25, 25, cv2.BORDER_CONSTANT, value=[255, 255, 255]) img_scale = (256, 256) h, w = img.shape[:2] img = cv2.resize(img, img_scale) w_scale = img_scale[0] / w h_scale = img_scale[1] / h - img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) + img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0) return preprocessed_img, (w_scale, h_scale) @@ -78,7 +78,7 @@ def keypoint_postprocess(output, scale_factor): # KNet def seg_preprocess(img_path): - img = mmcv.imread(img_path) + img = img_path ori_shape = img.shape[:2] img_scale_w, img_scale_h = ori_shape if ori_shape[0] > 1024: @@ -87,12 +87,12 @@ def seg_preprocess(img_path): img_scale_h = 1024 # 如果图片size任意一边 大于 1024, 则会resize 成1024 if ori_shape != (img_scale_w, img_scale_h): - # mmcv.imresize(img, img_scale_h, img_scale_w) # 老代码 引以为戒!哈哈哈~ h和w写反了 + # my_imnormalize(img, img_scale_h, img_scale_w) # 老代码 引以为戒!哈哈哈~ h和w写反了 img = cv2.resize(img, (img_scale_h, img_scale_w)) # 扩充25的白边 img = cv2.copyMakeBorder(img, 25, 25, 25, 25, cv2.BORDER_CONSTANT, value=[255, 255, 255]) - img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) + img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0) return 
preprocessed_img, ori_shape diff --git a/app/service/generate_image/service_agent_tool_generate_image.py b/app/service/generate_image/service_agent_tool_generate_image.py index 76f5de8..322d2b5 100644 --- a/app/service/generate_image/service_agent_tool_generate_image.py +++ b/app/service/generate_image/service_agent_tool_generate_image.py @@ -11,7 +11,6 @@ import logging import uuid import cv2 -import mmcv import numpy as np import pandas as pd import torch @@ -21,6 +20,7 @@ from minio import Minio from tritonclient.utils import np_to_triton_dtype from app.core.config import settings, FAST_GI_MODEL_URL, GI_MODEL_URL, DESIGN_MODEL_URL, FAST_GI_MODEL_NAME, GI_MODEL_NAME +from app.service.utils.image_normalize import my_imnormalize from app.service.utils.new_oss_client import oss_upload_image logger = logging.getLogger() @@ -86,10 +86,9 @@ class AgentToolGenerateImage: @staticmethod def preprocess(img): - img = mmcv.imread(img) img_scale = (224, 224) img = cv2.resize(img, img_scale) - img = mmcv.imnormalize( + img = my_imnormalize( img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) diff --git a/app/service/generate_image/service_generate_image.py b/app/service/generate_image/service_generate_image.py index d9772b5..dc8dccb 100644 --- a/app/service/generate_image/service_generate_image.py +++ b/app/service/generate_image/service_generate_image.py @@ -189,10 +189,10 @@ if __name__ == '__main__': tasks_id="123-89", prompt="a single item of sketch of dress, 4k, white background", image_url="aida-collection-element/89/Sketchboard/95f20cdc-e059-435c-b8b1-d04cc9e80c3d.png", - mode='img2img', + mode='txt2img', category="sketch", gender="Female", - version="fast" + version="hight" ) server = GenerateImage(rd) print(server.get_result()) diff --git a/app/service/generate_image/utils/image_processing.py b/app/service/generate_image/utils/image_processing.py index 692ffc9..32be2d8 100644 --- 
a/app/service/generate_image/utils/image_processing.py +++ b/app/service/generate_image/utils/image_processing.py @@ -2,23 +2,23 @@ import logging import time import cv2 -import mmcv import numpy as np import torch import tritonclient.http as httpclient from app.core.config import settings, DESIGN_MODEL_URL, DESIGN_MODEL_NAME from app.service.generate_image.utils.upload_sd_image import upload_stain_png_sd, upload_face_png_sd +from app.service.utils.image_normalize import my_imnormalize logger = logging.getLogger() def seg_preprocess(img_path): - img = mmcv.imread(img_path) + img = img_path ori_shape = img.shape[:2] img_scale = ori_shape img = cv2.resize(img, img_scale) - img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) + img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0) return preprocessed_img, ori_shape @@ -242,10 +242,9 @@ def stain_detection(image, user_id, category, tasks_id, spot_size=100): def generate_category_recognition(image, gender): def preprocess(img): - img = mmcv.imread(img) img_scale = (224, 224) img = cv2.resize(img, img_scale) - img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) + img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0) return preprocessed_img diff --git a/app/service/lineart/service.py b/app/service/lineart/service.py index c459b3c..e34840e 100644 --- a/app/service/lineart/service.py +++ b/app/service/lineart/service.py @@ -1,7 +1,6 @@ import logging import cv2 -import mmcv import numpy as np import torch import torch.nn.functional as F @@ -10,6 +9,7 @@ from minio import Minio from app.core.config import settings from 
app.core.config import DESIGN_MODEL_URL from app.schemas.image2sketch import Image2SketchModel +from app.service.utils.image_normalize import my_imnormalize from app.service.utils.new_oss_client import oss_get_image, oss_upload_image logger = logging.getLogger() @@ -67,7 +67,7 @@ class LineArtService: @staticmethod def line_art_preprocess(image): - img = mmcv.imread(image) + img = image ori_shape = img.shape[:2] img_scale_w, img_scale_h = ori_shape if ori_shape[0] > 1024: @@ -76,9 +76,9 @@ class LineArtService: img_scale_h = 1024 # 如果图片size任意一边 大于 1024, 则会resize 成1024 if ori_shape != (img_scale_w, img_scale_h): - # mmcv.imresize(img, img_scale_h, img_scale_w) # 老代码 引以为戒!哈哈哈~ h和w写反了 + # mmcv.imresize(img, img_scale_h, img_scale_w) # 老代码 引以为戒!哈哈哈~ h和w写反了 img = cv2.resize(img, (img_scale_h, img_scale_w)) - img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) + img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True) preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0) return preprocessed_img, ori_shape diff --git a/app/service/utils/image_normalize.py b/app/service/utils/image_normalize.py new file mode 100644 index 0000000..f8f2939 --- /dev/null +++ b/app/service/utils/image_normalize.py @@ -0,0 +1,27 @@ +import cv2 +import numpy as np + + +def my_imnormalize(img, mean, std, to_rgb=True): + """Inplace normalize an image with mean and std. + + Args: + img (ndarray): Image to be normalized. + mean (ndarray): The mean to be used for normalize. + std (ndarray): The std to be used for normalize. + to_rgb (bool): Whether to convert to rgb. + + Returns: + ndarray: The normalized image. 
+ """ + # cv2 inplace normalization does not accept uint8 + img = img.copy().astype(np.float32) + + assert img.dtype != np.uint8 + mean = np.float64(mean.reshape(1, -1)) + stdinv = 1 / np.float64(std.reshape(1, -1)) + if to_rgb: + cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace + cv2.subtract(img, mean, img) # inplace + cv2.multiply(img, stdinv, img) # inplace + return img diff --git a/pyproject.toml b/pyproject.toml index e143cde..61e4f8a 100755 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,8 +23,8 @@ dependencies = [ "load-dotenv>=0.1.0", "loguru>=0.7.3", "minio>=7.2.20", - "mmcv>=2.2.0", "moviepy==1.0.3", + "np>=1.0.2", "numpy<2", "ollama>=0.6.1", "opencv-python>=4.11.0.86", diff --git a/requirements.txt b/requirements.txt index 4eefcb7..e1bf4bd 100644 Binary files a/requirements.txt and b/requirements.txt differ diff --git a/requirements_2.txt b/requirements_2.txt index abeec7f..2647c3a 100644 Binary files a/requirements_2.txt and b/requirements_2.txt differ diff --git a/uv.lock b/uv.lock index 67a7efd..02a7447 100755 --- a/uv.lock +++ b/uv.lock @@ -8,15 +8,6 @@ resolution-markers = [ "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')", ] -[[package]] -name = "addict" -version = "2.4.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/85/ef/fd7649da8af11d93979831e8f1f8097e85e82d5bfeabc8c68b39175d8e75/addict-2.4.0.tar.gz", hash = "sha256:b3b2210e0e067a281f5646c8c5db92e99b7231ea8b0eb5f74dbdf9e259d4e494", size = 9186, upload-time = "2020-11-21T16:21:31.416Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/6a/00/b08f23b7d7e1e14ce01419a467b583edbb93c6cdb8654e54a9cc579cd61f/addict-2.4.0-py3-none-any.whl", hash = "sha256:249bb56bbfd3cdc2a004ea0ff4c2b6ddc84d53bc2194761636eb314d5cfa5dfc", size = 3832, upload-time = "2020-11-21T16:21:29.588Z" }, -] - [[package]] name = "agentaction" version = "0.1.7" @@ -1671,43 +1662,6 
@@ wheels = [ { url = "https://files.pythonhosted.org/packages/3e/9a/b697530a882588a84db616580f2ba5d1d515c815e11c30d219145afeec87/minio-7.2.20-py3-none-any.whl", hash = "sha256:eb33dd2fb80e04c3726a76b13241c6be3c4c46f8d81e1d58e757786f6501897e", size = 93751, upload-time = "2025-11-27T00:37:13.993Z" }, ] -[[package]] -name = "mmcv" -version = "2.2.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "addict" }, - { name = "mmengine" }, - { name = "numpy" }, - { name = "opencv-python" }, - { name = "packaging" }, - { name = "pillow" }, - { name = "pyyaml" }, - { name = "regex", marker = "sys_platform == 'win32'" }, - { name = "yapf" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/e9/a2/57a733e7e84985a8a0e3101dfb8170fc9db92435c16afad253069ae3f9df/mmcv-2.2.0.tar.gz", hash = "sha256:ac479247e808d8802f89eadf04d4118de86bdfe81361ec5aed0cc1bf731c67c9", size = 479121, upload-time = "2024-04-24T14:24:28.064Z" } - -[[package]] -name = "mmengine" -version = "0.10.7" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "addict" }, - { name = "matplotlib" }, - { name = "numpy" }, - { name = "opencv-python" }, - { name = "pyyaml" }, - { name = "regex", marker = "sys_platform == 'win32'" }, - { name = "rich" }, - { name = "termcolor" }, - { name = "yapf" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/17/14/959360bbd8374e23fc1b720906999add16a3ac071a501636db12c5861ff5/mmengine-0.10.7.tar.gz", hash = "sha256:d20ffcc31127567e53dceff132612a87f0081de06cbb7ab2bdb7439125a69225", size = 378090, upload-time = "2025-03-04T12:23:09.568Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/98/8e/f98332248aad102511bea4ae19c0ddacd2f0a994f3ca4c82b7a369e0af8b/mmengine-0.10.7-py3-none-any.whl", hash = "sha256:262ac976a925562f78cd5fd14dd1bc9b680ed0aa81f0d85b723ef782f99c54ee", size = 452720, upload-time = "2025-03-04T12:23:06.339Z" }, -] - [[package]] name = "mmh3" version = "5.2.0" @@ -1801,6 
+1755,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9e/c9/b2622292ea83fbb4ec318f5b9ab867d0a28ab43c5717bb85b0a5f6b3b0a4/networkx-3.6.1-py3-none-any.whl", hash = "sha256:d47fbf302e7d9cbbb9e2555a0d267983d2aa476bac30e90dfbe5669bd57f3762", size = 2068504, upload-time = "2025-12-08T17:02:38.159Z" }, ] +[[package]] +name = "np" +version = "1.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/40/7d/749666e5a9976dcbc4d16d487bbe571efc6bbf4cdf3f4620c0ccc52b57ef/np-1.0.2.tar.gz", hash = "sha256:781265283f3823663ad8fb48741aae62abcf4c78bc19f908f8aa7c1d3eb132f8", size = 7419, upload-time = "2017-10-05T11:26:00.956Z" } + [[package]] name = "numpy" version = "1.26.4" @@ -2269,15 +2229,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/bc/96/aaa61ce33cc98421fb6088af2a03be4157b1e7e0e87087c888e2370a7f45/pillow-12.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:7dfb439562f234f7d57b1ac6bc8fe7f838a4bd49c79230e0f6a1da93e82f1fad", size = 2436012, upload-time = "2025-10-15T18:22:23.621Z" }, ] -[[package]] -name = "platformdirs" -version = "4.5.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, -] - [[package]] name = "posthog" version = "5.4.0" @@ -2746,17 +2697,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" }, ] -[[package]] -name = "regex" -version = "2025.11.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cc/a9/546676f25e573a4cf00fe8e119b78a37b6a8fe2dc95cda877b30889c9c45/regex-2025.11.3.tar.gz", hash = "sha256:1fedc720f9bb2494ce31a58a1631f9c82df6a09b49c19517ea5cc280b4541e01", size = 414669, upload-time = "2025-11-03T21:34:22.089Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/59/9b/7c29be7903c318488983e7d97abcf8ebd3830e4c956c4c540005fcfb0462/regex-2025.11.3-cp312-cp312-win32.whl", hash = "sha256:3839967cf4dc4b985e1570fd8d91078f0c519f30491c60f9ac42a8db039be204", size = 266194, upload-time = "2025-11-03T21:31:51.53Z" }, - { url = "https://files.pythonhosted.org/packages/1a/67/3b92df89f179d7c367be654ab5626ae311cb28f7d5c237b6bb976cd5fbbb/regex-2025.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:e721d1b46e25c481dc5ded6f4b3f66c897c58d2e8cfdf77bbced84339108b0b9", size = 277069, upload-time = "2025-11-03T21:31:53.151Z" }, - { url = "https://files.pythonhosted.org/packages/d7/55/85ba4c066fe5094d35b249c3ce8df0ba623cfd35afb22d6764f23a52a1c5/regex-2025.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:64350685ff08b1d3a6fff33f45a9ca183dc1d58bbfe4981604e70ec9801bbc26", size = 270330, upload-time = "2025-11-03T21:31:54.514Z" }, -] - [[package]] name = "requests" version = "2.32.5" @@ -3224,8 +3164,8 @@ dependencies = [ { name = "load-dotenv" }, { name = "loguru" }, { name = "minio" }, - { name = "mmcv" }, { name = "moviepy" }, + { name = "np" }, { name = "numpy" }, { name = "ollama" }, { name = "opencv-python" }, @@ -3275,8 +3215,8 @@ requires-dist = [ { name = "load-dotenv", specifier = ">=0.1.0" }, { name = "loguru", 
specifier = ">=0.7.3" }, { name = "minio", specifier = ">=7.2.20" }, - { name = "mmcv", specifier = ">=2.2.0" }, { name = "moviepy", specifier = "==1.0.3" }, + { name = "np", specifier = ">=1.0.2" }, { name = "numpy", specifier = "<2" }, { name = "ollama", specifier = ">=0.6.1" }, { name = "opencv-python", specifier = ">=4.11.0.86" }, @@ -3605,18 +3545,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/54/85/6ec269b0952ec7e36ba019125982cf11d91256a778c7c3f98a4c5043d283/xxhash-3.6.0-cp312-cp312-win_arm64.whl", hash = "sha256:eae5c13f3bc455a3bbb68bdc513912dc7356de7e2280363ea235f71f54064829", size = 27876, upload-time = "2025-10-02T14:34:54.371Z" }, ] -[[package]] -name = "yapf" -version = "0.43.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "platformdirs" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/23/97/b6f296d1e9cc1ec25c7604178b48532fa5901f721bcf1b8d8148b13e5588/yapf-0.43.0.tar.gz", hash = "sha256:00d3aa24bfedff9420b2e0d5d9f5ab6d9d4268e72afbf59bb3fa542781d5218e", size = 254907, upload-time = "2024-11-14T00:11:41.584Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/37/81/6acd6601f61e31cfb8729d3da6d5df966f80f374b78eff83760714487338/yapf-0.43.0-py3-none-any.whl", hash = "sha256:224faffbc39c428cb095818cf6ef5511fdab6f7430a10783fdfb292ccf2852ca", size = 256158, upload-time = "2024-11-14T00:11:39.37Z" }, -] - [[package]] name = "yarl" version = "1.22.0"