Compare commits
105 Commits
dev-ltx
...
ea7522a45d
| SHA1 |
|---|
| ea7522a45d |
| 7543d6b346 |
| 3ca4003e30 |
| 59e8a88a01 |
| ac6f74438d |
| e0d519bfb3 |
| 3414f2c1aa |
| 160bf1a6b1 |
| 79eb3fb859 |
| 4395d67288 |
| 674514ec11 |
| e9ca1d301b |
| a4d55fdb14 |
| 7f2f79d029 |
| 6d9e96305b |
| d93c50ce2b |
| 316c2fef67 |
| e25f49a776 |
| 33b4dd4a7f |
| ac8ca4dd46 |
| db88d9b813 |
| 6ea9837f83 |
| 7e48420ba7 |
| 13002eefda |
| 09e25f423e |
| bcc82ba065 |
| dcc88adfc0 |
| e4fd7b2fb9 |
| ba93d33a17 |
| 292da1de2b |
| c6ebfae942 |
| 4dd8416911 |
| bafcb68028 |
| c03b7e263e |
| 200414e5ad |
| 23a6a30cc4 |
| 4d0688afd5 |
| 9a00fce0eb |
| 4656eeee91 |
| f017d7e212 |
| fe25f5878b |
| c1b80c58f1 |
| 2cc17a1210 |
| be92d48abb |
| 57be559cf2 |
| f8382f280f |
| c24862507f |
| d5452098f3 |
| 315e298ba8 |
| ec26c8b507 |
| e02ca351b6 |
| c987f498bc |
| 3aa8dfa0f4 |
| 265f4de50e |
| a996a1853d |
| 1cbd019ffd |
| e2a49e2f3a |
| 66037c94e6 |
| 754e8d7735 |
| cdaeb6daac |
| 863d9287dc |
| ddef6af1cf |
| fdffb1e724 |
| ecf10611c2 |
| f78809b22a |
| 63a2f5e007 |
| aeb67f366a |
| c244e313ae |
| 15934085e0 |
| 40b41d02a4 |
| 1a1fd46f81 |
| dcd8e26f0f |
| fd94a3b4f0 |
| 682c589238 |
| 3f9309235a |
| a578aa4fc5 |
| ebd665b241 |
| ec649152e3 |
| 833e1bc924 |
| 7ed5911336 |
| b09538e294 |
| 313863a6a7 |
| 9ca1a2ba1f |
| 2b7e4013ee |
| 5b2bb3ce7c |
| 6739a92d28 |
| f23b99d326 |
| 10d41cd32f |
| bb7b85bfb8 |
| 6ecb6be59c |
| 64285cd5f3 |
| fe6a5fb029 |
| 5217847d49 |
| 0a9fc51310 |
| cf052f9632 |
| 19a8ea9a93 |
| 09ff2f1ab7 |
| 109a23197a |
| 2135a180be |
| 09032c0564 |
| 167faa10c8 |
| 0a048bf37f |
| 05045dda76 |
| 30f9a99df2 |
| 3932b8359a |
@@ -1,2 +1,6 @@
seg_cache
test
test
.venv
__pycache__/
*.pyc
.git/
@@ -7,7 +7,7 @@ jobs:
runs-on: ubuntu-latest

env:
REMOTE_DEPLOY_PATH: /workspace/Trinity/Fastapi_AiDA_Trinity_Dev
REMOTE_DEPLOY_PATH: /workspace/AiDA_Workspace/Python_Server_Workspace/Dev

steps:
- name: 1.检出代码
@@ -35,6 +35,4 @@ jobs:
cd ${{ env.REMOTE_DEPLOY_PATH }}

docker-compose down 2>&1
docker-compose up -d --build --remove-orphans 2>&1

docker image prune -f 2>&1
docker-compose up -d 2>&1
@@ -1,15 +1,15 @@
name: 定时 AiDA python develop 分支构建部署
on:
# 使用 schedule 触发器,遵循标准的 Cron 格式 (分钟 小时-8 日期 月份 星期)
schedule:
- cron: '30 9 * * *'
# schedule:
# - cron: '30 9 * * *'

jobs:
scheduled_deploy:
runs-on: ubuntu-latest

env:
REMOTE_DEPLOY_PATH: /workspace/Trinity/Fastapi_AiDA_Trinity_Dev
REMOTE_DEPLOY_PATH: /workspace/AiDA_Workspace/Python_Server_Workspace/Dev

steps:
- name: 1.检出代码
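Note (illustrative, not part of the diff): the workflow comment flags that the cron schedule runs in UTC, hence the "小时-8" (hour minus 8) hint. A quick sketch of what `'30 9 * * *'` means in UTC+8 local time:

```python
# Illustrative only: '30 9 * * *' fires daily at 09:30 UTC, i.e. 17:30 in UTC+8.
from datetime import datetime, timedelta, timezone

fire_utc = datetime(2025, 1, 1, 9, 30, tzinfo=timezone.utc)
print(fire_utc.astimezone(timezone(timedelta(hours=8))).strftime("%H:%M"))  # 17:30
```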
@@ -1,23 +1,19 @@
name: git commit AiDA python develop 分支构建部署
name: 手动 AiDA python develop 分支构建部署
on:
workflow_dispatch:
push:
branches:
- develop

jobs:
scheduled_deploy:
runs-on: ubuntu-latest
if: "contains(github.event.head_commit.message, '[run build]')"

env:
REMOTE_DEPLOY_PATH: /workspace/Trinity/Fastapi_AiDA_Trinity_Dev
REMOTE_DEPLOY_PATH: /workspace/AiDA_Workspace/Python_Server_Workspace/Dev

steps:
- name: 1.检出代码
uses: actions/checkout@v4
with:
ref: 'develop'
ref: 'dev-ltx'

- name: 2.复制文件到服务器
uses: appleboy/scp-action@v0.1.7
@@ -28,7 +24,7 @@ jobs:
source: "."
target: ${{ env.REMOTE_DEPLOY_PATH }}

- name: Restart Docker containers
- name: 3.重启docker-compose
uses: appleboy/ssh-action@v0.1.10
with:
host: ${{ secrets.SERVER_HOST }}
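Note (illustrative, not part of the diff): the added `if: "contains(github.event.head_commit.message, '[run build]')"` gates the push-triggered deploy on a commit-message marker. In plain Python terms the condition amounts to a substring check:

```python
# Hypothetical commit messages; only the one carrying the marker would let the job run.
for message in ["fix: tweak print painting", "feat: flux2 klein endpoint [run build]"]:
    print(message, "->", "[run build]" in message)
# fix: tweak print painting -> False
# feat: flux2 klein endpoint [run build] -> True
```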
40 .gitea/workflows/prod_build_manual.yaml Normal file
@@ -0,0 +1,40 @@
name: 定时 AiDA python prod 分支构建部署
on:
workflow_dispatch:

jobs:
scheduled_deploy:
runs-on: ubuntu-latest

env:
REMOTE_DEPLOY_PATH: /workspace/AiDA_Workspace/Python_Server_Workspace/Prod

steps:
- name: 1.检出代码
uses: actions/checkout@v4
with:
ref: 'master'

- name: 2.复制文件到服务器
uses: appleboy/scp-action@v0.1.7
with:
host: ${{ secrets.SERVER_HOST }}
username: ${{ secrets.SERVER_USER }}
password: ${{ secrets.SERVER_PASSWORD }}
source: "."
target: ${{ env.REMOTE_DEPLOY_PATH }}

- name: Restart Docker containers
uses: appleboy/ssh-action@v0.1.10
with:
host: ${{ secrets.SERVER_HOST }}
username: ${{ secrets.SERVER_USER }}
password: ${{ secrets.SERVER_PASSWORD }}
script: |
# 进入项目目录
cd ${{ env.REMOTE_DEPLOY_PATH }}

docker-compose down 2>&1
docker-compose up -d 2>&1

docker image prune -f 2>&1
42 .gitea/workflows/prod_build_scheduled.yaml Normal file
@@ -0,0 +1,42 @@
name: 定时 AiDA python prod 分支构建部署
on:
# 使用 schedule 触发器,遵循标准的 Cron 格式 (分钟 小时-8 日期 月份 星期)
schedule:
- cron: '07 13 23 1 *'

jobs:
scheduled_deploy:
runs-on: ubuntu-latest

env:
REMOTE_DEPLOY_PATH: /workspace/AiDA_Workspace/Python_Server_Workspace/Prod

steps:
- name: 1.检出代码
uses: actions/checkout@v4
with:
ref: 'master'

- name: 2.复制文件到服务器
uses: appleboy/scp-action@v0.1.7
with:
host: ${{ secrets.SERVER_HOST }}
username: ${{ secrets.SERVER_USER }}
password: ${{ secrets.SERVER_PASSWORD }}
source: "."
target: ${{ env.REMOTE_DEPLOY_PATH }}

- name: Restart Docker containers
uses: appleboy/ssh-action@v0.1.10
with:
host: ${{ secrets.SERVER_HOST }}
username: ${{ secrets.SERVER_USER }}
password: ${{ secrets.SERVER_PASSWORD }}
script: |
# 进入项目目录
cd ${{ env.REMOTE_DEPLOY_PATH }}

docker-compose down 2>&1
docker-compose up -d 2>&1

docker image prune -f 2>&1
@@ -20,7 +20,6 @@
$ conda activate trinity_client_aida
$ pip install -r requirements.txt
$ conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia -y
$ pip install mmcv==1.4.2 -f https://download.openmmlab.com/mmcv/dist/cu117/torch1.13/index.html

1. 启动服务器
@@ -4,6 +4,7 @@ import logging
import requests
from fastapi import APIRouter, HTTPException, BackgroundTasks

from app.core.config import settings
from app.schemas.design import DesignModel, ModelProgressModel, DesignStreamModel, SAMRequestModel
from app.schemas.response_template import ResponseModel
from app.service.design_fast.design_generate import design_generate, design_generate_v2
@@ -394,7 +395,8 @@ async def seg_anything(request_data: SAMRequestModel):
通过传入图片路径和点击的点坐标,返回分割后的掩码数据。

### 参数说明:
- **user_id**:用户id 用于存储分割图
- **bucket**: minio bucket name
- **object_name**: minio object name
- **image_path**: 图片在服务器或云端的相对路径。
- **type**: 推理类型
- **box**: 框选矩形点位信息
@@ -407,7 +409,8 @@ async def seg_anything(request_data: SAMRequestModel):
```json
point
{
"user_id": 1,
"bucket": "test",
"object_name": "7068-400a-ac94-c01647fa5f6f.png",
"image_path": "aida-users/89/sketch/4e8fe37d-7068-400a-ac94-c01647fa5f6f.png",
"type":"point",
"points": [[310, 403], [493, 375], [261, 266], [404, 484]],
@@ -416,7 +419,8 @@ async def seg_anything(request_data: SAMRequestModel):

box
{
"user_id": 1,
"bucket": "test",
"object_name": "7068-400a-ac94-c01647fa5f6f.png",
"image_path": "aida-users/89/sketch/4e8fe37d-7068-400a-ac94-c01647fa5f6f.png",
"type":"box",
"box": [350, 286, 544, 520]
@@ -425,7 +429,7 @@ async def seg_anything(request_data: SAMRequestModel):
"""
try:
logger.info(f"seg_anything request item is : @@@@@@:{json.dumps(request_data.dict(), indent=4)}")
data = requests.post("http://10.1.1.240:10075/predict", json=request_data.dict())
data = requests.post(f"http://{settings.B_4_X_4090_SERVICE_HOST}:10075/predict", json=request_data.dict())
logger.info(f"seg_anything response @@@@@@:{json.dumps(json.loads(data.content), indent=4)}")
return ResponseModel(data=json.loads(data.content))
except Exception as e:
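Note (illustrative, not part of the diff): the change above swaps the hard-coded `http://10.1.1.240:10075/predict` for `settings.B_4_X_4090_SERVICE_HOST`, so the SAM service address now comes from configuration rather than from the source. A minimal sketch of how that host is expected to be resolved, assuming it is supplied through the environment or `.env` as the new `config.py` suggests:

```python
# Sketch only; the variable name follows the diff, the host value is a placeholder.
import os

B_4_X_4090_SERVICE_HOST = os.getenv("B_4_X_4090_SERVICE_HOST", "")  # e.g. "10.1.1.240" in the dev stack
predict_url = f"http://{B_4_X_4090_SERVICE_HOST}:10075/predict"
print(predict_url)
```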
@@ -1,9 +1,12 @@
import json
import logging

import httpx
import requests
from fastapi import APIRouter, BackgroundTasks, HTTPException

from app.schemas.generate_image import GenerateImageModel, GenerateProductImageModel, GenerateSingleLogoImageModel, GenerateRelightImageModel, GenerateMultiViewModel, BatchGenerateProductImageModel, BatchGenerateRelightImageModel, AgentTollGenerateImageModel
from app.core.config import settings
from app.schemas.generate_image import GenerateImageModel, GenerateProductImageModel, GenerateSingleLogoImageModel, GenerateRelightImageModel, GenerateMultiViewModel, BatchGenerateProductImageModel, BatchGenerateRelightImageModel, AgentTollGenerateImageModel, Flux2ToProductImgModel, GenerateSloganImageModel, GenerateImageFlux2KleinModel
from app.schemas.pose_transform import BatchPoseTransformModel
from app.schemas.response_template import ResponseModel
from app.service.generate_batch_image.service import start_product_batch_generate, start_relight_batch_generate, start_pose_transform_batch_generate
@@ -20,6 +23,61 @@ logger = logging.getLogger()
'''generate image'''


# flux2 klein
@router.post("/generate_image_flux2_klein")
async def generate_image_flux2_klein(request_item: GenerateImageFlux2KleinModel):
"""
创建一个具有以下参数的请求体:
- **bucket_name**: OSS桶名 (必填)
- **object_name**: OSS对象名(文件路径)(必填)

- **width**: 图片宽度,默认1024像素 (非必填,1024)
- **height**: 图片高度,默认1024像素 (非必填,默认1024)
- **prompt**: 文本提示词,用于模型推理等场景 (非必填,默认"")
- **steps**: 推理步数,控制模型生成过程的迭代次数 (非必填,默认4)
- **guidance**: 引导系数,调节提示词对生成结果的影响程度 (非必填,默认 4.0 )

### 示例参数:
```
{
"bucket_name": "aida-users",
"object_name": "89/moodboard/5fdc698c-cb9b-4b36-afa9ce4-1-89.png",
"prompt": "a single item of sketch of dress, 4k, white background"
}
```
### 输出示例:
```
{
"code": 200,
"msg": "OK!",
"data": {
"output_path": "aida-users/89/moodboard/5fdc698c-cb9b-4b36-afa9ce4-1-89.png"
}
}
```
"""
try:
logger.info(f"generate_image_flux2_gen_img request: {json.dumps(request_item.model_dump(), indent=4)}")
async with httpx.AsyncClient(timeout=120) as client:
resp = await client.post(
f"http://{settings.FLUX2_GEN_IMG_MODEL_URL}/predict",
json=request_item.model_dump(),
)
if resp.status_code == 200:
result = resp.json()
logger.info(f"flux2_gen_img response: {json.dumps(result, indent=4)}")
return ResponseModel(data=result)
else:
error = resp.json()
logger.info(f"flux2_gen_img response: {json.dumps(error, indent=4)}")
return ResponseModel(data=error, msg="ERROR!", code=500)

except Exception as e:
logger.warning(f"generate_image_flux2_gen_img Run Exception @@@@@@:{e}")
raise HTTPException(status_code=404, detail=str(e))


# sdxl
@router.post("/generate_image")
def generate_image(request_item: GenerateImageModel, background_tasks: BackgroundTasks):
"""
@@ -154,6 +212,62 @@ def generate_single_logo_image(tasks_id: str):
return ResponseModel(data=data['data'])


"""slogan """


@router.post("/generate_slogan")
async def generate_slogan(request_data: GenerateSloganImageModel):
"""
### 请求体示例:
```json
{
"num_point": 16,
"image_url": "aida-slogan/6886785f-0aac-4052-b6fd-7ae20a841d8d.png",
"prompt": "123",
"tasks_id": "string-89"
}
```
"""
try:
logger.info(f"generate_slogan request item is : @@@@@@:{json.dumps(request_data.dict(), indent=4)}")
data = requests.post(f"http://{settings.A6000_SERVICE_HOST}:10020/api/slogan", json=request_data.dict())
logger.info(f"generate_slogan response @@@@@@:{json.dumps(json.loads(data.content), indent=4)}")
return ResponseModel(data=json.loads(data.content))
except Exception as e:
logger.warning(f"generate_slogan Run Exception @@@@@@:{e}")


"""product image flux2.0"""

# @router.post("/img_to_product")
# async def img_to_product(request_data: Flux2ToProductImgModel):
#     """
#     创建一个具有以下参数的请求体:
#     - **tasks_id**: 任务id 用于取消生成任务和获取生成结果
#     - **prompt**: 想要生成图片的描述词
#     - **image_path**: 被生成图片的S3或minio url地址
#     - **infer_step**: 推理步数
#
#     ### 请求体示例:
#     ```json
#     point
#     {
#     "prompt": "Create realistic studio photo with real people model standing and wearing this garment, in white studio, Keep original model if present, or generate appropriate model, Standing pose, facing camera.",
#     "image_path":"aida-results/result_38151e0a-f83b-11f0-89f6-0242ac130002.png",
#     "infer_step":4,
#     "tasks_id":"123456-123"
#     }
#     ```
#     """
#     try:
#         logger.info(f"img_to_product request item is : @@@@@@:{json.dumps(request_data.dict(), indent=4)}")
#         data = requests.post(f"http://{settings.A6000_SERVICE_HOST}:10090/api/v1/to_product", json=request_data.dict())
#         logger.info(f"img_to_product response @@@@@@:{json.dumps(json.loads(data.content), indent=4)}")
#         return ResponseModel(data=json.loads(data.content))
#     except Exception as e:
#         logger.warning(f"img_to_product Run Exception @@@@@@:{e}")


'''product image'''


@@ -178,7 +292,7 @@ def generate_product_image(request_item: GenerateProductImageModel, background_t
}
"""
try:
logger.info(f"generate_product_image request item is : @@@@@@:{json.dumps(request_item.dict(),indent=4)}")
logger.info(f"generate_product_image request item is : @@@@@@:{json.dumps(request_item.dict(), indent=4)}")
service = GenerateProductImage(request_item)
background_tasks.add_task(service.get_result)
except Exception as e:
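Note (illustrative, not part of the diff): a hypothetical client call against the new `/generate_image_flux2_klein` route, reusing the example payload from its docstring. The base URL and route prefix are assumptions (the new `config.py` defaults `SERVE_PORT` to 2010); adjust them to the actual deployment.

```python
import httpx

payload = {
    "bucket_name": "aida-users",
    "object_name": "89/moodboard/5fdc698c-cb9b-4b36-afa9ce4-1-89.png",
    "prompt": "a single item of sketch of dress, 4k, white background",
}
# Host, port and prefix are placeholders; the route itself is defined in the diff above.
resp = httpx.post("http://localhost:2010/generate_image_flux2_klein", json=payload, timeout=120)
print(resp.json())  # expected shape: {"code": 200, "msg": "OK!", "data": {"output_path": "..."}}
```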
@@ -1,235 +0,0 @@
import os

import pika
from dotenv import load_dotenv
from pydantic import BaseSettings

BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
load_dotenv(os.path.join(BASE_DIR, '.env'))


class Settings(BaseSettings):
PROJECT_NAME: str = 'FASTAPI BASE'
SECRET_KEY: str = ''
API_PREFIX: str = ''
BACKEND_CORS_ORIGINS: list[str] = ['*']
DATABASE_URL: str = ''
ACCESS_TOKEN_EXPIRE_SECONDS: int = 60 * 60 * 24 * 7  # Token expired after 7 days
SECURITY_ALGORITHM: str = 'HS256'
LOGGING_CONFIG_FILE: str = os.path.join(BASE_DIR, 'logging_env.py')


OSS = "minio"
DEBUG = False
if DEBUG:
LOGS_PATH = "logs/"
CATEGORY_PATH = "service/attribute/config/descriptor/category/category_dis.csv"
SEG_CACHE_PATH = "../seg_cache/"
POSE_TRANSFORM_VIDEO_PATH = "../pose_transform_video/"
RECOMMEND_PATH_PREFIX = "service/recommend/"
CHROMADB_PATH = "./chromadb/"
else:
LOGS_PATH = "app/logs/"
CATEGORY_PATH = "app/service/attribute/config/descriptor/category/category_dis.csv"
SEG_CACHE_PATH = "/seg_cache/"
POSE_TRANSFORM_VIDEO_PATH = "/pose_transform_video/"
RECOMMEND_PATH_PREFIX = "app/service/recommend/"
CHROMADB_PATH = "/chromadb/"

# RABBITMQ_ENV = ""  # 生产环境
RABBITMQ_ENV = os.getenv("RABBITMQ_ENV", "-dev")
# RABBITMQ_ENV = "-local"  # 本地测试环境

if RABBITMQ_ENV == "-dev":
JAVA_STREAM_API_URL = f"https://develop.api.aida.com.hk/api/third/party/receiveDesignResults"
elif RABBITMQ_ENV == "-prod":
JAVA_STREAM_API_URL = f"https://api.aida.com.hk/api/third/party/receiveDesignResults"

settings = Settings()

# minio 配置
MINIO_URL = "www.minio-api.aida.com.hk"
MINIO_ACCESS = 'vXKFLSJkYeEq2DrSZvkB'
MINIO_SECRET = 'uKTZT3x7C43WvPN9QTc99DiRkwddWZrG9Uh3JVlR'
MINIO_SECURE = True

# S3 配置
S3_ACCESS_KEY = "AKIAVD3OJIMF6UJFLSHZ"
S3_AWS_SECRET_ACCESS_KEY = "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"
S3_REGION_NAME = "ap-east-1"

# redis 配置
REDIS_HOST = "10.1.1.240"
REDIS_PORT = "6379"
REDIS_DB = "2"

# rabbitmq config
RABBITMQ_PARAMS = {
"host": "18.167.251.121",
"port": 5672,
"credentials": pika.credentials.PlainCredentials(username='rabbit', password='123456'),
"virtual_host": "/"
}

# milvus 配置
MILVUS_URL = "http://10.1.1.240:19530"
MILVUS_TOKEN = "root:Milvus"
MILVUS_ALIAS = "default"
MILVUS_TABLE_KEYPOINT = "keypoint_cache_2"
MILVUS_TABLE_SEG = "seg_cache"

# Mysql 配置
DB_HOST = '18.167.251.121'  # 数据库主机地址
# DB_PORT = int( 33006)
DB_PORT = 33008  # 数据库端口
DB_USERNAME = 'aida_con_python'  # 数据库用户名
DB_PASSWORD = '123456'  # 数据库密码
DB_NAME = 'aida'  # 数据库库名

# openai
os.environ['SERPAPI_API_KEY'] = "a793513017b0718db7966207c31703d280d12435c982f1e67bbcbffa52e7632c"
OPENAI_STREAM = True
BUFFER_THRESHOLD = 6  # must be even number
SINGLE_TOKEN_THRESHOLD = 200
TOKEN_THRESHOLD = 600
OPENAI_TEMPERATURE = 0

# OPENAI_API_KEY = "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"
OPENAI_API_KEY = "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"
OPENAI_MODEL = "gpt-3.5-turbo-0613"
OPENAI_MODEL_LIST = {"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4-0314",
"gpt-4-32k-0314",
"gpt-4-0613",
"gpt-4-32k-0613", }

# SR service config
SR_MODEL_NAME = "super_resolution"
SR_TRITON_URL = "10.1.1.240:10031"
SR_MINIO_BUCKET = "aida-users"
SR_RABBITMQ_QUEUES = f"SuperResolution{RABBITMQ_ENV}"

# GenerateImage service config
FAST_GI_MODEL_URL = '10.1.1.243:10011'
FAST_GI_MODEL_NAME = 'stable_diffusion_xl'

GI_MODEL_URL = '10.1.1.240:10061'
GI_MODEL_NAME = 'flux'

GMV_MODEL_URL = '10.1.1.243:10081'
GMV_MODEL_NAME = 'multi_view'

GMV_RABBITMQ_QUEUES = f"GenerateMultiView{RABBITMQ_ENV}"

GI_MINIO_BUCKET = "aida-users"
GI_RABBITMQ_QUEUES = f"GenerateImage{RABBITMQ_ENV}"
GI_SYS_IMAGE_URL = "aida-sys-image/generate_image/white_image.jpg"

# SLOGAN service config
SLOGAN_RABBITMQ_QUEUES = f"Slogan{RABBITMQ_ENV}"

# Generate Single Logo service config
GSL_MODEL_URL = '10.1.1.243:10041'
GSL_MINIO_BUCKET = "aida-users"
GSL_MODEL_NAME = 'stable_diffusion_xl_transparent'
GEN_SINGLE_LOGO_RABBITMQ_QUEUES = f"GenSingleLogo{RABBITMQ_ENV}"

# Generate Product service config
# GPI_RABBITMQ_QUEUES = os.getenv("GEN_PRODUCT_IMAGE_RABBITMQ_QUEUES", f"ToProductImage{RABBITMQ_ENV}")
# GPI_MODEL_NAME_OVERALL = 'sdxl_ensemble_all'
# GPI_MODEL_URL = '10.1.1.243:10051'

# Generate Product service config 旧版product img 模型
GPI_RABBITMQ_QUEUES = f"ToProductImage{RABBITMQ_ENV}"
BATCH_GPI_RABBITMQ_QUEUES = f"BatchToProductImage{RABBITMQ_ENV}"
GPI_MODEL_NAME_OVERALL = 'diffusion_ensemble_all'
GPI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_cnet'
GPI_MODEL_URL = '10.1.1.243:10051'

# Generate Single Logo service config
GRI_RABBITMQ_QUEUES = f"Relight{RABBITMQ_ENV}"
BATCH_GRI_RABBITMQ_QUEUES = f"BatchRelight{RABBITMQ_ENV}"
GRI_MODEL_NAME_OVERALL = 'diffusion_relight_ensemble'
GRI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_relight'
GRI_MODEL_URL = '10.1.1.240:10051'

# Pose Transform service config

PS_RABBITMQ_QUEUES = f"PoseTransform{RABBITMQ_ENV}"
BATCH_PS_RABBITMQ_QUEUES = f"BatchPoseTransform{RABBITMQ_ENV}"
PT_MODEL_URL = '10.1.1.243:10061'

# SEG service config
SEGMENTATION = {
"new_model_name": "seg_knet",
"name": "seg_ocrnet_hr18",
"input": "seg_input__0",
"output": "seg_output__0",
}
# ollama config
OLLAMA_URL = "http://10.1.1.240:11434/api/embeddings"

# design batch
BATCH_DESIGN_RABBITMQ_QUEUES = f"DesignBatch{RABBITMQ_ENV}"

# DESIGN config
DESIGN_MODEL_URL = '10.1.1.240:10000'
AIDA_CLOTHING = "aida-clothing"
KEYPOINT_RESULT_TABLE_FIELD_SET = ('neckline_left', 'neckline_right', 'shoulder_left', 'shoulder_right', 'armpit_left', 'armpit_right',
'cuff_left_in', 'cuff_left_out', 'cuff_right_in', 'cuff_right_out', 'waistband_left', 'waistband_right')

# DESIGN 预处理
IF_DEBUG_SHOW = False

# 优先级
PRIORITY_DICT = {
'earring_front': 99,
'bag_front': 98,
'hairstyle_front': 97,
'outwear_front': 20,
'tops_front': 19,
'dress_front': 18,
'blouse_front': 17,
'skirt_front': 16,
'trousers_front': 15,
'bottoms_front': 14,
'shoes_right': 1,
'shoes_left': 1,
'body': 0,
'bottoms_back': -14,
'trousers_back': -15,
'skirt_back': -16,
'blouse_back': -17,
'dress_back': -18,
'tops_back': -19,
'outwear_back': -20,
'hairstyle_back': -97,
'bag_back': -98,
'earring_back': -99,
}

QWEN_API_KEY = "sk-f31c29e61ac2498ba5e307aaa6dc10e0"

DB_CONFIG = {
"host": "18.167.251.121",
"port": 3306,
"user": "root",
"password": "QWa998345",
"database": "aida",
"charset": "utf8mb4"
}

TABLE_CATEGORIES = {
"female_dress": "female/dress",
"female_outwear": "female/outwear",
"female_trousers": "female/trousers",
"female_skirt": "female/skirt",
"female_blouse": "female/blouse",
"male_tops": "male/tops",
"male_bottoms": "male/bottoms",
"male_outwear": "male/outwear"
}

# --- ComfyUI 配置信息 ---
COMFYUI_SERVER_ADDRESS = "10.1.2.227:8080"  # 替换为您的 ComfyUI 服务器地址
@@ -36,7 +36,7 @@ class Settings(BaseSettings):

# --- mysql 配置信息 ---
MYSQL_HOST: str = Field(default='', description="")
MYSQL_PORT: int = Field(default='', description="")
MYSQL_PORT: int = Field(default=3306, description="")
MYSQL_USER: str = Field(default='', description="")
MYSQL_PASSWORD: str = Field(default='', description="")
MYSQL_DB: str = Field(default='', description="")
@@ -64,11 +64,19 @@ class Settings(BaseSettings):
# --- Design Callback Java 接口 ---
JAVA_STREAM_API_URL: str = Field(default='', description="")

# --- flux2 klein model url ---
FLUX2_GEN_IMG_MODEL_URL: str = Field(default='', description="")

# --- 服务器IP ---
A6000_SERVICE_HOST: str = Field(default='', description="")
B_4_X_4090_SERVICE_HOST: str = Field(default='', description="")

# --- 其他配置信息 以下均为Docker容器内配置---
LOGS_PATH: str = Field(default="/logs/", description="")
CATEGORY_PATH: str = Field(default="/app/service/attribute/config/descriptor/category/category_dis.csv", description="")
SEG_CACHE_PATH: str = Field(default="/seg_cache/", description="")
RECOMMEND_PATH_PREFIX: str = Field(default="/app/service/recommend/", description="")
SERVE_PORT: int = Field(default=2010, description="")


settings = Settings()
@@ -117,39 +125,41 @@ KEYPOINT_RESULT_TABLE_FIELD_SET = ('neckline_left', 'neckline_right', 'shoulder_
MILVUS_TABLE_KEYPOINT = "keypoint_cache_2"

# ollama 地址
OLLAMA_URL = "http://10.1.1.240:11434/api/embeddings"
OLLAMA_URL = f"http://{settings.A6000_SERVICE_HOST}:11434/api/embeddings"

"""Triton Server Config"""
# Design
DESIGN_MODEL_URL = '10.1.1.240:10000'
DESIGN_MODEL_URL = f'{settings.A6000_SERVICE_HOST}:10000'
DESIGN_MODEL_NAME = 'seg_knet'
# Seg Product
SEG_PRODUCT_MODEL_URL = f'{settings.B_4_X_4090_SERVICE_HOST}:30000'
# Generate Image
GI_MODEL_URL = '10.1.1.240:10061'
GI_MODEL_URL = f'{settings.A6000_SERVICE_HOST}:10061'
GI_MODEL_NAME = 'flux'
# Generate Single Logo
GSL_MODEL_URL = '10.1.1.243:10041'
GSL_MODEL_URL = f'{settings.B_4_X_4090_SERVICE_HOST}:10041'
GSL_MODEL_NAME = 'stable_diffusion_xl_transparent'
# Generate Product (整套和单品)
GPI_MODEL_URL = '10.1.1.243:10051'
GPI_MODEL_URL = f'{settings.B_4_X_4090_SERVICE_HOST}:10051'
GPI_MODEL_NAME_OVERALL = 'diffusion_ensemble_all'
GPI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_cnet'

# 以下停用中...*************
# 多视角生成
GMV_MODEL_URL = '10.1.1.243:10081'
GMV_MODEL_URL = f'{settings.B_4_X_4090_SERVICE_HOST}:10081'
GMV_MODEL_NAME = 'multi_view'
# 超分
SR_MODEL_NAME = "super_resolution"
SR_TRITON_URL = "10.1.1.240:10031"
SR_TRITON_URL = f"{settings.A6000_SERVICE_HOST}:10031"
# 打光
GRI_MODEL_URL = '10.1.1.240:10051'
GRI_MODEL_URL = f'{settings.A6000_SERVICE_HOST}:10051'
GRI_MODEL_NAME_OVERALL = 'diffusion_relight_ensemble'
GRI_MODEL_NAME_SINGLE = 'stable_diffusion_1_5_relight'
# agent 图片生成
FAST_GI_MODEL_URL = '10.1.1.243:10011'
FAST_GI_MODEL_URL = f'{settings.B_4_X_4090_SERVICE_HOST}:10011'
FAST_GI_MODEL_NAME = 'stable_diffusion_xl'
# 图转视频 triton版
PT_MODEL_URL = '10.1.1.243:10061'
PT_MODEL_URL = f'{settings.B_4_X_4090_SERVICE_HOST}:10061'

# *************
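Note (illustrative, not part of the diff): the rewritten URLs above all derive from the new Settings fields. Judging only by the hard-coded addresses they replace, the dev environment presumably maps `A6000_SERVICE_HOST` to 10.1.1.240 and `B_4_X_4090_SERVICE_HOST` to 10.1.1.243, supplied via `.env` or the container environment; the values below are assumptions, not configuration shipped in this diff.

```python
# Hypothetical environment for a dev-like deployment (values inferred from the replaced literals).
import os

os.environ.setdefault("A6000_SERVICE_HOST", "10.1.1.240")
os.environ.setdefault("B_4_X_4090_SERVICE_HOST", "10.1.1.243")
os.environ.setdefault("FLUX2_GEN_IMG_MODEL_URL", "10.1.1.240:10095")  # host:port of the flux2 klein service; placeholder

# With those set, the module-level constants in config.py would resolve to e.g.:
#   DESIGN_MODEL_URL -> "10.1.1.240:10000"
#   GPI_MODEL_URL    -> "10.1.1.243:10051"
```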
@@ -16,7 +16,7 @@ from fastapi.responses import JSONResponse

from app.api.api_route import router
from app.core.config import settings
from app.core.record_api_count import count_api_calls
# from app.core.record_api_count import count_api_calls
from app.schemas.response_template import ResponseModel
from logging_env import LOGGER_CONFIG_DICT
from dotenv import load_dotenv
@@ -48,7 +48,7 @@ def get_application() -> FastAPI:
allow_methods=["*"],
allow_headers=["*"],
)
application.middleware("http")(count_api_calls)
# application.middleware("http")(count_api_calls)
application.include_router(router=router)
return application
@@ -4,12 +4,13 @@ from pydantic import BaseModel, Field


class SAMRequestModel(BaseModel):
user_id: int = Field(..., description="用户id, 必填字段")
bucket: str = Field(..., description="minio bucket name ")
object_name: str = Field(..., description="minio object name ")
image_path: str = Field(..., description="图片路径,必填字段")
type: str = Field(..., description="推理类型,必填字段")
points: Optional[List[List[float]]] = None
labels: Optional[List[int]] = None
box: Optional[List[int]] = None
points: Optional[List[List[float]]] | None = None
labels: Optional[List[int]] | None = None
box: Optional[List[int]] | None = None


class DesignModel(BaseModel):

@@ -1,6 +1,6 @@
from typing import List
from typing import List, Optional

from pydantic import BaseModel
from pydantic import BaseModel, Field


class GenerateMultiViewModel(BaseModel):
@@ -8,6 +8,17 @@ class GenerateMultiViewModel(BaseModel):
image_url: str


class GenerateImageFlux2KleinModel(BaseModel):
bucket_name: str = Field(..., description="OSS桶名,不传则为None")
object_name: str = Field(..., description="OSS对象名(文件路径),不传则为None")
# input_image_paths: Optional[List[str]] = Field(default=[], description="输入图片路径列表")
width: Optional[int] = Field(default=1024, description="图片宽度,默认512像素")
height: Optional[int] = Field(default=1024, description="图片高度,默认512像素")
prompt: Optional[str] = Field(default="", description="文本提示词,用于模型推理等场景")
steps: Optional[int] = Field(default=4, description="推理步数,控制模型生成过程的迭代次数")
guidance: Optional[float] = Field(default=4.0, description="引导系数,调节提示词对生成结果的影响程度")


class GenerateImageModel(BaseModel):
tasks_id: str
prompt: str
@@ -24,6 +35,13 @@ class GenerateSingleLogoImageModel(BaseModel):
seed: str


class GenerateSloganImageModel(BaseModel):
num_point: int
tasks_id: str
prompt: str
image_url: str


class GenerateProductImageModel(BaseModel):
tasks_id: str
prompt: str
@@ -32,6 +50,13 @@ class GenerateProductImageModel(BaseModel):
product_type: str


class Flux2ToProductImgModel(BaseModel):
tasks_id: str
prompt: str
image_path: str
infer_step: int | None = None


class GenerateRelightImageModel(BaseModel):
tasks_id: str
prompt: str
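Note (illustrative, not part of the diff): instantiating the new `GenerateImageFlux2KleinModel` with only the two required fields should fill the remaining fields from the `Field` defaults declared above. A sketch, assuming the schema module is importable as shown in the diff:

```python
from app.schemas.generate_image import GenerateImageFlux2KleinModel

# Placeholder bucket/object names, used only to exercise the defaults.
m = GenerateImageFlux2KleinModel(bucket_name="aida-users", object_name="89/moodboard/example.png")
assert (m.width, m.height, m.steps, m.guidance, m.prompt) == (1024, 1024, 4, 4.0, "")
```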
@@ -3,7 +3,6 @@
from pprint import pprint

import cv2
import mmcv
import numpy as np
import pandas as pd
import torch
@@ -12,6 +11,7 @@ from minio import Minio

from app.core.config import settings, DESIGN_MODEL_URL
from app.schemas.attribute_retrieve import AttributeRecognitionModel
from app.service.utils.image_normalize import my_imnormalize
from app.service.utils.new_oss_client import oss_get_image

minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE)
@@ -109,10 +109,9 @@ class AttributeRecognition:

@staticmethod
def preprocess(img):
img = mmcv.imread(img)
img_scale = (224, 224)
img = cv2.resize(img, img_scale)
img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
return preprocessed_img
@@ -10,7 +10,6 @@
from minio import Minio
from skimage import transform
import cv2
import mmcv
import numpy as np
import pandas as pd
import tritonclient.http as httpclient
@@ -18,6 +17,7 @@ import torch

from app.core.config import settings, DESIGN_MODEL_URL
from app.schemas.attribute_retrieve import CategoryRecognitionModel
from app.service.utils.image_normalize import my_imnormalize
from app.service.utils.new_oss_client import oss_get_image

minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE)
@@ -39,11 +39,10 @@ class CategoryRecognition:

@staticmethod
def preprocess(img):
img = mmcv.imread(img)
# ori_shape = img.shape[:2]
img_scale = (224, 224)
img = cv2.resize(img, img_scale)
img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
return preprocessed_img
@@ -1,7 +1,6 @@
import logging

import cv2
import mmcv
import numpy as np
import pandas as pd
import torch
@@ -9,11 +8,12 @@ import torch.nn.functional as F
import tritonclient.http as httpclient
from minio import Minio

from app.core.config import DESIGN_MODEL_URL
from app.core.config import DESIGN_MODEL_URL, SEG_PRODUCT_MODEL_URL
from app.core.config import settings
from app.schemas.brand_dna import BrandDnaModel
from app.service.attribute.config import const
from app.service.utils.generate_uuid import generate_uuid
from app.service.utils.image_normalize import my_imnormalize
from app.service.utils.new_oss_client import oss_upload_image, oss_get_image

minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE)
@@ -29,7 +29,7 @@ class BrandDna:
self.attr_type = pd.read_csv(settings.CATEGORY_PATH)
# self.attr_type = pd.read_csv(r"E:\workspace\trinity_client_aida\app\service\attribute\config\descriptor\category\category_dis.csv")
self.att_client = httpclient.InferenceServerClient(url=DESIGN_MODEL_URL)
self.seg_client = httpclient.InferenceServerClient(url='10.1.1.243:30000')
self.seg_client = httpclient.InferenceServerClient(url=SEG_PRODUCT_MODEL_URL)
self.const = const
# self.const = local_debug_const

@@ -202,7 +202,7 @@ class BrandDna:
# 服装分割预处理
@staticmethod
def seg_product_preprocess(image):
img = mmcv.imread(image)
img = image
ori_shape = img.shape[:2]
img_scale_w, img_scale_h = ori_shape
if ori_shape[0] > 1024:
@@ -211,9 +211,9 @@ class BrandDna:
img_scale_h = 1024
# 如果图片size任意一边 大于 1024, 则会resize 成1024
if ori_shape != (img_scale_w, img_scale_h):
# mmcv.imresize(img, img_scale_h, img_scale_w)  # 老代码 引以为戒!哈哈哈~ h和w写反了
# my_imnormalize(img, img_scale_h, img_scale_w)  # 老代码 引以为戒!哈哈哈~ h和w写反了
img = cv2.resize(img, (img_scale_h, img_scale_w))
img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
return preprocessed_img, ori_shape

@@ -227,11 +227,10 @@ class BrandDna:
# 类别检测模型预处理
@staticmethod
def category_preprocess(img):
img = mmcv.imread(img)
# ori_shape = img.shape[:2]
img_scale = (224, 224)
img = cv2.resize(img, img_scale)
img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
return preprocessed_img
@@ -1,19 +1,10 @@
import logging

import cv2
import numpy as np
import tritonclient.grpc as grpcclient
import uuid
import httpx
from langchain_classic.output_parsers import ResponseSchema, StructuredOutputParser
from langchain_community.chat_models import ChatTongyi
from langchain_core.prompts import PromptTemplate
from minio import Minio
from tritonclient.utils import np_to_triton_dtype

from app.core.config import GI_MODEL_URL, GI_MODEL_NAME
from app.schemas.brand_dna import GenerateBrandModel
from app.service.utils.generate_uuid import generate_uuid
from app.service.utils.new_oss_client import oss_upload_image

from app.core.config import settings


@@ -26,14 +17,9 @@ class GenerateBrandInfo:
# user info init
self.user_id = request_data.user_id
self.category = "brand_logo"
# generate logo init
self.grpc_client = grpcclient.InferenceServerClient(url=GI_MODEL_URL)
self.image = np.random.randint(0, 256, (1024, 1024, 3), dtype=np.uint8)
self.batch_size = 1
self.mode = 'txt2img'

# llm generate brand info init
self.model = ChatTongyi(model="qwen2.5-14b-instruct", api_key="sk-7658298c6b99443c98184a5e634fe6ab")
self.model = ChatTongyi(model="qwen2.5-14b-instruct", api_key=settings.QWEN_API_KEY)

self.response_schemas = [
ResponseSchema(name="brand_name", description="Brand name."),
@@ -63,38 +49,20 @@ class GenerateBrandInfo:
self.generate_logo_prompt = brand_data['brand_logo_prompt']

def generate_brand_logo(self):
prompts = [self.generate_logo_prompt] * self.batch_size
modes = [self.mode] * self.batch_size
images = [self.image.astype(np.float16)] * self.batch_size

text_obj = np.array(prompts, dtype="object").reshape((-1, 1))
mode_obj = np.array(modes, dtype="object").reshape((-1, 1))
image_obj = np.array(images, dtype=np.float16).reshape((-1, 1024, 1024, 3))

input_text = grpcclient.InferInput("prompt", text_obj.shape, np_to_triton_dtype(text_obj.dtype))
input_image = grpcclient.InferInput("input_image", image_obj.shape, np_to_triton_dtype(image_obj.dtype))
input_mode = grpcclient.InferInput("mode", mode_obj.shape, np_to_triton_dtype(mode_obj.dtype))

input_text.set_data_from_numpy(text_obj)
input_image.set_data_from_numpy(image_obj)
input_mode.set_data_from_numpy(mode_obj)

inputs = [input_text, input_image, input_mode]
result = self.grpc_client.infer(model_name=GI_MODEL_NAME, inputs=inputs)
image = result.as_numpy("generated_image")
image_result = cv2.cvtColor(np.squeeze(image.astype(np.uint8)), cv2.COLOR_RGB2BGR)
logo_url = self.upload_logo_image(image_result, generate_uuid())
self.result_data['brand_logo'] = logo_url

def upload_logo_image(self, image, object_name):
try:
_, img_byte_array = cv2.imencode('.jpg', image)
object_name = f'{self.user_id}/{self.category}/{object_name}.jpg'
oss_upload_image(oss_client=self.minio_client, bucket="aida-users", object_name=object_name, image_bytes=img_byte_array)
image_url = f"aida-users/{object_name}"
return image_url
except Exception as e:
logging.warning(f"upload_png_mask runtime exception : {e}")
request_item = {
"bucket_name": "aida-users",
"object_name": f'{self.user_id}/{self.category}/{uuid.uuid4().hex}.png',
"prompt": self.generate_logo_prompt,
"height": 1024,
"width": 1024
}
with httpx.Client(timeout=120) as client:
resp = client.post(
f"http://{settings.FLUX2_GEN_IMG_MODEL_URL}/predict",
json=request_item,
)
result = resp.json()
self.result_data['brand_logo'] = result.get("output_path", "")


if __name__ == '__main__':
@@ -23,7 +23,7 @@ class ClothingSeg:
def __init__(self, request_data):
self.image_data = request_data.image_data
self.user_id = request_data.user_id
self.triton_client = grpcclient.InferenceServerClient(url="10.1.1.243:10071")
self.triton_client = grpcclient.InferenceServerClient(url=f"{settings.B_4_X_4090_SERVICE_HOST}:10071")

@RunTime
def get_result(self):
@@ -139,7 +139,7 @@ def get_bounding_box(mask):

if __name__ == "__main__":
test_data = ClothingSegModel(
user_id=89,
user_id="89",
image_data=[
# {
#     "image_url": "test/clothing_seg/dress.jpg",
@@ -13,7 +13,7 @@ from PIL import Image
from minio import Minio, S3Error
from moviepy.video.io.VideoFileClip import VideoFileClip

from app.core.config import settings
from app.core.config import settings, PS_RABBITMQ_QUEUES
from app.schemas.comfyui_i2v import ComfyuiPose2VModel
from app.service.generate_image.utils.mq import publish_status

@@ -622,9 +622,9 @@ class ComfyUIServerPose2V:

# 推送消息
if not settings.DEBUG:
publish_status(json.dumps(self.pose_transform_data), settings.COMFYUI_SERVER_ADDRESS)
publish_status(json.dumps(self.pose_transform_data), PS_RABBITMQ_QUEUES)
logger.info(
f" [x] Sent to: {settings.COMFYUI_SERVER_ADDRESS} data:@@@@ {json.dumps(self.pose_transform_data, indent=4)}")
f" [x] Sent to: {PS_RABBITMQ_QUEUES} data:@@@@ {json.dumps(self.pose_transform_data, indent=4)}")

return "\n🎉 所有任务完成!"
@@ -10,13 +10,13 @@
import logging

import cv2
import mmcv
import numpy as np
import torch
import torch.nn.functional as F
import tritonclient.http as httpclient

from app.core.config import DESIGN_MODEL_URL, DESIGN_MODEL_NAME
from app.service.utils.image_normalize import my_imnormalize

"""
keypoint
@@ -25,13 +25,13 @@ from app.core.config import DESIGN_MODEL_URL, DESIGN_MODEL_NAME


def keypoint_preprocess(img_path):
img = mmcv.imread(img_path)
img = img_path
img_scale = (256, 256)
h, w = img.shape[:2]
img = cv2.resize(img, img_scale)
w_scale = img_scale[0] / w
h_scale = img_scale[1] / h
img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
return preprocessed_img, (w_scale, h_scale)

@@ -74,7 +74,7 @@ def keypoint_postprocess(output, scale_factor):

# KNet
def seg_preprocess(img_path):
img = mmcv.imread(img_path)
img = img_path
ori_shape = img.shape[:2]
img_scale_w, img_scale_h = ori_shape
if ori_shape[0] > 1024:
@@ -83,9 +83,9 @@ def seg_preprocess(img_path):
img_scale_h = 1024
# 如果图片size任意一边 大于 1024, 则会resize 成1024
if ori_shape != (img_scale_w, img_scale_h):
# mmcv.imresize(img, img_scale_h, img_scale_w)  # 老代码 引以为戒!哈哈哈~ h和w写反了
# my_imnormalize(img, img_scale_h, img_scale_w)  # 老代码 引以为戒!哈哈哈~ h和w写反了
img = cv2.resize(img, (img_scale_h, img_scale_w))
img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
return preprocessed_img, ori_shape


@@ -130,7 +130,7 @@ def design_generate(request_data):
items_response['synthesis_url'] = synthesis(layers, new_size, basic)

else:
item_result = process_item(object['items'][0], basic)
item_result = process_item(object['items'][0], basic, design_type)
items_response['layers'].append({
'image_category': f"{item_result['name']}_front",
'image_size': item_result['back_image'].size if item_result['back_image'] else None,
@@ -184,6 +184,7 @@ def design_generate_v2(request_data):

def process_object(object, callback_url):
basic = object['basic']
design_type = basic.get('design_type', "default")
items_response = {
'layers': [],
'objectSign': object['objectSign'] if 'objectSign' in object.keys() else "",
@@ -192,7 +193,7 @@ def design_generate_v2(request_data):
if basic['single_overall'] == "overall":
item_results = []
for item in object['items']:
item_results.append(process_item(item, basic))
item_results.append(process_item(item, basic, design_type))
layers = []
for item in item_results:
process_layer(item, layers)
@@ -217,7 +218,7 @@ def design_generate_v2(request_data):
})
items_response['synthesis_url'] = synthesis(layers, new_size, basic)
else:
item_result = process_item(object['items'][0], basic)
item_result = process_item(object['items'][0], basic, design_type)
items_response['layers'].append({
'image_category': f"{item_result['name']}_front",
'image_size': item_result['back_image'].size if item_result['back_image'] else None,
@@ -16,6 +16,9 @@ class OthersItem(BaseItem):
self.Others_pipeline = [
LoadImage(minio_client),
Segmentation(minio_client),
Color(minio_client),
NoSegPrintPainting(minio_client),
PrintPainting(minio_client),
Scaling(),
Split(minio_client)
]
@@ -82,8 +85,8 @@ class OthersMergeItem(BaseItem):
Segmentation(minio_client),
# BackPerspective(minio_client),
Color(minio_client),
NoSegPrintPainting(minio_client),
PrintPainting(minio_client),
# NoSegPrintPainting(minio_client),
# PrintPainting(minio_client),
Scaling(),
Split(minio_client)
]
@@ -12,9 +12,13 @@ class NoSegPrintPainting:
self.minio_client = minio_client

def __call__(self, result):
single_print = result['print']['single']
# single_print = [result['print']['single']]
overall_print = result['print']['overall']
element_print = result['print']['element']
# element_print = result['print']['element'

single_print = None
element_print = None

result['single_image'] = None
result['print_image'] = None

@@ -25,7 +29,7 @@ class NoSegPrintPainting:
result['no_seg_sketch_overall'] = result['no_seg_sketch_print'] = self.printpaint(result, painting_dict, print_=True)
result['pattern_image'] = result['no_seg_sketch_overall']

if single_print['print_path_list']:
if single_print:
print_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8)
mask_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8)
for i in range(len(single_print['print_path_list'])):
@@ -65,7 +69,7 @@ class NoSegPrintPainting:
single_image = cv2.add(tmp1, tmp2)
result['no_seg_sketch_print'] = single_image

if element_print['element_path_list']:
if element_print:
print_background = np.zeros((result['final_image'].shape[0], result['final_image'].shape[1], 3), dtype=np.uint8)
mask_background = np.zeros((result['final_image'].shape[0], result['final_image'].shape[1], 3), dtype=np.uint8)
for i in range(len(element_print['element_path_list'])):
@@ -169,7 +173,7 @@ class NoSegPrintPainting:
canvas_h=painting_dict['dim_image_h'],
canvas_w=painting_dict['dim_image_w'],
location=painting_dict['location'],
angle=45)
angle=int(print_.get('print_angle_list', [0])[0]))
painting_dict['mask_inv_print'] = np.zeros(painting_dict['tile_print'].shape[:2], dtype=np.uint8)
return painting_dict
@@ -12,10 +12,14 @@ class PrintPainting:
self.minio_client = minio_client

def __call__(self, result):
single_print = result['print']['single']
# single_print = result['print']['single']
overall_print = result['print']['overall']
element_print = result['print']['element']
partial_path = result['print']['partial'] if 'partial' in result['print'] else None
# element_print = result['print']['element']
# partial_path = result['print']['partial'] if 'partial' in result['print'] else None

single_print = None
element_print = None
partial_path = None
result['single_image'] = None
result['print_image'] = None
# TODO 给result['pattern_image'] resize 到resize_scale的大小
@@ -43,7 +47,7 @@ class PrintPainting:
result['print_image'] = self.printpaint(result, painting_dict, print_=True)
result['single_image'] = result['final_image'] = result['pattern_image'] = result['print_image']

if single_print['print_path_list']:
if single_print:
# 2025-9-19 印花调整 印花坐标按照sketch的缩放比调整
sketch_resize_scale = result['resize_scale']
print_background = np.zeros((result['pattern_image'].shape[0], result['pattern_image'].shape[1], 3), dtype=np.uint8)
@@ -84,7 +88,7 @@ class PrintPainting:
tmp2 = (result['final_image'] * (temp_fg / 255)).astype(np.uint8)
result['single_image'] = cv2.add(tmp1, tmp2)

if element_print['element_path_list']:
if element_print:
# 2025-9-19 印花调整 印花坐标按照sketch的缩放比调整
sketch_resize_scale = result['resize_scale']
print_background = np.zeros((result['final_image'].shape[0], result['final_image'].shape[1], 3), dtype=np.uint8)
@@ -232,7 +236,7 @@ class PrintPainting:
canvas_h=painting_dict['dim_image_h'],
canvas_w=painting_dict['dim_image_w'],
location=painting_dict['location'],
angle=45)
angle=int(print_.get('print_angle_list', [0])[0]))
painting_dict['mask_inv_print'] = np.zeros(painting_dict['tile_print'].shape[:2], dtype=np.uint8)
return painting_dict
@@ -47,9 +47,12 @@ class Segmentation:
# 本地查询seg 缓存是否存在
_, seg_result = self.load_seg_result(result["image_id"])
# 判断缓存和实际图片size是否相同
_ = False
if not _ or result["image"].shape[:2] != seg_result.shape:
# 推理获得seg 结果
seg_result = get_seg_result(result['image'])
if result['name'] == 'others':
seg_result = seg_result.clip(max=1)
self.save_seg_result(seg_result, result['image_id'])
result['seg_result'] = seg_result
@@ -21,7 +21,9 @@ class Split(object):
try:
if result['name'] in ('outwear', 'dress', 'blouse', 'skirt', 'trousers', 'tops', 'bottoms', 'others'):
if result.get('design_type', None) == 'merge':
# merge 不需要返回mask (红绿图)
ori_front_mask = result['front_mask'].copy()
ori_back_mask = result['back_mask'].copy()

if result['resize_scale'][0] == 1.0 and result['resize_scale'][1] == 1.0:
front_mask = result['front_mask']
back_mask = result['back_mask']
@@ -43,6 +45,20 @@ class Split(object):
result_front_image_pil = Image.fromarray(cv2.cvtColor(result_front_image, cv2.COLOR_BGR2RGBA))
result['front_image'], result["front_image_url"], _ = upload_png_mask(self.minio_client, result_front_image_pil, f'{generate_uuid()}', mask=None)

height, width = ori_front_mask.shape
mask_image = np.zeros((height, width, 3))
mask_image[ori_front_mask != 0] = [0, 0, 255]
mask_image[ori_back_mask != 0] = [0, 255, 0]
rbga_mask = rgb_to_rgba(mask_image, ori_front_mask + ori_back_mask)
mask_pil = Image.fromarray(cv2.cvtColor(rbga_mask.astype(np.uint8), cv2.COLOR_BGR2RGBA))
image_data = io.BytesIO()
mask_pil.save(image_data, format='PNG')
image_data.seek(0)
image_bytes = image_data.read()
req = oss_upload_image(oss_client=self.minio_client, bucket="aida-clothing", object_name=f"mask/mask_{generate_uuid()}.png", image_bytes=image_bytes)
result['mask_url'] = req.bucket_name + "/" + req.object_name


result_back_image = np.zeros_like(rgba_image)
back_mask = cv2.resize(back_mask, new_size, interpolation=cv2.INTER_AREA)
result_back_image[back_mask != 0] = rgba_image[back_mask != 0]
@@ -10,12 +10,12 @@
import logging

import cv2
import mmcv
import numpy as np
import torch
import tritonclient.http as httpclient

from app.core.config import DESIGN_MODEL_URL, DESIGN_MODEL_NAME
from app.service.utils.image_normalize import my_imnormalize

"""
keypoint
@@ -24,14 +24,14 @@ from app.core.config import DESIGN_MODEL_URL, DESIGN_MODEL_NAME


def keypoint_preprocess(img_path):
img = mmcv.imread(img_path)
img = img_path
img = cv2.copyMakeBorder(img, 25, 25, 25, 25, cv2.BORDER_CONSTANT, value=[255, 255, 255])
img_scale = (256, 256)
h, w = img.shape[:2]
img = cv2.resize(img, img_scale)
w_scale = img_scale[0] / w
h_scale = img_scale[1] / h
img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
return preprocessed_img, (w_scale, h_scale)

@@ -78,7 +78,7 @@ def keypoint_postprocess(output, scale_factor):

# KNet
def seg_preprocess(img_path):
img = mmcv.imread(img_path)
img = img_path
ori_shape = img.shape[:2]
img_scale_w, img_scale_h = ori_shape
if ori_shape[0] > 1024:
@@ -87,12 +87,12 @@ def seg_preprocess(img_path):
img_scale_h = 1024
# 如果图片size任意一边 大于 1024, 则会resize 成1024
if ori_shape != (img_scale_w, img_scale_h):
# mmcv.imresize(img, img_scale_h, img_scale_w)  # 老代码 引以为戒!哈哈哈~ h和w写反了
# my_imnormalize(img, img_scale_h, img_scale_w)  # 老代码 引以为戒!哈哈哈~ h和w写反了
img = cv2.resize(img, (img_scale_h, img_scale_w))

# 扩充25的白边
img = cv2.copyMakeBorder(img, 25, 25, 25, 25, cv2.BORDER_CONSTANT, value=[255, 255, 255])
img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
return preprocessed_img, ori_shape
@@ -92,6 +92,8 @@ def organize_others(layer):
pattern_print_image_url=layer.get('pattern_print_image_url', None),
pattern_image=layer.get('pattern_image', None),
# back_perspective_url=layer['back_perspective_url'] if 'back_perspective_url' in layer.keys() else ""
transpose=layer.get("transpose", [1, 1]),  # 默认为1, 1代表不镜像
rotate=layer.get('rotate', 0),
)
# 后片数据
back_layer = dict(priority=-layer.get("priority", 0) if layer.get("layer_order", False) else PRIORITY_DICT.get(f'{layer["name"].lower()}_back', None),
@@ -109,6 +111,8 @@ def organize_others(layer):
pattern_overall_image_url=layer.get('pattern_overall_image_url', None),
pattern_print_image_url=layer.get('pattern_print_image_url', None),
# back_perspective_url=layer['back_perspective_url'] if 'back_perspective_url' in layer.keys() else ""
transpose=layer.get("transpose", [1, 1]),  # 默认为1, 1代表不镜像
rotate=layer.get('rotate', 0),
)
return front_layer, back_layer
@@ -347,7 +347,8 @@ def transpose_rotate(layer, image):

    rotate = layer.get('rotate', 0)
    paste_x, paste_y = layer['adaptive_position'][1], layer['adaptive_position'][0]

    original_w = image.width
    original_h = image.height
    # transpose: left/right is 1, up/down is -1
    if transpose[0] != 1:
        # left/right
@@ -361,8 +362,8 @@ def transpose_rotate(layer, image):
    image = image.rotate(-rotate, expand=True)
    # 4. compute the paste position so the visual centre stays consistent
    # (15, 36) was originally the top-left corner of the 288*288 area; compute its centre point
    target_center_x = 15 + 288 // 2
    target_center_y = 36 + 288 // 2
    target_center_x = paste_x + original_w // 2
    target_center_y = paste_y + original_h // 2

    # get the new size of the rotated image
    new_w, new_h = image.size
@@ -370,4 +371,4 @@ def transpose_rotate(layer, image):

    # compute the new top-left coordinates so the rotated image's centre stays at the original centre position
    paste_x = target_center_x - new_w // 2
    paste_y = target_center_y - new_h // 2
    return image, (paste_x, paste_y)
    return image, (paste_x, paste_y)
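The hard-coded (15, 36) anchor and 288x288 size are replaced by the layer's own adaptive_position and original dimensions, so a rotated layer keeps its visual centre regardless of its size. The same idea as a standalone Pillow sketch; the file name and angle are illustrative only:

from PIL import Image

layer_img = Image.open("layer.png")
paste_x, paste_y = 15, 36                      # top-left corner before rotation
cx = paste_x + layer_img.width // 2            # centre of the unrotated layer
cy = paste_y + layer_img.height // 2
rotated = layer_img.rotate(-30, expand=True)   # expand=True grows the canvas to fit
new_x = cx - rotated.width // 2                # re-anchor so the centre stays put
new_y = cy - rotated.height // 2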
@@ -7,7 +7,7 @@ import numpy as np
import torch
import tritonclient.grpc as grpcclient
from minio import Minio
from pymilvus import MilvusClient
# from pymilvus import MilvusClient
from urllib3.exceptions import ResponseError

from app.core.config import settings, SR_MODEL_NAME, SR_TRITON_URL, MILVUS_TABLE_KEYPOINT, KEYPOINT_RESULT_TABLE_FIELD_SET
@@ -58,7 +58,21 @@ class DesignPreprocessing:
            if len(image.shape) == 2:
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            elif image.shape[2] == 4:  # if it is a four-channel mask
                image = image[:, :, :3]
                # split the RGB and alpha channels
                bgr = image[:, :, :3]
                alpha = image[:, :, 3]

                # create a white background (any other colour works; (255, 255, 255) is white)
                background_color = (255, 255, 255)
                background = np.full_like(bgr, background_color)

                # convert the alpha channel into a mask (0 = transparent, 255 = opaque)
                alpha_mask = alpha / 255.0  # normalise to 0-1
                alpha_mask = np.expand_dims(alpha_mask, axis=-1)  # expand dims for broadcasting

                # blend background and original: transparent areas show the background colour, opaque areas show the original image
                image = (bgr * alpha_mask + background * (1 - alpha_mask)).astype(np.uint8)
                # image is already 3-channel RGB here, no need to run image = image[:, :, :3] again
            obj["image_obj"] = image
        return image_list
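The new branch flattens a 4-channel input onto a white canvas instead of simply dropping the alpha channel, so fully transparent pixels come out white rather than whatever colour happened to be stored underneath them. The same logic as a self-contained helper, for reference:

import numpy as np

def composite_on_white(bgra):
    """Blend a 4-channel image onto a white background and return 3 channels."""
    bgr = bgra[:, :, :3].astype(np.float32)
    alpha = bgra[:, :, 3:4].astype(np.float32) / 255.0  # keep shape (H, W, 1) for broadcasting
    white = np.full_like(bgr, 255.0)
    return (bgr * alpha + white * (1.0 - alpha)).astype(np.uint8)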
@@ -174,8 +188,9 @@ class DesignPreprocessing:
            scale = 0.4
            if waist_width / scale >= image_width:
                add_width = int((waist_width / scale - image_width) / 2)
                ret = cv2.copyMakeBorder(image['obj'], 0, 0, add_width, add_width, cv2.BORDER_CONSTANT, value=(256, 256, 256))
                image_bytes = cv2.imencode(".jpg", ret)[1].tobytes()
                ret = cv2.copyMakeBorder(image['obj'], 0, 0, add_width, add_width, cv2.BORDER_CONSTANT, value=(255, 255, 255))
                img_rgba = cv2.cvtColor(ret, cv2.COLOR_RGB2RGBA)
                image_bytes = cv2.imencode(".png", img_rgba)[1].tobytes()
                # image['show_image_url'] = f"{image['image_url'].split('/', 1)[0]}/{self.minio_client.put_object(image['image_url'].split('/', 1)[0], image['image_url'].split('/', 1)[1].replace('.', '-show.'), io.BytesIO(image_bytes), len(image_bytes), content_type='image/jpeg').object_name}"
                bucket_name = image['image_url'].split('/', 1)[0]
                object_name = image['image_url'].split('/', 1)[1].replace('.', '-show.')
@@ -261,14 +276,15 @@ class DesignPreprocessing:

    def keypoint_cache(self, sketch):
        try:
            client = MilvusClient(uri=settings.MILVUS_URL, token=settings.MILVUS_TOKEN, db_name=settings.MILVUS_ALIAS)
            # client = MilvusClient(uri=settings.MILVUS_URL, token=settings.MILVUS_TOKEN, db_name=settings.MILVUS_ALIAS)
            keypoint_id = sketch['image_id']
            res = client.query(
                collection_name=MILVUS_TABLE_KEYPOINT,
                # ids=[keypoint_id],
                filter=f"keypoint_id == {keypoint_id}",
                output_fields=['keypoint_vector', 'keypoint_site']
            )
            # res = client.query(
            #     collection_name=MILVUS_TABLE_KEYPOINT,
            #     # ids=[keypoint_id],
            #     filter=f"keypoint_id == {keypoint_id}",
            #     output_fields=['keypoint_vector', 'keypoint_site']
            # )
            res = []
            if len(res) == 0:
                # no cached result: run inference directly and save the result
                keypoint_infer_result = self.infer_keypoint_result(sketch)
@@ -11,7 +11,6 @@ import logging
import uuid

import cv2
import mmcv
import numpy as np
import pandas as pd
import torch
@@ -21,6 +20,7 @@ from minio import Minio
from tritonclient.utils import np_to_triton_dtype

from app.core.config import settings, FAST_GI_MODEL_URL, GI_MODEL_URL, DESIGN_MODEL_URL, FAST_GI_MODEL_NAME, GI_MODEL_NAME
from app.service.utils.image_normalize import my_imnormalize
from app.service.utils.new_oss_client import oss_upload_image

logger = logging.getLogger()
@@ -86,10 +86,9 @@ class AgentToolGenerateImage:

    @staticmethod
    def preprocess(img):
        img = mmcv.imread(img)
        img_scale = (224, 224)
        img = cv2.resize(img, img_scale)
        img = mmcv.imnormalize(
        img = my_imnormalize(
            img,
            mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]),
            to_rgb=True)
@@ -189,10 +189,10 @@ if __name__ == '__main__':
        tasks_id="123-89",
        prompt="a single item of sketch of dress, 4k, white background",
        image_url="aida-collection-element/89/Sketchboard/95f20cdc-e059-435c-b8b1-d04cc9e80c3d.png",
        mode='img2img',
        mode='txt2img',
        category="sketch",
        gender="Female",
        version="fast"
        version="hight"
    )
    server = GenerateImage(rd)
    print(server.get_result())
@@ -2,23 +2,23 @@ import logging
import time

import cv2
import mmcv
import numpy as np
import torch
import tritonclient.http as httpclient

from app.core.config import settings, DESIGN_MODEL_URL, DESIGN_MODEL_NAME
from app.service.generate_image.utils.upload_sd_image import upload_stain_png_sd, upload_face_png_sd
from app.service.utils.image_normalize import my_imnormalize

logger = logging.getLogger()


def seg_preprocess(img_path):
    img = mmcv.imread(img_path)
    img = img_path
    ori_shape = img.shape[:2]
    img_scale = ori_shape
    img = cv2.resize(img, img_scale)
    img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
    img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
    preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
    return preprocessed_img, ori_shape
@@ -242,10 +242,9 @@ def stain_detection(image, user_id, category, tasks_id, spot_size=100):


def generate_category_recognition(image, gender):
    def preprocess(img):
        img = mmcv.imread(img)
        img_scale = (224, 224)
        img = cv2.resize(img, img_scale)
        img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
        img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
        preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
        return preprocessed_img
@@ -1,7 +1,6 @@
import logging

import cv2
import mmcv
import numpy as np
import torch
import torch.nn.functional as F
@@ -10,6 +9,7 @@ from minio import Minio
from app.core.config import settings
from app.core.config import DESIGN_MODEL_URL
from app.schemas.image2sketch import Image2SketchModel
from app.service.utils.image_normalize import my_imnormalize
from app.service.utils.new_oss_client import oss_get_image, oss_upload_image

logger = logging.getLogger()
@@ -67,7 +67,7 @@ class LineArtService:

    @staticmethod
    def line_art_preprocess(image):
        img = mmcv.imread(image)
        img = image
        ori_shape = img.shape[:2]
        img_scale_w, img_scale_h = ori_shape
        if ori_shape[0] > 1024:
@@ -76,9 +76,9 @@ class LineArtService:
            img_scale_h = 1024
        # if either side of the image is larger than 1024, it is resized to 1024
        if ori_shape != (img_scale_w, img_scale_h):
            # mmcv.imresize(img, img_scale_h, img_scale_w)  # old code, a cautionary tale: h and w were swapped
            # my_imnormalize(img, img_scale_h, img_scale_w)  # old code, a cautionary tale: h and w were swapped
            img = cv2.resize(img, (img_scale_h, img_scale_w))
        img = mmcv.imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
        img = my_imnormalize(img, mean=np.array([123.675, 116.28, 103.53]), std=np.array([58.395, 57.12, 57.375]), to_rgb=True)
        preprocessed_img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
        return preprocessed_img, ori_shape
@@ -90,7 +90,7 @@ def get_response(messages):

def get_translation_from_llama3(text):
    start_time = time.time()
    url = "http://10.1.1.240:11434/api/generate"
    url = f"http://{settings.A6000_SERVICE_HOST}:12434/api/generate"
    # url = "http://10.1.1.240:1143/api/generate"

    # prompt = f"System: {prefix_for_llama}\nUser:[{text}]"
@@ -103,8 +103,8 @@ def get_translation_from_llama3(text):

    # build the request payload; "translator" is the custom translation model
    payload = {
        "model": "translator",
        "prompt": f"[{text}]",
        "model": "AiDA-translator:latest",
        "prompt": text,
        "stream": False
    }
    # convert the payload to JSON
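The payload now targets the AiDA-translator:latest model and sends the raw text without the bracket wrapping. With "stream": False, Ollama's /api/generate endpoint returns a single JSON object whose response field holds the generated text; a minimal request sketch (host placeholder stands in for the settings value used above, error handling omitted):

import requests

payload = {"model": "AiDA-translator:latest", "prompt": "火焰", "stream": False}
resp = requests.post("http://<A6000_SERVICE_HOST>:12434/api/generate", json=payload, timeout=60)
print(resp.json()["response"])  # the translated text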
@@ -148,7 +148,7 @@ def get_translation_from_llama3(text):
def get_prompt_from_image(image_path, text):
    start_time = time.time()
    # url = "http://localhost:11434/api/generate"
    url = "http://10.1.1.243:11434/api/generate"
    url = f"http://{settings.B_4_X_4090_SERVICE_HOST}:11434/api/generate"

    image_base64 = minio_util.minio_url_to_base64(image_path.img)
    # image_base64 = minio_url_to_base64(image_path)
@@ -180,7 +180,7 @@ def get_prompt_from_image(image_path, text):

def main():
    """Main function"""
    text = get_translation_from_llama3("[火焰]")
    text = get_translation_from_llama3("火焰")
    print(text)
27 app/service/utils/image_normalize.py Normal file
@@ -0,0 +1,27 @@
import cv2
import numpy as np


def my_imnormalize(img, mean, std, to_rgb=True):
    """Inplace normalize an image with mean and std.

    Args:
        img (ndarray): Image to be normalized.
        mean (ndarray): The mean to be used for normalize.
        std (ndarray): The std to be used for normalize.
        to_rgb (bool): Whether to convert to rgb.

    Returns:
        ndarray: The normalized image.
    """
    # cv2 inplace normalization does not accept uint8
    img = img.copy().astype(np.float32)

    assert img.dtype != np.uint8
    mean = np.float64(mean.reshape(1, -1))
    stdinv = 1 / np.float64(std.reshape(1, -1))
    if to_rgb:
        cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)  # inplace
    cv2.subtract(img, mean, img)  # inplace
    cv2.multiply(img, stdinv, img)  # inplace
    return img
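my_imnormalize is the drop-in replacement for mmcv.imnormalize used throughout this changeset: it works on a float32 copy, optionally converts BGR to RGB, subtracts the per-channel mean and divides by the per-channel std. A quick equivalence check on a random test image; the tolerance is an assumption:

import numpy as np
from app.service.utils.image_normalize import my_imnormalize

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
mean = np.array([123.675, 116.28, 103.53])
std = np.array([58.395, 57.12, 57.375])

out = my_imnormalize(img, mean, std, to_rgb=True)
ref = (img[:, :, ::-1].astype(np.float32) - mean) / std  # manual BGR->RGB, then normalise
assert np.allclose(out, ref, atol=1e-3)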
@@ -1,25 +1,20 @@
services:
  aida_server:
    container_name: "AiDA_${SERVE_ENV}_Server"
    build:
      context: .
      dockerfile: Dockerfile
    working_dir: /app
    volumes:
      - ./app:/app/app
      - ./.env_prod:/app/.env
      - ./.env:/app/.env
      - /etc/localtime:/etc/localtime:ro
      - ./seg_cache:/seg_cache
    ports:
      - "10200:80"
    depends_on:
      - redis
  redis:
    image: redis
    container_name: aida_redis
    restart: always
    ports:
      - "6400:6379"
    volumes:
      - ./redis/data:/data
      - ./redis/conf/redis.conf:/etc/redis/redis.conf
    command: redis-server /etc/redis/redis.conf --appendonly yes
      - "${SERVE_PORT}:80"
    networks:
      - aida_app_net

networks:
  aida_app_net:
    external: true
    name: aida_app_net
@@ -23,8 +23,8 @@ dependencies = [
    "load-dotenv>=0.1.0",
    "loguru>=0.7.3",
    "minio>=7.2.20",
    "mmcv>=2.2.0",
    "moviepy==1.0.3",
    "np>=1.0.2",
    "numpy<2",
    "ollama>=0.6.1",
    "opencv-python>=4.11.0.86",
BIN requirements.txt
Binary file not shown.
88 uv.lock generated
@@ -8,15 +8,6 @@ resolution-markers = [
    "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')",
]

[[package]]
name = "addict"
version = "2.4.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/85/ef/fd7649da8af11d93979831e8f1f8097e85e82d5bfeabc8c68b39175d8e75/addict-2.4.0.tar.gz", hash = "sha256:b3b2210e0e067a281f5646c8c5db92e99b7231ea8b0eb5f74dbdf9e259d4e494", size = 9186, upload-time = "2020-11-21T16:21:31.416Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/6a/00/b08f23b7d7e1e14ce01419a467b583edbb93c6cdb8654e54a9cc579cd61f/addict-2.4.0-py3-none-any.whl", hash = "sha256:249bb56bbfd3cdc2a004ea0ff4c2b6ddc84d53bc2194761636eb314d5cfa5dfc", size = 3832, upload-time = "2020-11-21T16:21:29.588Z" },
]

[[package]]
name = "agentaction"
version = "0.1.7"
@@ -1671,43 +1662,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/3e/9a/b697530a882588a84db616580f2ba5d1d515c815e11c30d219145afeec87/minio-7.2.20-py3-none-any.whl", hash = "sha256:eb33dd2fb80e04c3726a76b13241c6be3c4c46f8d81e1d58e757786f6501897e", size = 93751, upload-time = "2025-11-27T00:37:13.993Z" },
]

[[package]]
name = "mmcv"
version = "2.2.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "addict" },
    { name = "mmengine" },
    { name = "numpy" },
    { name = "opencv-python" },
    { name = "packaging" },
    { name = "pillow" },
    { name = "pyyaml" },
    { name = "regex", marker = "sys_platform == 'win32'" },
    { name = "yapf" },
]
sdist = { url = "https://files.pythonhosted.org/packages/e9/a2/57a733e7e84985a8a0e3101dfb8170fc9db92435c16afad253069ae3f9df/mmcv-2.2.0.tar.gz", hash = "sha256:ac479247e808d8802f89eadf04d4118de86bdfe81361ec5aed0cc1bf731c67c9", size = 479121, upload-time = "2024-04-24T14:24:28.064Z" }

[[package]]
name = "mmengine"
version = "0.10.7"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "addict" },
    { name = "matplotlib" },
    { name = "numpy" },
    { name = "opencv-python" },
    { name = "pyyaml" },
    { name = "regex", marker = "sys_platform == 'win32'" },
    { name = "rich" },
    { name = "termcolor" },
    { name = "yapf" },
]
sdist = { url = "https://files.pythonhosted.org/packages/17/14/959360bbd8374e23fc1b720906999add16a3ac071a501636db12c5861ff5/mmengine-0.10.7.tar.gz", hash = "sha256:d20ffcc31127567e53dceff132612a87f0081de06cbb7ab2bdb7439125a69225", size = 378090, upload-time = "2025-03-04T12:23:09.568Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/98/8e/f98332248aad102511bea4ae19c0ddacd2f0a994f3ca4c82b7a369e0af8b/mmengine-0.10.7-py3-none-any.whl", hash = "sha256:262ac976a925562f78cd5fd14dd1bc9b680ed0aa81f0d85b723ef782f99c54ee", size = 452720, upload-time = "2025-03-04T12:23:06.339Z" },
]

[[package]]
name = "mmh3"
version = "5.2.0"
@@ -1801,6 +1755,12 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/9e/c9/b2622292ea83fbb4ec318f5b9ab867d0a28ab43c5717bb85b0a5f6b3b0a4/networkx-3.6.1-py3-none-any.whl", hash = "sha256:d47fbf302e7d9cbbb9e2555a0d267983d2aa476bac30e90dfbe5669bd57f3762", size = 2068504, upload-time = "2025-12-08T17:02:38.159Z" },
]

[[package]]
name = "np"
version = "1.0.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/40/7d/749666e5a9976dcbc4d16d487bbe571efc6bbf4cdf3f4620c0ccc52b57ef/np-1.0.2.tar.gz", hash = "sha256:781265283f3823663ad8fb48741aae62abcf4c78bc19f908f8aa7c1d3eb132f8", size = 7419, upload-time = "2017-10-05T11:26:00.956Z" }

[[package]]
name = "numpy"
version = "1.26.4"
@@ -2269,15 +2229,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/bc/96/aaa61ce33cc98421fb6088af2a03be4157b1e7e0e87087c888e2370a7f45/pillow-12.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:7dfb439562f234f7d57b1ac6bc8fe7f838a4bd49c79230e0f6a1da93e82f1fad", size = 2436012, upload-time = "2025-10-15T18:22:23.621Z" },
]

[[package]]
name = "platformdirs"
version = "4.5.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" },
]

[[package]]
name = "posthog"
version = "5.4.0"
@@ -2746,17 +2697,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" },
]

[[package]]
name = "regex"
version = "2025.11.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/cc/a9/546676f25e573a4cf00fe8e119b78a37b6a8fe2dc95cda877b30889c9c45/regex-2025.11.3.tar.gz", hash = "sha256:1fedc720f9bb2494ce31a58a1631f9c82df6a09b49c19517ea5cc280b4541e01", size = 414669, upload-time = "2025-11-03T21:34:22.089Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/59/9b/7c29be7903c318488983e7d97abcf8ebd3830e4c956c4c540005fcfb0462/regex-2025.11.3-cp312-cp312-win32.whl", hash = "sha256:3839967cf4dc4b985e1570fd8d91078f0c519f30491c60f9ac42a8db039be204", size = 266194, upload-time = "2025-11-03T21:31:51.53Z" },
    { url = "https://files.pythonhosted.org/packages/1a/67/3b92df89f179d7c367be654ab5626ae311cb28f7d5c237b6bb976cd5fbbb/regex-2025.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:e721d1b46e25c481dc5ded6f4b3f66c897c58d2e8cfdf77bbced84339108b0b9", size = 277069, upload-time = "2025-11-03T21:31:53.151Z" },
    { url = "https://files.pythonhosted.org/packages/d7/55/85ba4c066fe5094d35b249c3ce8df0ba623cfd35afb22d6764f23a52a1c5/regex-2025.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:64350685ff08b1d3a6fff33f45a9ca183dc1d58bbfe4981604e70ec9801bbc26", size = 270330, upload-time = "2025-11-03T21:31:54.514Z" },
]

[[package]]
name = "requests"
version = "2.32.5"
@@ -3224,8 +3164,8 @@ dependencies = [
    { name = "load-dotenv" },
    { name = "loguru" },
    { name = "minio" },
    { name = "mmcv" },
    { name = "moviepy" },
    { name = "np" },
    { name = "numpy" },
    { name = "ollama" },
    { name = "opencv-python" },
@@ -3275,8 +3215,8 @@ requires-dist = [
    { name = "load-dotenv", specifier = ">=0.1.0" },
    { name = "loguru", specifier = ">=0.7.3" },
    { name = "minio", specifier = ">=7.2.20" },
    { name = "mmcv", specifier = ">=2.2.0" },
    { name = "moviepy", specifier = "==1.0.3" },
    { name = "np", specifier = ">=1.0.2" },
    { name = "numpy", specifier = "<2" },
    { name = "ollama", specifier = ">=0.6.1" },
    { name = "opencv-python", specifier = ">=4.11.0.86" },
@@ -3605,18 +3545,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/54/85/6ec269b0952ec7e36ba019125982cf11d91256a778c7c3f98a4c5043d283/xxhash-3.6.0-cp312-cp312-win_arm64.whl", hash = "sha256:eae5c13f3bc455a3bbb68bdc513912dc7356de7e2280363ea235f71f54064829", size = 27876, upload-time = "2025-10-02T14:34:54.371Z" },
]

[[package]]
name = "yapf"
version = "0.43.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "platformdirs" },
]
sdist = { url = "https://files.pythonhosted.org/packages/23/97/b6f296d1e9cc1ec25c7604178b48532fa5901f721bcf1b8d8148b13e5588/yapf-0.43.0.tar.gz", hash = "sha256:00d3aa24bfedff9420b2e0d5d9f5ab6d9d4268e72afbf59bb3fa542781d5218e", size = 254907, upload-time = "2024-11-14T00:11:41.584Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/37/81/6acd6601f61e31cfb8729d3da6d5df966f80f374b78eff83760714487338/yapf-0.43.0-py3-none-any.whl", hash = "sha256:224faffbc39c428cb095818cf6ef5511fdab6f7430a10783fdfb292ccf2852ca", size = 256158, upload-time = "2024-11-14T00:11:39.37Z" },
]

[[package]]
name = "yarl"
version = "1.22.0"