first commit
This commit is contained in:
153
.gitignore
vendored
Normal file
153
.gitignore
vendored
Normal file
@@ -0,0 +1,153 @@
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
.python-version
|
||||
|
||||
# celery beat schedule file
|
||||
celerybeat-schedule
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
.idea
|
||||
|
||||
|
||||
.test
|
||||
|
||||
# runtime-produced files
|
||||
test
|
||||
seg_cache
|
||||
logs
|
||||
seg_result/
|
||||
seg_result
|
||||
uwsgi
|
||||
*.yaml
|
||||
*.yml
|
||||
Dockerfile
|
||||
|
||||
.conf
|
||||
app/logs
|
||||
app/logs/*
|
||||
*.log
|
||||
/qodana.yaml
|
||||
.pth
|
||||
.pytorch
|
||||
*.png
|
||||
*.pth
|
||||
*.db
|
||||
*.npy
|
||||
*.pytorch
|
||||
*.jpg
|
||||
*.mp4
|
||||
*.sqlite3
|
||||
*.bin
|
||||
*.pickle
|
||||
*.csv
|
||||
*.avi
|
||||
*.json
|
||||
*.env*
|
||||
config.backup.py
|
||||
37
Dockerfile
Normal file
37
Dockerfile
Normal file
@@ -0,0 +1,37 @@
|
||||
# Two-stage build: grab the uv/uvx binaries from the official image, then run
# the app on a CUDA 12.4 Ubuntu 22.04 base.
FROM ghcr.io/astral-sh/uv:latest AS uv_bin
FROM nvidia/cuda:12.4.1-base-ubuntu22.04

# 1. Base environment configuration
ENV UV_LINK_MODE=copy \
    UV_COMPILE_BYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    UV_PROJECT_ENVIRONMENT=/app/.venv

# Copy the uv and uvx binaries from the first stage
COPY --from=uv_bin /uv /uvx /bin/

# libgl1/libglib2.0-0 are typical OpenCV runtime deps; build-essential for
# packages that compile native extensions.
RUN apt-get update && apt-get install -y --no-install-recommends \
    wget \
    libcurl4-openssl-dev \
    build-essential \
    libgl1 \
    libglib2.0-0 \
    ca-certificates \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copy only the dependency manifests first so the dependency layer is cached
# across source-code changes.
COPY pyproject.toml uv.lock ./

# NOTE(review): this overrides UV_COMPILE_BYTECODE=1 from the ENV block above
# before any uv command has run, so bytecode compilation is effectively
# disabled for the whole build — confirm which value is intended and drop the
# other.
ENV UV_COMPILE_BYTECODE=0

# NOTE(review): pyproject.toml declares requires-python ">=3.13" while uv is
# pinned to Python 3.10 here — confirm and align the two.
RUN uv sync --frozen --no-dev --no-install-project --python 3.10

# 4. Copy the project sources and install the project itself
COPY . .
RUN uv sync --frozen --no-dev --python 3.10

# Put the project virtualenv first on PATH
ENV PATH="/app/.venv/bin:$PATH"

EXPOSE 8000
CMD ["uv", "run","-m","app.main"]
|
||||
32
app/config/config.py
Normal file
32
app/config/config.py
Normal file
@@ -0,0 +1,32 @@
|
||||
import os
|
||||
|
||||
from pydantic_settings import BaseSettings, SettingsConfigDict
|
||||
from pydantic import Field
|
||||
|
||||
|
||||
# ⚠️ 注意: 您需要安装 pydantic-settings: pip install pydantic-settings
|
||||
|
||||
|
||||
class Settings(BaseSettings):
    """
    Application settings.

    pydantic-settings loads every field automatically from environment
    variables and the `.env` file (environment variables take precedence).
    """
    model_config = SettingsConfigDict(
        env_file='.env',
        env_file_encoding='utf-8',
        extra='ignore'  # silently ignore unknown keys in the environment
    )
    # Port the production server listens on
    SERVE_PROD: int = Field(default=8000, description='')
    # Debug switch (1 = running locally, 0 = production)
    LOCAL: int = Field(default=0, description="是否在本地运行,1表示本地运行,0表示生产环境运行")

    # MinIO object-storage configuration
    MINIO_URL: str = Field(default="", description="URL")
    MINIO_ACCESS: str = Field(default="", description="ACCESS")
    MINIO_SECRET: str = Field(default="", description="SECRET")
    # BUG FIX: the description previously said "SECRET" (copy-paste error);
    # this flag actually controls whether the MinIO connection uses TLS.
    MINIO_SECURE: bool = Field(default=True, description="SECURE")


# Shared settings instance used by the rest of the application
settings = Settings()
|
||||
13
app/server/generate_image/flux2_klein/client.py
Normal file
13
app/server/generate_image/flux2_klein/client.py
Normal file
@@ -0,0 +1,13 @@
|
||||
# This file is auto-generated by LitServe.
# Disable auto-generation by setting `generate_client_file=False` in `LitServer.run()`.
#
# Manual smoke-test client for the /api/v1/to_product endpoint.

import requests


request_data = {
    "prompt": "Create realistic studio photo with real people model standing and wearing this garment, in white studio, Keep original model if present, or generate appropriate model, Standing pose, facing camera.",
    "image_path": "aida-results/result_38151e0a-f83b-11f0-89f6-0242ac130002.png",
    "infer_step": 4,
    "tasks_id": "123456-123",
}
# BUG FIX: the URL previously contained a double slash ("8012//api/..."),
# which most routers treat as a different, unregistered path (404).
response = requests.post("http://127.0.0.1:8012/api/v1/to_product", json=request_data)
print(f"Status: {response.status_code}\nResponse:\n {response.text}")
|
||||
65
app/server/generate_image/flux2_klein/server.py
Normal file
65
app/server/generate_image/flux2_klein/server.py
Normal file
@@ -0,0 +1,65 @@
|
||||
import io
|
||||
import torch
|
||||
import litserve as ls
|
||||
from diffusers import Flux2KleinPipeline
|
||||
from minio import Minio
|
||||
|
||||
from app.config.config import settings
|
||||
from app.server.utils.minio_client import oss_get_image, oss_upload_image
|
||||
|
||||
# Shared MinIO client for downloading source images and uploading results.
minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE)
|
||||
|
||||
|
||||
class Flux2KleinServer(ls.LitAPI):
    """LitServe API that runs the FLUX.2-klein image-to-image pipeline.

    Request JSON: `prompt`, `image_path` ("bucket/object" in MinIO), and
    optional `height`, `width`, `infer_step`, `tasks_id`.  The response is
    the MinIO path of the generated PNG.
    """

    def setup(self, device):
        """Load the FLUX.2-klein pipeline and move it to `device`."""
        dtype = torch.bfloat16
        self.device = device
        self.model = Flux2KleinPipeline.from_pretrained("black-forest-labs/FLUX.2-klein-4B", torch_dtype=dtype, is_distilled=False)
        # Move the whole pipeline onto the target device (no CPU offload here;
        # the original comment claiming VRAM offload was stale).
        self.model.to(device)

    def decode_request(self, request):
        # The raw request dict is consumed directly by predict().
        return request

    def predict(self, request_data):
        """Generate one image and upload it to MinIO.

        Returns:
            str: object-storage path ("aida-users/<user>/product_image/<task>.png").
        """
        image_path = request_data.get("image_path", "")
        prompt = request_data.get("prompt", "")
        height = request_data.get("height", 768)
        width = request_data.get("width", 512)
        infer_step = request_data.get("infer_step", 4)
        tasks_id = request_data.get("tasks_id", "test")

        # Task ids look like "<uuid>-<user_id>": take the part after the last
        # '-'.  With no '-', rfind returns -1 and the whole id is used.
        user_id = tasks_id[tasks_id.rfind('-') + 1:]
        input_image = oss_get_image(oss_client=minio_client, path=image_path, data_type='pil')
        with torch.no_grad():
            images = self.model(
                image=input_image,
                prompt=prompt,
                height=height,
                width=width,
                guidance_scale=1.0,
                num_inference_steps=infer_step,
            )[0]

        # Serialize the first generated image as PNG and upload it to MinIO.
        image = images[0]
        image_data = io.BytesIO()
        image.save(image_data, format='PNG')
        image_bytes = image_data.getvalue()
        object_name = f'{user_id}/product_image/{tasks_id}.png'
        # NOTE(review): the upload result is not checked — a failed upload
        # still returns a path pointing at a missing object; consider
        # verifying it before answering the client.
        oss_upload_image(oss_client=minio_client, bucket="aida-users", object_name=object_name, image_bytes=image_bytes)
        image_url = f"aida-users/{object_name}"
        return image_url

    def encode_response(self, image_url):
        # Wrap the storage path in the JSON body returned to the client.
        return {"image_url": image_url}
|
||||
|
||||
|
||||
# Starting the server (standalone entry point; the production entry point is
# app/main.py, which mounts this API under /api/v1/to_product).
if __name__ == "__main__":
    # Assume that an appropriate device (e.g., 'cuda', 'cpu') is specified
    # NOTE(review): this runs on port 8011 while flux2_klein/client.py posts
    # to port 8012 — confirm which port is intended.
    api = Flux2KleinServer()
    server = ls.LitServer(api)
    server.run(port=8011)
|
||||
97
app/server/utils/minio_client.py
Normal file
97
app/server/utils/minio_client.py
Normal file
@@ -0,0 +1,97 @@
|
||||
import io
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
from io import BytesIO
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import urllib3
|
||||
from PIL import Image
|
||||
from minio import Minio
|
||||
|
||||
from app.config.config import settings
|
||||
|
||||
# Module-level MinIO client built from application settings.  NOTE(review):
# it is NOT wired to the custom retrying `http_client` constructed further
# down in this module (no http_client= argument) — confirm whether that was
# intended.
minio_client = Minio(settings.MINIO_URL, access_key=settings.MINIO_ACCESS, secret_key=settings.MINIO_SECRET, secure=settings.MINIO_SECURE)
|
||||
|
||||
|
||||
# Custom urllib3 Retry subclass that logs every retry attempt.
class CustomRetry(urllib3.Retry):
    def increment(self, method=None, url=None, response=None, error=None, **kwargs):
        # Let the parent class compute the next retry state first.
        new_retry = super(CustomRetry, self).increment(method, url, response, error, **kwargs)
        # Log which request is retried, the error, and attempts used so far.
        # (`logger` is defined later at module level; it exists by the time a
        # retry actually fires at runtime.)
        logger.info(f"重试连接: {method} {url},错误: {error},重试次数: {self.total - new_retry.total}")
        return new_retry
|
||||
|
||||
|
||||
logger = logging.getLogger()
# Connect timeout 1 s, read timeout 10 s (the original comment saying "5 s"
# did not match connect=1).
timeout = urllib3.Timeout(connect=1, read=10.0)
# NOTE(review): this pool manager (with the custom retry/timeout policy) is
# never passed to Minio(...) above, so the module-level minio_client uses the
# library defaults — confirm whether it should receive http_client=http_client.
http_client = urllib3.PoolManager(
    num_pools=10,  # connection pool count
    maxsize=10,
    timeout=timeout,
    cert_reqs='CERT_REQUIRED',  # require TLS certificate verification
    retries=CustomRetry(
        total=5,
        backoff_factor=0.2,
        status_forcelist=[500, 502, 503, 504],
    ),
)
|
||||
|
||||
|
||||
def oss_get_image(oss_client, path, data_type):
    """Download an image from object storage.

    Args:
        oss_client: a minio.Minio-compatible client.
        path: storage path in the form "bucket/object_name".
        data_type: "cv2" for a numpy array (cv2 reads all channels by
            default); anything else yields a PIL Image.

    Returns:
        The decoded image, or None if downloading/decoding failed.
    """
    # partition() splits on the first '/' and tolerates a slash-less path
    # (object_name becomes ""), whereas split("/", 1)[1] raised IndexError
    # outside the try block below.
    bucket, _, object_name = path.partition("/")
    image_object = None
    image_data = None
    try:
        image_data = oss_client.get_object(bucket_name=bucket, object_name=object_name)
        if data_type == "cv2":
            image_bytes = image_data.read()
            image_array = np.frombuffer(image_bytes, np.uint8)  # raw bytes as 8-bit unsigned
            image_object = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)
            if image_object.dtype == np.uint16:
                # Scale 16-bit images down to 8 bits
                image_object = (image_object / 256).astype('uint8')
        else:
            data_bytes = BytesIO(image_data.read())
            image_object = Image.open(data_bytes)
    except Exception as e:
        logger.warning(f"获取图片出现异常 ######: {e}")
    finally:
        # BUG FIX: the HTTP response was never closed, leaking the pooled
        # connection; minio requires close() + release_conn() after reading.
        if image_data is not None:
            image_data.close()
            image_data.release_conn()
    return image_object
|
||||
|
||||
|
||||
def oss_upload_image(oss_client, bucket, object_name, image_bytes):
    """Upload PNG bytes to object storage.

    Returns whatever the client's put_object returns, or None if the
    upload raised (the error is logged, not propagated).
    """
    result = None
    try:
        payload = io.BytesIO(image_bytes)
        result = oss_client.put_object(bucket_name=bucket, object_name=object_name, data=payload, length=len(image_bytes), content_type='image/png')
    except Exception as e:
        logger.warning(f"上传图片出现异常 ######: {e}")
    return result
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke test: download one object and display/save it locally.
    # Example paths seen in other buckets:
    # url = "aida-results/result_0002186a-e631-11ee-86a6-b48351119060.png"
    # url = "aida-users/89/sketch/4e8fe37d-7068-400a-ac94-c01647fa5f6f.png"

    url = "aida-users/123/product_image/123456-123.png"
    # Any value other than "cv2" takes the PIL branch below.
    read_type = "2"
    if read_type == "cv2":
        img = oss_get_image(oss_client=minio_client, path=url, data_type=read_type)
        cv2.imshow("", img)
        cv2.waitKey(0)
    else:
        img = oss_get_image(oss_client=minio_client, path=url, data_type=read_type)
        img.show()
        img.save("result.png")
|
||||
7
client.py
Normal file
7
client.py
Normal file
@@ -0,0 +1,7 @@
|
||||
# This file is auto-generated by LitServe.
# Disable auto-generation by setting `generate_client_file=False` in `LitServer.run()`.
#
# Minimal smoke-test client: POST a sample payload and print the reply.

import requests


_ENDPOINT = "http://127.0.0.1:8012/predict"
_PAYLOAD = {"input": 4.0}

response = requests.post(_ENDPOINT, json=_PAYLOAD)
print(f"Status: {response.status_code}\nResponse:\n {response.text}")
|
||||
25
docker-compose.yml
Normal file
25
docker-compose.yml
Normal file
@@ -0,0 +1,25 @@
|
||||
services:
  AiDA_Model_Litserve:
    build:
      context: .
      dockerfile: Dockerfile
    working_dir: /app
    environment:
      GOOGLE_APPLICATION_CREDENTIALS: /google_application_credentials.json
      DEBUG: 0
    volumes:
      # Live-mount application code so edits are visible without a rebuild
      - ./app:/app/app
      # Production env file mounted as the in-container .env
      - ./.prod_env:/app/.env
      - ./data:/data
      - ./google_application_credentials.json:/google_application_credentials.json
      # Keep the container clock in sync with the host
      - /etc/localtime:/etc/localtime:ro
    ports:
      # Host 10070 -> container 8000 (the port the Dockerfile EXPOSEs)
      - "10070:8000"
    deploy:
      resources:
        reservations:
          devices:
            # Tell Docker to use NVIDIA GPU 0
            - driver: nvidia
              device_ids: [ '0' ]
              capabilities: [ gpu ]
|
||||
50
logging_env.py
Normal file
50
logging_env.py
Normal file
@@ -0,0 +1,50 @@
|
||||
# Directory the rotating log files are written to; it must exist before
# logging.config.dictConfig() is applied or the file handlers fail to open.
LOGS_PATH = 'logs/'

# dictConfig()-style logging configuration: INFO+ to console and a rotating
# info file, ERROR+ and DEBUG+ to their own rotating files under LOGS_PATH.
LOGGER_CONFIG_DICT = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {'format': '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s'}
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
            'formatter': 'simple',
            'stream': 'ext://sys.stdout',
        },
        'info_file_handler': {
            'class': 'logging.handlers.RotatingFileHandler',
            'level': 'INFO',
            'formatter': 'simple',
            'filename': f'{LOGS_PATH}info.log',
            'maxBytes': 10485760,  # rotate at 10 MiB
            'backupCount': 50,
            'encoding': 'utf8',
        },
        'error_file_handler': {
            'class': 'logging.handlers.RotatingFileHandler',
            'level': 'ERROR',
            'formatter': 'simple',
            'filename': f'{LOGS_PATH}error.log',
            'maxBytes': 10485760,
            'backupCount': 20,
            'encoding': 'utf8',
        },
        'debug_file_handler': {
            'class': 'logging.handlers.RotatingFileHandler',
            'level': 'DEBUG',
            'formatter': 'simple',
            'filename': f'{LOGS_PATH}debug.log',
            'maxBytes': 10485760,
            'backupCount': 50,
            'encoding': 'utf8',
        },
    },
    'loggers': {
        # BUG FIX: 'propagate' was the string 'no', which is truthy and so
        # actually *enabled* propagation to the root logger (duplicating
        # my_module records in the root handlers); use the boolean False.
        'my_module': {'level': 'INFO', 'handlers': ['console'], 'propagate': False}
    },
    'root': {
        'level': 'INFO',
        'handlers': ['error_file_handler', 'info_file_handler', 'debug_file_handler', 'console'],
    },
}
|
||||
25
main.py
Normal file
25
main.py
Normal file
@@ -0,0 +1,25 @@
|
||||
import logging
# BUG FIX: logging.config is a submodule and is NOT imported by a bare
# `import logging`; dictConfig below previously worked only if some other
# import happened to pull logging.config in transitively.
import logging.config
import os

import litserve as ls

from app.config.config import settings
from logging_env import LOGGER_CONFIG_DICT
from app.server.generate_image.flux2_klein.server import Flux2KleinServer


logger = logging.getLogger(__name__)

# The RotatingFileHandlers in LOGGER_CONFIG_DICT write into logs/, so the
# directory must exist before dictConfig() is applied.  makedirs(exist_ok=True)
# replaces the redundant exists()+makedirs dance.
logs = 'logs'
already_existed = os.path.exists(logs)
os.makedirs(logs, exist_ok=True)

# Configure logging before emitting any records, so the messages below
# actually reach the configured handlers (they were previously logged
# before configuration and could be lost).
logging.config.dictConfig(LOGGER_CONFIG_DICT)

if already_existed:
    logger.info(f"目录 {logs} 已存在")
else:
    logger.info(f"目录 {logs} 创建成功")


if __name__ == '__main__':
    # Mount the Flux2-klein image-generation API under /api/v1/to_product.
    to_product = Flux2KleinServer(api_path='/api/v1/to_product')

    server = ls.LitServer([to_product])
    server.run(port=settings.SERVE_PROD)
|
||||
19
pyproject.toml
Normal file
19
pyproject.toml
Normal file
@@ -0,0 +1,19 @@
|
||||
[project]
name = "aida-model-litserve"
version = "0.1.0"
description = "Add your description here"
# NOTE(review): the Dockerfile runs `uv sync ... --python 3.10`, which
# conflicts with this constraint — confirm the intended Python version.
requires-python = ">=3.13"
dependencies = [
    "accelerate>=1.12.0",
    # Unpinned on purpose: resolved from the git source declared below
    "diffusers",
    "litserve>=0.2.17",
    "minio>=7.2.20",
    "opencv-python>=4.13.0.90",
    "pillow>=12.1.0",
    "pydantic-settings>=2.12.0",
    "torch>=2.10.0",
    "transformers>=4.57.6",
]

[tool.uv.sources]
# diffusers comes from git HEAD — presumably for Flux2KleinPipeline support
# not yet in a release; verify once a tagged release includes it.
diffusers = { git = "https://github.com/huggingface/diffusers.git" }
|
||||
Reference in New Issue
Block a user