feat(image2sketch): switch sketch model
fix
This commit is contained in:
45
app/service/image2sketch_2/download_checkpoints.py
Normal file
45
app/service/image2sketch_2/download_checkpoints.py
Normal file
@@ -0,0 +1,45 @@
|
||||
import os

from minio import Minio
from minio.error import S3Error

# MinIO connection settings.
# NOTE(review): credentials are hard-coded in source — they should be moved to
# environment variables / config and these keys rotated.
MINIO_URL = "www.minio.aida.com.hk:12024"
MINIO_ACCESS = 'vXKFLSJkYeEq2DrSZvkB'
MINIO_SECRET = 'uKTZT3x7C43WvPN9QTc99DiRkwddWZrG9Uh3JVlR'
MINIO_SECURE = True

# Configure the MinIO client (module-level, shared by download_folder below)
minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE)
|
||||
|
||||
|
||||
# Download helper
def download_folder(bucket_name, folder_name, local_dir):
    """Download every object under ``folder_name`` in ``bucket_name`` to ``local_dir``.

    The object hierarchy below the prefix is mirrored under ``local_dir``.
    S3 errors are printed and swallowed (best-effort, matching the original
    behaviour); any other exception propagates.

    :param bucket_name: name of the MinIO bucket to read from
    :param folder_name: object-name prefix to download, e.g. "checkpoints/lineart/"
    :param local_dir: local directory that receives the files (created if missing)
    """
    try:
        # exist_ok avoids the check-then-create race of exists() + makedirs()
        os.makedirs(local_dir, exist_ok=True)

        # Walk every object under the prefix in MinIO
        objects = minio_client.list_objects(bucket_name, prefix=folder_name, recursive=True)
        for obj in objects:
            # Strip the prefix so the local tree mirrors the remote folder
            local_file_path = os.path.join(local_dir, obj.object_name[len(folder_name):])
            local_file_dir = os.path.dirname(local_file_path)

            # Ensure the containing directory exists. The emptiness guard also
            # fixes a crash in the original: os.makedirs('') raises when an
            # object sits directly under the prefix (dirname is '').
            if local_file_dir:
                os.makedirs(local_file_dir, exist_ok=True)

            # Download the file
            minio_client.fget_object(bucket_name, obj.object_name, local_file_path)
            print(f"Downloaded {obj.object_name} to {local_file_path}")

    except S3Error as e:
        # Best-effort: report and continue, as in the original script
        print(f"Error occurred: {e}")
|
||||
|
||||
|
||||
# Usage example — guarded so that importing this module no longer triggers a
# full checkpoint download as a side effect (the original ran unconditionally).
if __name__ == '__main__':
    bucket_name = "test"  # replace with your bucket name
    folder_name = "checkpoints/lineart/"  # path of the checkpoint folder
    local_dir = "app/service/image2sketch_2"  # local directory to save into

    download_folder(bucket_name, folder_name, local_dir)
|
||||
142
app/service/image2sketch_2/server.py
Normal file
142
app/service/image2sketch_2/server.py
Normal file
@@ -0,0 +1,142 @@
|
||||
import cv2
|
||||
import numpy
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torchvision.transforms as transforms
|
||||
from PIL import Image
|
||||
|
||||
from app.service.utils.oss_client import oss_get_image, oss_upload_image
|
||||
|
||||
# Normalisation layer used throughout the generator.
norm_layer = nn.InstanceNorm2d

# (original, eroded) blend weights, indexed by the ``thickness`` argument of
# processing_pipeline; higher index gives the eroded (thicker-line) image more
# weight. NOTE(review): valid indices are 0-4 — confirm callers never pass
# thickness > 4, which would raise IndexError below.
weights = [(0.7, 0.3), (0.5, 0.5), (0.3, 0.7), (0.1, 0.9), (0, 1)]
# 3x3 structuring element for cv2.erode in processing_pipeline.
kernel = np.ones((3, 3), np.uint8)
|
||||
|
||||
|
||||
class ResidualBlock(nn.Module):
    """Residual block: two reflection-padded 3x3 convolutions plus a skip connection."""

    def __init__(self, in_features):
        super(ResidualBlock, self).__init__()

        # Channel count is unchanged so the skip addition in forward() is valid.
        layers = [
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_features, in_features, 3),
            norm_layer(in_features),
            nn.ReLU(inplace=True),
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_features, in_features, 3),
            norm_layer(in_features),
        ]
        self.conv_block = nn.Sequential(*layers)

    def forward(self, x):
        # Identity shortcut around the convolutional path.
        return self.conv_block(x) + x
|
||||
|
||||
|
||||
class Generator(nn.Module):
    """Image-to-sketch generator: 7x7 conv stem, two stride-2 downsampling
    stages, a residual trunk, two stride-2 upsampling stages, and a 7x7 output
    head (optionally sigmoid-activated).

    Attribute names ``model0``..``model4`` are preserved so existing
    checkpoints keep loading via ``load_state_dict``.
    """

    def __init__(self, input_nc, output_nc, n_residual_blocks=9, sigmoid=True):
        super(Generator, self).__init__()

        # Initial 7x7 convolution stem.
        self.model0 = nn.Sequential(
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, 64, 7),
            norm_layer(64),
            nn.ReLU(inplace=True),
        )

        # Downsampling: two stride-2 stages, 64 -> 128 -> 256 channels.
        down = []
        channels = 64
        for _ in range(2):
            down += [
                nn.Conv2d(channels, channels * 2, 3, stride=2, padding=1),
                norm_layer(channels * 2),
                nn.ReLU(inplace=True),
            ]
            channels *= 2
        self.model1 = nn.Sequential(*down)

        # Residual trunk at the bottleneck resolution.
        self.model2 = nn.Sequential(*[ResidualBlock(channels) for _ in range(n_residual_blocks)])

        # Upsampling: two stride-2 transposed-conv stages, 256 -> 128 -> 64.
        up = []
        for _ in range(2):
            up += [
                nn.ConvTranspose2d(channels, channels // 2, 3, stride=2, padding=1, output_padding=1),
                norm_layer(channels // 2),
                nn.ReLU(inplace=True),
            ]
            channels //= 2
        self.model3 = nn.Sequential(*up)

        # 7x7 output head; sigmoid squashes the output to [0, 1] when requested.
        head = [nn.ReflectionPad2d(3), nn.Conv2d(64, output_nc, 7)]
        if sigmoid:
            head.append(nn.Sigmoid())
        self.model4 = nn.Sequential(*head)

    def forward(self, x, cond=None):
        # ``cond`` is accepted for interface compatibility but unused.
        out = self.model0(x)
        out = self.model1(out)
        out = self.model2(out)
        out = self.model3(out)
        return self.model4(out)
|
||||
|
||||
|
||||
# Instantiate the generator (3-channel RGB in, 1-channel sketch out, 3 residual
# blocks) and load the pretrained weights on CPU.
# NOTE(review): the checkpoint path is relative to the working directory and
# does not match this package's path ("app/service/image2sketch_2") — confirm
# the process is launched from a directory where this resolves.
model1 = Generator(3, 1, 3)
model1.load_state_dict(torch.load('service/image2sketch_2/model.pth', map_location=torch.device('cpu')))
# Inference only: switch off training-mode behaviour of the layers.
model1.eval()
|
||||
|
||||
|
||||
def predict(input_img, width):
    """Run the sketch generator on a PIL image and return the sketch as an ndarray.

    :param input_img: input PIL image (RGB)
    :param width: target size passed to transforms.Resize (int: shorter edge)
    :return: uint8 numpy array containing the generated sketch
    """
    preprocess = transforms.Compose([
        transforms.Resize(width, Image.BICUBIC),
        transforms.ToTensor(),
    ])
    batch = preprocess(input_img).unsqueeze(0)

    # Inference only — no gradients needed.
    with torch.no_grad():
        sketch_tensor = model1(batch)[0].detach()

    # Tensor -> PIL -> ndarray.
    sketch = transforms.ToPILImage()(sketch_tensor)
    return numpy.array(sketch)
|
||||
|
||||
|
||||
def get_image(image_url):
    """Fetch an image from object storage and return it as RGB with its size.

    :param image_url: path of the form "<bucket>/<object_name>"
    :return: tuple (RGB PIL image, width, height)
    """
    bucket = image_url.split('/')[0]
    object_name = image_url[image_url.find('/') + 1:]
    image = oss_get_image(bucket=bucket, object_name=object_name, data_type="PIL")
    image = image.convert('RGB')
    width, height = image.size
    return image, width, height
|
||||
|
||||
|
||||
def processing_pipeline(image_url, thickness, sketch_bucket, sketch_name):
    """Generate a line sketch from an OSS image, optionally thicken the lines,
    and upload the result.

    :param image_url: source image as "<bucket>/<object_name>"
    :param thickness: line-thickness level; 0 keeps the raw sketch, 1..4 blend
        in an eroded (thicker-line) version with increasing weight
    :param sketch_bucket: destination bucket for the sketch
    :param sketch_name: destination object name for the sketch
    :return: "<bucket>/<object_name>" of the uploaded sketch
    :raises ValueError: if thickness is outside the supported range
    """
    thickness = int(thickness)
    # Fail fast instead of an IndexError for thickness > 4, or a silent wrong
    # result for negative values (weights[-1] in the original).
    if not 0 <= thickness < len(weights):
        raise ValueError(f"thickness must be in [0, {len(weights) - 1}], got {thickness}")

    # Extract the sketch
    image, width, height = get_image(image_url)
    sketch_image = predict(image, width)

    # Adjust line thickness: erosion expands dark regions, so on a
    # dark-lines-on-light sketch it thickens the strokes. Blend original and
    # eroded images with level-dependent weights. (The original code named the
    # eroded image "dilated" and commented it as dilation — misleading.)
    if thickness != 0:
        eroded = cv2.erode(sketch_image, kernel, iterations=1)
        sketch_image = cv2.addWeighted(sketch_image, weights[thickness][0], eroded, weights[thickness][1], 0)

    # Upload to MinIO
    image_bytes = cv2.imencode(".jpg", sketch_image)[1].tobytes()
    req = oss_upload_image(bucket=sketch_bucket, object_name=sketch_name, image_bytes=image_bytes)
    return f"{req.bucket_name}/{req.object_name}"
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Smoke test: run the full pipeline on a sample image and print the
    # resulting "<bucket>/<object>" path of the uploaded sketch.
    result_url = processing_pipeline("aida-users/89/relight_image/d5f0d967-f8e8-424d-98f9-a8ad8313deec-0-89.png", 1, "test", "test123.jpg")
    print(result_url)
|
||||
@@ -9,6 +9,7 @@ from PIL import Image
|
||||
from minio import Minio
|
||||
|
||||
from app.core.config import *
|
||||
from app.service.utils.decorator import RunTime
|
||||
|
||||
minio_client = Minio(MINIO_URL, access_key=MINIO_ACCESS, secret_key=MINIO_SECRET, secure=MINIO_SECURE)
|
||||
|
||||
@@ -39,6 +40,7 @@ http_client = urllib3.PoolManager(
|
||||
|
||||
|
||||
# 获取图片
|
||||
@RunTime
|
||||
def oss_get_image(oss_client, bucket, object_name, data_type):
|
||||
# cv2 默认全通道读取
|
||||
image_object = None
|
||||
@@ -58,6 +60,7 @@ def oss_get_image(oss_client, bucket, object_name, data_type):
|
||||
return image_object
|
||||
|
||||
|
||||
@RunTime
|
||||
def oss_upload_image(oss_client, bucket, object_name, image_bytes):
|
||||
req = None
|
||||
try:
|
||||
|
||||
Reference in New Issue
Block a user