Add CodeFormer and update license
100
facelib/detection/__init__.py
Normal file
@@ -0,0 +1,100 @@
import os
import torch
from torch import nn
from copy import deepcopy

from facelib.utils import load_file_from_url
from facelib.utils import download_pretrained_models
from facelib.detection.yolov5face.models.common import Conv

from .retinaface.retinaface import RetinaFace
from .yolov5face.face_detector import YoloDetector


def init_detection_model(model_name, half=False, device='cuda'):
    if 'retinaface' in model_name:
        model = init_retinaface_model(model_name, half, device)
    elif 'YOLOv5' in model_name:
        model = init_yolov5face_model(model_name, device)
    else:
        raise NotImplementedError(f'{model_name} is not implemented.')

    return model


def init_retinaface_model(model_name, half=False, device='cuda'):
    if model_name == 'retinaface_resnet50':
        model = RetinaFace(network_name='resnet50', half=half)
        model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/detection_Resnet50_Final.pth'
    elif model_name == 'retinaface_mobile0.25':
        model = RetinaFace(network_name='mobile0.25', half=half)
        model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/detection_mobilenet0.25_Final.pth'
    else:
        raise NotImplementedError(f'{model_name} is not implemented.')

    model_path = load_file_from_url(url=model_url, model_dir='weights/facelib', progress=True, file_name=None)
    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
    # remove the unnecessary 'module.' prefix left over from DataParallel training
    for k, v in deepcopy(load_net).items():
        if k.startswith('module.'):
            load_net[k[7:]] = v
            load_net.pop(k)
    model.load_state_dict(load_net, strict=True)
    model.eval()
    model = model.to(device)

    return model


def init_yolov5face_model(model_name, device='cuda'):
    if model_name == 'YOLOv5l':
        model = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5l.yaml', device=device)
        model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/yolov5l-face.pth'
    elif model_name == 'YOLOv5n':
        model = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5n.yaml', device=device)
        model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/yolov5n-face.pth'
    else:
        raise NotImplementedError(f'{model_name} is not implemented.')

    model_path = load_file_from_url(url=model_url, model_dir='weights/facelib', progress=True, file_name=None)
    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.detector.load_state_dict(load_net, strict=True)
    model.detector.eval()
    model.detector = model.detector.to(device).float()

    for m in model.detector.modules():
        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
            m.inplace = True  # pytorch 1.7.0 compatibility
        elif isinstance(m, Conv):
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

    return model


# Download from Google Drive
# def init_yolov5face_model(model_name, device='cuda'):
#     if model_name == 'YOLOv5l':
#         model = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5l.yaml', device=device)
#         f_id = {'yolov5l-face.pth': '131578zMA6B2x8VQHyHfa6GEPtulMCNzV'}
#     elif model_name == 'YOLOv5n':
#         model = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5n.yaml', device=device)
#         f_id = {'yolov5n-face.pth': '1fhcpFvWZqghpGXjYPIne2sw1Fy4yhw6o'}
#     else:
#         raise NotImplementedError(f'{model_name} is not implemented.')

#     model_path = os.path.join('weights/facelib', list(f_id.keys())[0])
#     if not os.path.exists(model_path):
#         download_pretrained_models(file_ids=f_id, save_path_root='weights/facelib')

#     load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
#     model.detector.load_state_dict(load_net, strict=True)
#     model.detector.eval()
#     model.detector = model.detector.to(device).float()

#     for m in model.detector.modules():
#         if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
#             m.inplace = True  # pytorch 1.7.0 compatibility
#         elif isinstance(m, Conv):
#             m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

#     return model
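
# Usage sketch (illustrative, not part of the original commit; assumes the
# release weights above are downloadable and the repo root is on sys.path):
#
#     from facelib.detection import init_detection_model
#     detector = init_detection_model('retinaface_resnet50', half=False, device='cuda')
#     # 'YOLOv5l' or 'YOLOv5n' selects the YOLOv5-face variants instead.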
219
facelib/detection/align_trans.py
Normal file
@@ -0,0 +1,219 @@
import cv2
import numpy as np

from .matlab_cp2tform import get_similarity_transform_for_cv2

# reference facial points, a list of coordinates (x, y)
REFERENCE_FACIAL_POINTS = [[30.29459953, 51.69630051], [65.53179932, 51.50139999], [48.02519989, 71.73660278],
                           [33.54930115, 92.3655014], [62.72990036, 92.20410156]]

DEFAULT_CROP_SIZE = (96, 112)


class FaceWarpException(Exception):

    def __str__(self):
        return 'In File {}:{}'.format(__file__, super().__str__())


def get_reference_facial_points(output_size=None, inner_padding_factor=0.0, outer_padding=(0, 0), default_square=False):
    """
    Function:
    ----------
    get reference 5 key points according to crop settings:
        0. Set default crop_size:
            if default_square:
                crop_size = (112, 112)
            else:
                crop_size = (96, 112)
        1. Pad the crop_size by inner_padding_factor on each side;
        2. Resize crop_size into (output_size - outer_padding*2),
           pad into output_size with outer_padding;
        3. Output reference_5point;
    Parameters:
    ----------
        @output_size: (w, h) or None
            size of aligned face image
        @inner_padding_factor: (w_factor, h_factor)
            padding factor for inner (w, h)
        @outer_padding: (w_pad, h_pad)
            padding added on each side of the output
        @default_square: True or False
            if True:
                default crop_size = (112, 112)
            else:
                default crop_size = (96, 112);
        !!! make sure, if output_size is not None:
                (output_size - outer_padding)
                = some_scale * (default crop_size * (1.0 + inner_padding_factor))
    Returns:
    ----------
        @reference_5point: 5x2 np.array
            each row is a pair of transformed coordinates (x, y)
    """

    tmp_5pts = np.array(REFERENCE_FACIAL_POINTS)
    tmp_crop_size = np.array(DEFAULT_CROP_SIZE)

    # 0) make the inner region a square
    if default_square:
        size_diff = max(tmp_crop_size) - tmp_crop_size
        tmp_5pts += size_diff / 2
        tmp_crop_size += size_diff

    if (output_size and output_size[0] == tmp_crop_size[0] and output_size[1] == tmp_crop_size[1]):
        return tmp_5pts

    if (inner_padding_factor == 0 and outer_padding == (0, 0)):
        if output_size is None:
            return tmp_5pts
        else:
            raise FaceWarpException('No paddings to do, output_size must be None or {}'.format(tmp_crop_size))

    # check output size
    if not (0 <= inner_padding_factor <= 1.0):
        raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)')

    if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0) and output_size is None):
        output_size = (tmp_crop_size * (1 + inner_padding_factor * 2)).astype(np.int32)
        output_size += np.array(outer_padding)
    if not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1]):
        raise FaceWarpException('Not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1])')

    # 1) pad the inner region according to inner_padding_factor
    if inner_padding_factor > 0:
        size_diff = tmp_crop_size * inner_padding_factor * 2
        tmp_5pts += size_diff / 2
        tmp_crop_size += np.round(size_diff).astype(np.int32)

    # 2) resize the padded inner region
    size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2

    if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]:
        raise FaceWarpException('Must have (output_size - outer_padding)'
                                '= some_scale * (crop_size * (1.0 + inner_padding_factor)')

    scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0]
    tmp_5pts = tmp_5pts * scale_factor
    # size_diff = tmp_crop_size * (scale_factor - min(scale_factor))
    # tmp_5pts = tmp_5pts + size_diff / 2
    tmp_crop_size = size_bf_outer_pad

    # 3) add outer_padding to make output_size
    reference_5point = tmp_5pts + np.array(outer_padding)
    tmp_crop_size = output_size

    return reference_5point
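
# Worked example (illustrative, not part of the original file): with
# default_square=True and no padding, the 96x112 template is shifted by
# (112 - 96) / 2 = 8 px in x to centre it in a 112x112 crop:
#
#     pts = get_reference_facial_points(default_square=True)
#     # pts[0] ~ [38.29, 51.70] (left eye); pts.shape == (5, 2)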


def get_affine_transform_matrix(src_pts, dst_pts):
    """
    Function:
    ----------
    get affine transform matrix 'tfm' from src_pts to dst_pts
    Parameters:
    ----------
        @src_pts: Kx2 np.array
            source points matrix, each row is a pair of coordinates (x, y)
        @dst_pts: Kx2 np.array
            destination points matrix, each row is a pair of coordinates (x, y)
    Returns:
    ----------
        @tfm: 2x3 np.array
            transform matrix from src_pts to dst_pts
    """

    tfm = np.float32([[1, 0, 0], [0, 1, 0]])
    n_pts = src_pts.shape[0]
    ones = np.ones((n_pts, 1), src_pts.dtype)
    src_pts_ = np.hstack([src_pts, ones])
    dst_pts_ = np.hstack([dst_pts, ones])

    # rcond=-1 keeps the legacy lstsq behavior, matching matlab_cp2tform.py
    A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_, rcond=-1)

    if rank == 3:
        tfm = np.float32([[A[0, 0], A[1, 0], A[2, 0]], [A[0, 1], A[1, 1], A[2, 1]]])
    elif rank == 2:
        tfm = np.float32([[A[0, 0], A[1, 0], 0], [A[0, 1], A[1, 1], 0]])

    return tfm


def warp_and_crop_face(src_img, facial_pts, reference_pts=None, crop_size=(96, 112), align_type='similarity'):
    """
    Function:
    ----------
    warp and crop the input face image according to the facial points
    Parameters:
    ----------
        @src_img: HxWxC np.array
            input image
        @facial_pts: could be
            1) a list of K coordinates (x, y)
            or
            2) Kx2 or 2xK np.array
            each row or col is a pair of coordinates (x, y)
        @reference_pts: could be
            1) a list of K coordinates (x, y)
            or
            2) Kx2 or 2xK np.array
            each row or col is a pair of coordinates (x, y)
            or
            3) None
            if None, use default reference facial points
        @crop_size: (w, h)
            output face image size
        @align_type: transform type, could be one of
            1) 'similarity': use similarity transform
            2) 'cv2_affine': use the first 3 points to do affine transform,
               by calling cv2.getAffineTransform()
            3) 'affine': use all points to do affine transform
    Returns:
    ----------
        @face_img: output face image with size (w, h) = @crop_size
    """

    if reference_pts is None:
        if crop_size[0] == 96 and crop_size[1] == 112:
            reference_pts = REFERENCE_FACIAL_POINTS
        else:
            default_square = False
            inner_padding_factor = 0
            outer_padding = (0, 0)
            output_size = crop_size

            reference_pts = get_reference_facial_points(output_size, inner_padding_factor, outer_padding,
                                                        default_square)

    ref_pts = np.float32(reference_pts)
    ref_pts_shp = ref_pts.shape
    if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
        raise FaceWarpException('reference_pts.shape must be (K,2) or (2,K) and K>2')

    if ref_pts_shp[0] == 2:
        ref_pts = ref_pts.T

    src_pts = np.float32(facial_pts)
    src_pts_shp = src_pts.shape
    if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
        raise FaceWarpException('facial_pts.shape must be (K,2) or (2,K) and K>2')

    if src_pts_shp[0] == 2:
        src_pts = src_pts.T

    if src_pts.shape != ref_pts.shape:
        raise FaceWarpException('facial_pts and reference_pts must have the same shape')

    if align_type == 'cv2_affine':
        tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
    elif align_type == 'affine':
        tfm = get_affine_transform_matrix(src_pts, ref_pts)
    else:
        tfm = get_similarity_transform_for_cv2(src_pts, ref_pts)

    face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))

    return face_img
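
# Usage sketch (illustrative; `img` and the 5x2 landmark array are assumed inputs,
# e.g. from a face detector):
#
#     ref = get_reference_facial_points(default_square=True)  # 112x112 template
#     face_112 = warp_and_crop_face(np.array(img), landmarks_5x2, ref, crop_size=(112, 112))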
317
facelib/detection/matlab_cp2tform.py
Normal file
@@ -0,0 +1,317 @@
import numpy as np
from numpy.linalg import inv, lstsq
from numpy.linalg import matrix_rank as rank
from numpy.linalg import norm


class MatlabCp2tormException(Exception):

    def __str__(self):
        return 'In File {}:{}'.format(__file__, super().__str__())


def tformfwd(trans, uv):
    """
    Function:
    ----------
    apply affine transform 'trans' to uv

    Parameters:
    ----------
        @trans: 3x3 np.array
            transform matrix
        @uv: Kx2 np.array
            each row is a pair of coordinates (x, y)

    Returns:
    ----------
        @xy: Kx2 np.array
            each row is a pair of transformed coordinates (x, y)
    """
    uv = np.hstack((uv, np.ones((uv.shape[0], 1))))
    xy = np.dot(uv, trans)
    xy = xy[:, 0:-1]
    return xy


def tforminv(trans, uv):
    """
    Function:
    ----------
    apply the inverse of affine transform 'trans' to uv

    Parameters:
    ----------
        @trans: 3x3 np.array
            transform matrix
        @uv: Kx2 np.array
            each row is a pair of coordinates (x, y)

    Returns:
    ----------
        @xy: Kx2 np.array
            each row is a pair of inverse-transformed coordinates (x, y)
    """
    Tinv = inv(trans)
    xy = tformfwd(Tinv, uv)
    return xy


def findNonreflectiveSimilarity(uv, xy, options=None):
    if options is None:
        options = {'K': 2}

    K = options['K']
    M = xy.shape[0]
    x = xy[:, 0].reshape((-1, 1))  # use reshape to keep a column vector
    y = xy[:, 1].reshape((-1, 1))  # use reshape to keep a column vector

    tmp1 = np.hstack((x, y, np.ones((M, 1)), np.zeros((M, 1))))
    tmp2 = np.hstack((y, -x, np.zeros((M, 1)), np.ones((M, 1))))
    X = np.vstack((tmp1, tmp2))

    u = uv[:, 0].reshape((-1, 1))  # use reshape to keep a column vector
    v = uv[:, 1].reshape((-1, 1))  # use reshape to keep a column vector
    U = np.vstack((u, v))

    # We know that X * r = U
    if rank(X) >= 2 * K:
        r, _, _, _ = lstsq(X, U, rcond=-1)
        r = np.squeeze(r)
    else:
        raise Exception('cp2tform:twoUniquePointsReq')
    sc = r[0]
    ss = r[1]
    tx = r[2]
    ty = r[3]

    Tinv = np.array([[sc, -ss, 0], [ss, sc, 0], [tx, ty, 1]])
    T = inv(Tinv)
    T[:, 2] = np.array([0, 0, 1])

    return T, Tinv


def findSimilarity(uv, xy, options=None):
    if options is None:
        options = {'K': 2}

    # uv = np.array(uv)
    # xy = np.array(xy)

    # Solve for trans1
    trans1, trans1_inv = findNonreflectiveSimilarity(uv, xy, options)

    # Solve for trans2

    # manually reflect the xy data across the Y-axis
    xyR = xy.copy()  # copy so the caller's xy is not mutated below
    xyR[:, 0] = -1 * xyR[:, 0]

    trans2r, trans2r_inv = findNonreflectiveSimilarity(uv, xyR, options)

    # manually reflect the tform to undo the reflection done on xyR
    TreflectY = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])

    trans2 = np.dot(trans2r, TreflectY)

    # Figure out if trans1 or trans2 is better
    xy1 = tformfwd(trans1, uv)
    norm1 = norm(xy1 - xy)

    xy2 = tformfwd(trans2, uv)
    norm2 = norm(xy2 - xy)

    if norm1 <= norm2:
        return trans1, trans1_inv
    else:
        trans2_inv = inv(trans2)
        return trans2, trans2_inv


def get_similarity_transform(src_pts, dst_pts, reflective=True):
    """
    Function:
    ----------
    Find Similarity Transform Matrix 'trans':
        u = src_pts[:, 0]
        v = src_pts[:, 1]
        x = dst_pts[:, 0]
        y = dst_pts[:, 1]
        [x, y, 1] = [u, v, 1] * trans

    Parameters:
    ----------
        @src_pts: Kx2 np.array
            source points, each row is a pair of coordinates (x, y)
        @dst_pts: Kx2 np.array
            destination points, each row is a pair of transformed
            coordinates (x, y)
        @reflective: True or False
            if True:
                use reflective similarity transform
            else:
                use non-reflective similarity transform

    Returns:
    ----------
        @trans: 3x3 np.array
            transform matrix from uv to xy
        @trans_inv: 3x3 np.array
            inverse of trans, transform matrix from xy to uv
    """

    if reflective:
        trans, trans_inv = findSimilarity(src_pts, dst_pts)
    else:
        trans, trans_inv = findNonreflectiveSimilarity(src_pts, dst_pts)

    return trans, trans_inv


def cvt_tform_mat_for_cv2(trans):
    """
    Function:
    ----------
    Convert Transform Matrix 'trans' into 'cv2_trans' which could be
    directly used by cv2.warpAffine():
        u = src_pts[:, 0]
        v = src_pts[:, 1]
        x = dst_pts[:, 0]
        y = dst_pts[:, 1]
        [x, y].T = cv_trans * [u, v, 1].T

    Parameters:
    ----------
        @trans: 3x3 np.array
            transform matrix from uv to xy

    Returns:
    ----------
        @cv2_trans: 2x3 np.array
            transform matrix from src_pts to dst_pts, could be directly used
            for cv2.warpAffine()
    """
    cv2_trans = trans[:, 0:2].T

    return cv2_trans


def get_similarity_transform_for_cv2(src_pts, dst_pts, reflective=True):
    """
    Function:
    ----------
    Find Similarity Transform Matrix 'cv2_trans' which could be
    directly used by cv2.warpAffine():
        u = src_pts[:, 0]
        v = src_pts[:, 1]
        x = dst_pts[:, 0]
        y = dst_pts[:, 1]
        [x, y].T = cv_trans * [u, v, 1].T

    Parameters:
    ----------
        @src_pts: Kx2 np.array
            source points, each row is a pair of coordinates (x, y)
        @dst_pts: Kx2 np.array
            destination points, each row is a pair of transformed
            coordinates (x, y)
        @reflective: True or False
            if True:
                use reflective similarity transform
            else:
                use non-reflective similarity transform

    Returns:
    ----------
        @cv2_trans: 2x3 np.array
            transform matrix from src_pts to dst_pts, could be directly used
            for cv2.warpAffine()
    """
    trans, trans_inv = get_similarity_transform(src_pts, dst_pts, reflective)
    cv2_trans = cvt_tform_mat_for_cv2(trans)

    return cv2_trans
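
# Usage sketch (illustrative; `src_5pts` are detected landmarks and `dst_5pts` a
# 5x2 reference template): the returned 2x3 matrix plugs straight into OpenCV.
#
#     import cv2
#     tfm = get_similarity_transform_for_cv2(src_5pts, dst_5pts)
#     aligned = cv2.warpAffine(img, tfm, (112, 112))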


if __name__ == '__main__':
    """
    u = [0, 6, -2]
    v = [0, 3, 5]
    x = [-1, 0, 4]
    y = [-1, -10, 4]

    # In Matlab, run:
    #
    #   uv = [u'; v'];
    #   xy = [x'; y'];
    #   tform_sim = cp2tform(uv, xy, 'similarity');
    #
    #   trans = tform_sim.tdata.T
    #   ans =
    #       -0.0764   -1.6190         0
    #        1.6190   -0.0764         0
    #       -3.2156    0.0290    1.0000
    #   trans_inv = tform_sim.tdata.Tinv
    #   ans =
    #
    #       -0.0291    0.6163         0
    #       -0.6163   -0.0291         0
    #       -0.0756    1.9826    1.0000
    #   xy_m = tformfwd(tform_sim, u, v)
    #
    #   xy_m =
    #
    #       -3.2156    0.0290
    #        1.1833   -9.9143
    #        5.0323    2.8853
    #   uv_m = tforminv(tform_sim, x, y)
    #
    #   uv_m =
    #
    #        0.5698    1.3953
    #        6.0872    2.2733
    #       -2.6570    4.3314
    """
    u = [0, 6, -2]
    v = [0, 3, 5]
    x = [-1, 0, 4]
    y = [-1, -10, 4]

    uv = np.array((u, v)).T
    xy = np.array((x, y)).T

    print('\n--->uv:')
    print(uv)
    print('\n--->xy:')
    print(xy)

    trans, trans_inv = get_similarity_transform(uv, xy)

    print('\n--->trans matrix:')
    print(trans)

    print('\n--->trans_inv matrix:')
    print(trans_inv)

    print('\n---> apply transform to uv')
    print('\nxy_m = uv_augmented * trans')
    uv_aug = np.hstack((uv, np.ones((uv.shape[0], 1))))
    xy_m = np.dot(uv_aug, trans)
    print(xy_m)

    print('\nxy_m = tformfwd(trans, uv)')
    xy_m = tformfwd(trans, uv)
    print(xy_m)

    print('\n---> apply inverse transform to xy')
    print('\nuv_m = xy_augmented * trans_inv')
    xy_aug = np.hstack((xy, np.ones((xy.shape[0], 1))))
    uv_m = np.dot(xy_aug, trans_inv)
    print(uv_m)

    print('\nuv_m = tformfwd(trans_inv, xy)')
    uv_m = tformfwd(trans_inv, xy)
    print(uv_m)

    uv_m = tforminv(trans, xy)
    print('\nuv_m = tforminv(trans, xy)')
    print(uv_m)
370
facelib/detection/retinaface/retinaface.py
Normal file
@@ -0,0 +1,370 @@
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torchvision.models._utils import IntermediateLayerGetter

from facelib.detection.align_trans import get_reference_facial_points, warp_and_crop_face
from facelib.detection.retinaface.retinaface_net import FPN, SSH, MobileNetV1, make_bbox_head, make_class_head, make_landmark_head
from facelib.detection.retinaface.retinaface_utils import (PriorBox, batched_decode, batched_decode_landm, decode, decode_landm,
                                                           py_cpu_nms)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def generate_config(network_name):

    cfg_mnet = {
        'name': 'mobilenet0.25',
        'min_sizes': [[16, 32], [64, 128], [256, 512]],
        'steps': [8, 16, 32],
        'variance': [0.1, 0.2],
        'clip': False,
        'loc_weight': 2.0,
        'gpu_train': True,
        'batch_size': 32,
        'ngpu': 1,
        'epoch': 250,
        'decay1': 190,
        'decay2': 220,
        'image_size': 640,
        'return_layers': {
            'stage1': 1,
            'stage2': 2,
            'stage3': 3
        },
        'in_channel': 32,
        'out_channel': 64
    }

    cfg_re50 = {
        'name': 'Resnet50',
        'min_sizes': [[16, 32], [64, 128], [256, 512]],
        'steps': [8, 16, 32],
        'variance': [0.1, 0.2],
        'clip': False,
        'loc_weight': 2.0,
        'gpu_train': True,
        'batch_size': 24,
        'ngpu': 4,
        'epoch': 100,
        'decay1': 70,
        'decay2': 90,
        'image_size': 840,
        'return_layers': {
            'layer2': 1,
            'layer3': 2,
            'layer4': 3
        },
        'in_channel': 256,
        'out_channel': 256
    }

    if network_name == 'mobile0.25':
        return cfg_mnet
    elif network_name == 'resnet50':
        return cfg_re50
    else:
        raise NotImplementedError(f'network_name={network_name}')


class RetinaFace(nn.Module):

    def __init__(self, network_name='resnet50', half=False, phase='test'):
        super(RetinaFace, self).__init__()
        self.half_inference = half
        cfg = generate_config(network_name)
        self.backbone = cfg['name']

        self.model_name = f'retinaface_{network_name}'
        self.cfg = cfg
        self.phase = phase
        self.target_size, self.max_size = 1600, 2150
        self.resize, self.scale, self.scale1 = 1., None, None
        self.mean_tensor = torch.tensor([[[[104.]], [[117.]], [[123.]]]]).to(device)
        self.reference = get_reference_facial_points(default_square=True)

        # Build network.
        backbone = None
        if cfg['name'] == 'mobilenet0.25':
            backbone = MobileNetV1()
            self.body = IntermediateLayerGetter(backbone, cfg['return_layers'])
        elif cfg['name'] == 'Resnet50':
            import torchvision.models as models
            backbone = models.resnet50(pretrained=False)
            self.body = IntermediateLayerGetter(backbone, cfg['return_layers'])

        in_channels_stage2 = cfg['in_channel']
        in_channels_list = [
            in_channels_stage2 * 2,
            in_channels_stage2 * 4,
            in_channels_stage2 * 8,
        ]

        out_channels = cfg['out_channel']
        self.fpn = FPN(in_channels_list, out_channels)
        self.ssh1 = SSH(out_channels, out_channels)
        self.ssh2 = SSH(out_channels, out_channels)
        self.ssh3 = SSH(out_channels, out_channels)

        self.ClassHead = make_class_head(fpn_num=3, inchannels=cfg['out_channel'])
        self.BboxHead = make_bbox_head(fpn_num=3, inchannels=cfg['out_channel'])
        self.LandmarkHead = make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])

        self.to(device)
        self.eval()
        if self.half_inference:
            self.half()

    def forward(self, inputs):
        out = self.body(inputs)

        if self.backbone == 'mobilenet0.25' or self.backbone == 'Resnet50':
            out = list(out.values())
        # FPN
        fpn = self.fpn(out)

        # SSH
        feature1 = self.ssh1(fpn[0])
        feature2 = self.ssh2(fpn[1])
        feature3 = self.ssh3(fpn[2])
        features = [feature1, feature2, feature3]

        bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1)
        classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)], dim=1)
        tmp = [self.LandmarkHead[i](feature) for i, feature in enumerate(features)]
        ldm_regressions = (torch.cat(tmp, dim=1))

        if self.phase == 'train':
            output = (bbox_regressions, classifications, ldm_regressions)
        else:
            output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)
        return output

    def __detect_faces(self, inputs):
        # get scale
        height, width = inputs.shape[2:]
        self.scale = torch.tensor([width, height, width, height], dtype=torch.float32).to(device)
        tmp = [width, height, width, height, width, height, width, height, width, height]
        self.scale1 = torch.tensor(tmp, dtype=torch.float32).to(device)

        # forward
        inputs = inputs.to(device)
        if self.half_inference:
            inputs = inputs.half()
        loc, conf, landmarks = self(inputs)

        # get priorbox
        priorbox = PriorBox(self.cfg, image_size=inputs.shape[2:])
        priors = priorbox.forward().to(device)

        return loc, conf, landmarks, priors

    # single image detection
    def transform(self, image, use_origin_size):
        # convert to opencv format
        if isinstance(image, Image.Image):
            image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
        image = image.astype(np.float32)

        # testing scale
        im_size_min = np.min(image.shape[0:2])
        im_size_max = np.max(image.shape[0:2])
        resize = float(self.target_size) / float(im_size_min)

        # prevent the bigger axis from exceeding max_size
        if np.round(resize * im_size_max) > self.max_size:
            resize = float(self.max_size) / float(im_size_max)
        resize = 1 if use_origin_size else resize

        # resize
        if resize != 1:
            image = cv2.resize(image, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)

        # convert to torch.tensor format
        # image -= (104, 117, 123)
        image = image.transpose(2, 0, 1)
        image = torch.from_numpy(image).unsqueeze(0)

        return image, resize

    def detect_faces(
        self,
        image,
        conf_threshold=0.8,
        nms_threshold=0.4,
        use_origin_size=True,
    ):
        """
        Params:
            image: BGR image
        """
        image, self.resize = self.transform(image, use_origin_size)
        image = image.to(device)
        if self.half_inference:
            image = image.half()
        image = image - self.mean_tensor

        loc, conf, landmarks, priors = self.__detect_faces(image)

        boxes = decode(loc.data.squeeze(0), priors.data, self.cfg['variance'])
        boxes = boxes * self.scale / self.resize
        boxes = boxes.cpu().numpy()

        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]

        landmarks = decode_landm(landmarks.squeeze(0), priors, self.cfg['variance'])
        landmarks = landmarks * self.scale1 / self.resize
        landmarks = landmarks.cpu().numpy()

        # ignore low scores
        inds = np.where(scores > conf_threshold)[0]
        boxes, landmarks, scores = boxes[inds], landmarks[inds], scores[inds]

        # sort by score, descending
        order = scores.argsort()[::-1]
        boxes, landmarks, scores = boxes[order], landmarks[order], scores[order]

        # do NMS
        bounding_boxes = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
        keep = py_cpu_nms(bounding_boxes, nms_threshold)
        bounding_boxes, landmarks = bounding_boxes[keep, :], landmarks[keep]
        # self.t['forward_pass'].toc()
        # print(self.t['forward_pass'].average_time)
        # import sys
        # sys.stdout.flush()
        return np.concatenate((bounding_boxes, landmarks), axis=1)
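
    # Usage sketch (illustrative; assumes a BGR uint8 image `img` from cv2.imread
    # and downloaded weights): each returned row is [x1, y1, x2, y2, score]
    # followed by five (x, y) landmark pairs, i.e. shape (n_faces, 15).
    #
    #     det = RetinaFace(network_name='resnet50')
    #     faces = det.detect_faces(img, conf_threshold=0.8)
    #     boxes, landms = faces[:, 0:5], faces[:, 5:]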

    def __align_multi(self, image, boxes, landmarks, limit=None):

        if len(boxes) < 1:
            return [], []

        if limit:
            boxes = boxes[:limit]
            landmarks = landmarks[:limit]

        faces = []
        for landmark in landmarks:
            facial5points = [[landmark[2 * j], landmark[2 * j + 1]] for j in range(5)]

            warped_face = warp_and_crop_face(np.array(image), facial5points, self.reference, crop_size=(112, 112))
            faces.append(warped_face)

        return np.concatenate((boxes, landmarks), axis=1), faces

    def align_multi(self, img, conf_threshold=0.8, limit=None):

        rlt = self.detect_faces(img, conf_threshold=conf_threshold)
        boxes, landmarks = rlt[:, 0:5], rlt[:, 5:]

        return self.__align_multi(img, boxes, landmarks, limit)

    # batched detection
    def batched_transform(self, frames, use_origin_size):
        """
        Arguments:
            frames: a list of PIL.Image, or torch.Tensor(shape=[n, h, w, c],
                type=np.float32, BGR format).
            use_origin_size: whether to use origin size.
        """
        from_PIL = True if isinstance(frames[0], Image.Image) else False

        # convert to opencv format
        if from_PIL:
            frames = [cv2.cvtColor(np.asarray(frame), cv2.COLOR_RGB2BGR) for frame in frames]
            frames = np.asarray(frames, dtype=np.float32)

        # testing scale
        im_size_min = np.min(frames[0].shape[0:2])
        im_size_max = np.max(frames[0].shape[0:2])
        resize = float(self.target_size) / float(im_size_min)

        # prevent the bigger axis from exceeding max_size
        if np.round(resize * im_size_max) > self.max_size:
            resize = float(self.max_size) / float(im_size_max)
        resize = 1 if use_origin_size else resize

        # resize
        if resize != 1:
            if not from_PIL:
                frames = F.interpolate(frames, scale_factor=resize)
            else:
                frames = [
                    cv2.resize(frame, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
                    for frame in frames
                ]

        # convert to torch.tensor format
        if not from_PIL:
            frames = frames.transpose(1, 2).transpose(1, 3).contiguous()
        else:
            frames = frames.transpose((0, 3, 1, 2))
            frames = torch.from_numpy(frames)

        return frames, resize

    def batched_detect_faces(self, frames, conf_threshold=0.8, nms_threshold=0.4, use_origin_size=True):
        """
        Arguments:
            frames: a list of PIL.Image, or np.array(shape=[n, h, w, c],
                type=np.uint8, BGR format).
            conf_threshold: confidence threshold.
            nms_threshold: nms threshold.
            use_origin_size: whether to use origin size.
        Returns:
            final_bounding_boxes: list of np.array ([n_boxes, 5],
                type=np.float32).
            final_landmarks: list of np.array ([n_boxes, 10], type=np.float32).
        """
        # self.t['forward_pass'].tic()
        frames, self.resize = self.batched_transform(frames, use_origin_size)
        frames = frames.to(device)
        frames = frames - self.mean_tensor

        b_loc, b_conf, b_landmarks, priors = self.__detect_faces(frames)

        final_bounding_boxes, final_landmarks = [], []

        # decode
        priors = priors.unsqueeze(0)
        b_loc = batched_decode(b_loc, priors, self.cfg['variance']) * self.scale / self.resize
        b_landmarks = batched_decode_landm(b_landmarks, priors, self.cfg['variance']) * self.scale1 / self.resize
        b_conf = b_conf[:, :, 1]

        # index for selection
        b_indice = b_conf > conf_threshold

        # concat
        b_loc_and_conf = torch.cat((b_loc, b_conf.unsqueeze(-1)), dim=2).float()

        for pred, landm, inds in zip(b_loc_and_conf, b_landmarks, b_indice):

            # ignore low scores
            pred, landm = pred[inds, :], landm[inds, :]
            if pred.shape[0] == 0:
                final_bounding_boxes.append(np.array([], dtype=np.float32))
                final_landmarks.append(np.array([], dtype=np.float32))
                continue

            # sort
            # order = score.argsort(descending=True)
            # box, landm, score = box[order], landm[order], score[order]

            # to CPU
            bounding_boxes, landm = pred.cpu().numpy(), landm.cpu().numpy()

            # NMS
            keep = py_cpu_nms(bounding_boxes, nms_threshold)
            bounding_boxes, landmarks = bounding_boxes[keep, :], landm[keep]

            # append
            final_bounding_boxes.append(bounding_boxes)
            final_landmarks.append(landmarks)
        # self.t['forward_pass'].toc(average=True)
        # self.batch_time += self.t['forward_pass'].diff
        # self.total_frame += len(frames)
        # print(self.batch_time / self.total_frame)

        return final_bounding_boxes, final_landmarks
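
# Batched usage sketch (illustrative; `det` is a RetinaFace instance as above and
# `frame_list` an assumed list of equally-sized PIL.Image frames, e.g. from video):
#
#     boxes_per_frame, landms_per_frame = det.batched_detect_faces(
#         frame_list, conf_threshold=0.8, nms_threshold=0.4)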
196
facelib/detection/retinaface/retinaface_net.py
Normal file
@@ -0,0 +1,196 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


def conv_bn(inp, oup, stride=1, leaky=0):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False), nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True))


def conv_bn_no_relu(inp, oup, stride):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
    )


def conv_bn1X1(inp, oup, stride, leaky=0):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 1, stride, padding=0, bias=False), nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True))


def conv_dw(inp, oup, stride, leaky=0.1):
    return nn.Sequential(
        nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
        nn.BatchNorm2d(inp),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    )


class SSH(nn.Module):

    def __init__(self, in_channel, out_channel):
        super(SSH, self).__init__()
        assert out_channel % 4 == 0
        leaky = 0
        if (out_channel <= 64):
            leaky = 0.1
        self.conv3X3 = conv_bn_no_relu(in_channel, out_channel // 2, stride=1)

        self.conv5X5_1 = conv_bn(in_channel, out_channel // 4, stride=1, leaky=leaky)
        self.conv5X5_2 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)

        self.conv7X7_2 = conv_bn(out_channel // 4, out_channel // 4, stride=1, leaky=leaky)
        self.conv7x7_3 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)

    def forward(self, input):
        conv3X3 = self.conv3X3(input)

        conv5X5_1 = self.conv5X5_1(input)
        conv5X5 = self.conv5X5_2(conv5X5_1)

        conv7X7_2 = self.conv7X7_2(conv5X5_1)
        conv7X7 = self.conv7x7_3(conv7X7_2)

        out = torch.cat([conv3X3, conv5X5, conv7X7], dim=1)
        out = F.relu(out)
        return out


class FPN(nn.Module):

    def __init__(self, in_channels_list, out_channels):
        super(FPN, self).__init__()
        leaky = 0
        if (out_channels <= 64):
            leaky = 0.1
        self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride=1, leaky=leaky)
        self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride=1, leaky=leaky)
        self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride=1, leaky=leaky)

        self.merge1 = conv_bn(out_channels, out_channels, leaky=leaky)
        self.merge2 = conv_bn(out_channels, out_channels, leaky=leaky)

    def forward(self, input):
        # names = list(input.keys())
        # input = list(input.values())

        output1 = self.output1(input[0])
        output2 = self.output2(input[1])
        output3 = self.output3(input[2])

        up3 = F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode='nearest')
        output2 = output2 + up3
        output2 = self.merge2(output2)

        up2 = F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode='nearest')
        output1 = output1 + up2
        output1 = self.merge1(output1)

        out = [output1, output2, output3]
        return out


class MobileNetV1(nn.Module):

    def __init__(self):
        super(MobileNetV1, self).__init__()
        # trailing comments track the receptive field after each block
        self.stage1 = nn.Sequential(
            conv_bn(3, 8, 2, leaky=0.1),  # 3
            conv_dw(8, 16, 1),  # 7
            conv_dw(16, 32, 2),  # 11
            conv_dw(32, 32, 1),  # 19
            conv_dw(32, 64, 2),  # 27
            conv_dw(64, 64, 1),  # 43
        )
        self.stage2 = nn.Sequential(
            conv_dw(64, 128, 2),  # 43 + 16 = 59
            conv_dw(128, 128, 1),  # 59 + 32 = 91
            conv_dw(128, 128, 1),  # 91 + 32 = 123
            conv_dw(128, 128, 1),  # 123 + 32 = 155
            conv_dw(128, 128, 1),  # 155 + 32 = 187
            conv_dw(128, 128, 1),  # 187 + 32 = 219
        )
        self.stage3 = nn.Sequential(
            conv_dw(128, 256, 2),  # 219 + 32 = 251
            conv_dw(256, 256, 1),  # 251 + 64 = 315
        )
        self.avg = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(256, 1000)

    def forward(self, x):
        x = self.stage1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.avg(x)
        # x = self.model(x)
        x = x.view(-1, 256)
        x = self.fc(x)
        return x


class ClassHead(nn.Module):

    def __init__(self, inchannels=512, num_anchors=3):
        super(ClassHead, self).__init__()
        self.num_anchors = num_anchors
        self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        out = self.conv1x1(x)
        out = out.permute(0, 2, 3, 1).contiguous()

        return out.view(out.shape[0], -1, 2)


class BboxHead(nn.Module):

    def __init__(self, inchannels=512, num_anchors=3):
        super(BboxHead, self).__init__()
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        out = self.conv1x1(x)
        out = out.permute(0, 2, 3, 1).contiguous()

        return out.view(out.shape[0], -1, 4)


class LandmarkHead(nn.Module):

    def __init__(self, inchannels=512, num_anchors=3):
        super(LandmarkHead, self).__init__()
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        out = self.conv1x1(x)
        out = out.permute(0, 2, 3, 1).contiguous()

        return out.view(out.shape[0], -1, 10)


def make_class_head(fpn_num=3, inchannels=64, anchor_num=2):
    classhead = nn.ModuleList()
    for i in range(fpn_num):
        classhead.append(ClassHead(inchannels, anchor_num))
    return classhead


def make_bbox_head(fpn_num=3, inchannels=64, anchor_num=2):
    bboxhead = nn.ModuleList()
    for i in range(fpn_num):
        bboxhead.append(BboxHead(inchannels, anchor_num))
    return bboxhead


def make_landmark_head(fpn_num=3, inchannels=64, anchor_num=2):
    landmarkhead = nn.ModuleList()
    for i in range(fpn_num):
        landmarkhead.append(LandmarkHead(inchannels, anchor_num))
    return landmarkhead
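
# Shape sanity check (illustrative, not part of the original file): each head is a
# 1x1 conv that maps an FPN feature map to per-anchor predictions, flattened to
# (batch, n_cells * anchor_num, values_per_anchor).
#
#     feat = torch.rand(1, 64, 80, 80)  # assumed 64-channel FPN level
#     cls = make_class_head(fpn_num=3, inchannels=64, anchor_num=2)[0](feat)
#     # cls.shape == (1, 80 * 80 * 2, 2)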
421
facelib/detection/retinaface/retinaface_utils.py
Normal file
@@ -0,0 +1,421 @@
import numpy as np
import torch
import torchvision
from itertools import product as product
from math import ceil


class PriorBox(object):

    def __init__(self, cfg, image_size=None, phase='train'):
        super(PriorBox, self).__init__()
        self.min_sizes = cfg['min_sizes']
        self.steps = cfg['steps']
        self.clip = cfg['clip']
        self.image_size = image_size
        self.feature_maps = [[ceil(self.image_size[0] / step), ceil(self.image_size[1] / step)] for step in self.steps]
        self.name = 's'

    def forward(self):
        anchors = []
        for k, f in enumerate(self.feature_maps):
            min_sizes = self.min_sizes[k]
            for i, j in product(range(f[0]), range(f[1])):
                for min_size in min_sizes:
                    s_kx = min_size / self.image_size[1]
                    s_ky = min_size / self.image_size[0]
                    dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]
                    dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]
                    for cy, cx in product(dense_cy, dense_cx):
                        anchors += [cx, cy, s_kx, s_ky]

        # back to torch land
        output = torch.Tensor(anchors).view(-1, 4)
        if self.clip:
            output.clamp_(max=1, min=0)
        return output
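
# Sizing sketch (illustrative; `cfg` is assumed to be one of the config dicts from
# retinaface.py with steps [8, 16, 32] and two min_sizes per level): a 640x640
# input yields 80x80, 40x40 and 20x20 grids, each cell holding 2 anchors.
#
#     priors = PriorBox(cfg, image_size=(640, 640)).forward()
#     # priors.shape == (2 * (80*80 + 40*40 + 20*20), 4) == (16800, 4)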


def py_cpu_nms(dets, thresh):
    """Pure Python NMS baseline."""
    keep = torchvision.ops.nms(
        boxes=torch.Tensor(dets[:, :4]),
        scores=torch.Tensor(dets[:, 4]),
        iou_threshold=thresh,
    )

    return list(keep)


def point_form(boxes):
    """Convert prior_boxes to (xmin, ymin, xmax, ymax)
    representation for comparison to point form ground truth data.
    Args:
        boxes: (tensor) center-size default boxes from priorbox layers.
    Return:
        boxes: (tensor) converted xmin, ymin, xmax, ymax form of boxes.
    """
    return torch.cat(
        (
            boxes[:, :2] - boxes[:, 2:] / 2,  # xmin, ymin
            boxes[:, :2] + boxes[:, 2:] / 2),  # xmax, ymax
        1)


def center_size(boxes):
    """Convert prior_boxes to (cx, cy, w, h)
    representation for comparison to center-size form ground truth data.
    Args:
        boxes: (tensor) point_form boxes
    Return:
        boxes: (tensor) converted cx, cy, w, h form of boxes.
    """
    return torch.cat(
        ((boxes[:, 2:] + boxes[:, :2]) / 2,  # cx, cy
         boxes[:, 2:] - boxes[:, :2]),  # w, h
        1)


def intersect(box_a, box_b):
    """ We resize both tensors to [A,B,2] without new malloc:
    [A,2] -> [A,1,2] -> [A,B,2]
    [B,2] -> [1,B,2] -> [A,B,2]
    Then we compute the area of intersect between box_a and box_b.
    Args:
        box_a: (tensor) bounding boxes, Shape: [A,4].
        box_b: (tensor) bounding boxes, Shape: [B,4].
    Return:
        (tensor) intersection area, Shape: [A,B].
    """
    A = box_a.size(0)
    B = box_b.size(0)
    max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2), box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
    min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2), box_b[:, :2].unsqueeze(0).expand(A, B, 2))
    inter = torch.clamp((max_xy - min_xy), min=0)
    return inter[:, :, 0] * inter[:, :, 1]


def jaccard(box_a, box_b):
    """Compute the jaccard overlap of two sets of boxes. The jaccard overlap
    is simply the intersection over union of two boxes. Here we operate on
    ground truth boxes and default boxes.
    E.g.:
        A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
    Args:
        box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
        box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
    Return:
        jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
    """
    inter = intersect(box_a, box_b)
    area_a = ((box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])).unsqueeze(1).expand_as(inter)  # [A,B]
    area_b = ((box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])).unsqueeze(0).expand_as(inter)  # [A,B]
    union = area_a + area_b - inter
    return inter / union  # [A,B]


def matrix_iou(a, b):
    """
    return iou of a and b, numpy version for data augmentation
    """
    lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
    rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])

    area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
    area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
    return area_i / (area_a[:, np.newaxis] + area_b - area_i)


def matrix_iof(a, b):
    """
    return iof of a and b, numpy version for data augmentation
    """
    lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
    rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])

    area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
    return area_i / np.maximum(area_a[:, np.newaxis], 1)


def match(threshold, truths, priors, variances, labels, landms, loc_t, conf_t, landm_t, idx):
    """Match each prior box with the ground truth box of the highest jaccard
    overlap, encode the bounding boxes, then return the matched indices
    corresponding to both confidence and location preds.
    Args:
        threshold: (float) The overlap threshold used when matching boxes.
        truths: (tensor) Ground truth boxes, Shape: [num_obj, 4].
        priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
        variances: (tensor) Variances corresponding to each prior coord,
            Shape: [num_priors, 4].
        labels: (tensor) All the class labels for the image, Shape: [num_obj].
        landms: (tensor) Ground truth landms, Shape [num_obj, 10].
        loc_t: (tensor) Tensor to be filled w/ encoded location targets.
        conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
        landm_t: (tensor) Tensor to be filled w/ encoded landm targets.
        idx: (int) current batch index
    Return:
        The matched indices corresponding to 1)location 2)confidence
        3)landm preds.
    """
    # jaccard index
    overlaps = jaccard(truths, point_form(priors))
    # (Bipartite Matching)
    # [1,num_objects] best prior for each ground truth
    best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)

    # ignore hard gt
    valid_gt_idx = best_prior_overlap[:, 0] >= 0.2
    best_prior_idx_filter = best_prior_idx[valid_gt_idx, :]
    if best_prior_idx_filter.shape[0] <= 0:
        loc_t[idx] = 0
        conf_t[idx] = 0
        return

    # [1,num_priors] best ground truth for each prior
    best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)
    best_truth_idx.squeeze_(0)
    best_truth_overlap.squeeze_(0)
    best_prior_idx.squeeze_(1)
    best_prior_idx_filter.squeeze_(1)
    best_prior_overlap.squeeze_(1)
    best_truth_overlap.index_fill_(0, best_prior_idx_filter, 2)  # ensure best prior
    # TODO refactor: index best_prior_idx with long tensor
    # ensure every gt matches with its prior of max overlap
    for j in range(best_prior_idx.size(0)):  # decide which ground-truth box this anchor predicts
        best_truth_idx[best_prior_idx[j]] = j
    matches = truths[best_truth_idx]  # Shape: [num_priors,4] -- gather the matched bbox for each anchor
    conf = labels[best_truth_idx]  # Shape: [num_priors] -- gather the matched label for each anchor
    conf[best_truth_overlap < threshold] = 0  # label as background: low-overlap anchors become negatives
    loc = encode(matches, priors, variances)

    matches_landm = landms[best_truth_idx]
    landm = encode_landm(matches_landm, priors, variances)
    loc_t[idx] = loc  # [num_priors,4] encoded offsets to learn
    conf_t[idx] = conf  # [num_priors] top class label for each prior
    landm_t[idx] = landm


def encode(matched, priors, variances):
    """Encode the variances from the priorbox layers into the ground truth boxes
    we have matched (based on jaccard overlap) with the prior boxes.
    Args:
        matched: (tensor) Coords of ground truth for each prior in point-form
            Shape: [num_priors, 4].
        priors: (tensor) Prior boxes in center-offset form
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        encoded boxes (tensor), Shape: [num_priors, 4]
    """

    # dist b/t match center and prior's center
    g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]
    # encode variance
    g_cxcy /= (variances[0] * priors[:, 2:])
    # match wh / prior wh
    g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
    g_wh = torch.log(g_wh) / variances[1]
    # return target for smooth_l1_loss
    return torch.cat([g_cxcy, g_wh], 1)  # [num_priors,4]
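
# Round-trip sketch (illustrative): decode() below undoes encode() for the same
# priors and variances, so decoding the encoded ground truth recovers it.
#
#     priors = torch.tensor([[0.5, 0.5, 0.2, 0.2]])  # (cx, cy, w, h)
#     gt = torch.tensor([[0.42, 0.45, 0.58, 0.60]])  # (x1, y1, x2, y2)
#     v = [0.1, 0.2]
#     assert torch.allclose(decode(encode(gt, priors, v), priors, v), gt, atol=1e-6)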


def encode_landm(matched, priors, variances):
    """Encode the variances from the priorbox layers into the ground truth boxes
    we have matched (based on jaccard overlap) with the prior boxes.
    Args:
        matched: (tensor) Coords of ground truth for each prior in point-form
            Shape: [num_priors, 10].
        priors: (tensor) Prior boxes in center-offset form
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        encoded landm (tensor), Shape: [num_priors, 10]
    """

    # dist b/t match center and prior's center
    matched = torch.reshape(matched, (matched.size(0), 5, 2))
    priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
    priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
    priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
    priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
    priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2)
    g_cxcy = matched[:, :, :2] - priors[:, :, :2]
    # encode variance
    g_cxcy /= (variances[0] * priors[:, :, 2:])
    # g_cxcy /= priors[:, :, 2:]
    g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1)
    # return target for smooth_l1_loss
    return g_cxcy


# Adapted from https://github.com/Hakuyume/chainer-ssd
def decode(loc, priors, variances):
    """Decode locations from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        loc (tensor): location predictions for loc layers,
            Shape: [num_priors,4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded bounding box predictions
    """

    boxes = torch.cat((priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
                       priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes


def decode_landm(pre, priors, variances):
    """Decode landm from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        pre (tensor): landm predictions for loc layers,
            Shape: [num_priors,10]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded landm predictions
    """
    tmp = (
        priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
        priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
        priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
        priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
        priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],
    )
    landms = torch.cat(tmp, dim=1)
    return landms


def batched_decode(b_loc, priors, variances):
    """Decode locations from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        b_loc (tensor): location predictions for loc layers,
            Shape: [num_batches,num_priors,4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [1,num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded bounding box predictions
    """
    boxes = (
        priors[:, :, :2] + b_loc[:, :, :2] * variances[0] * priors[:, :, 2:],
        priors[:, :, 2:] * torch.exp(b_loc[:, :, 2:] * variances[1]),
    )
    boxes = torch.cat(boxes, dim=2)

    boxes[:, :, :2] -= boxes[:, :, 2:] / 2
    boxes[:, :, 2:] += boxes[:, :, :2]
    return boxes


def batched_decode_landm(pre, priors, variances):
    """Decode landm from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        pre (tensor): landm predictions for loc layers,
            Shape: [num_batches,num_priors,10]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [1,num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded landm predictions
    """
    landms = (
        priors[:, :, :2] + pre[:, :, :2] * variances[0] * priors[:, :, 2:],
        priors[:, :, :2] + pre[:, :, 2:4] * variances[0] * priors[:, :, 2:],
        priors[:, :, :2] + pre[:, :, 4:6] * variances[0] * priors[:, :, 2:],
        priors[:, :, :2] + pre[:, :, 6:8] * variances[0] * priors[:, :, 2:],
        priors[:, :, :2] + pre[:, :, 8:10] * variances[0] * priors[:, :, 2:],
    )
    landms = torch.cat(landms, dim=2)
    return landms


def log_sum_exp(x):
    """Utility function for computing log_sum_exp in a numerically stable way.
    This will be used to determine unaveraged confidence loss across
    all examples in a batch.
    Args:
        x (Variable(tensor)): conf_preds from conf layers
    """
    x_max = x.data.max()
    return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max


# Original author: Francisco Massa:
# https://github.com/fmassa/object-detection.torch
# Ported to PyTorch by Max deGroot (02/01/2017)
def nms(boxes, scores, overlap=0.5, top_k=200):
    """Apply non-maximum suppression at test time to avoid detecting too many
    overlapping bounding boxes for a given object.
    Args:
        boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
        scores: (tensor) The class prediction scores for the img, Shape: [num_priors].
        overlap: (float) The overlap thresh for suppressing unnecessary boxes.
        top_k: (int) The maximum number of box preds to consider.
    Return:
        The indices of the kept boxes with respect to num_priors.
    """

    keep = torch.Tensor(scores.size(0)).fill_(0).long()
    if boxes.numel() == 0:
        return keep
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = torch.mul(x2 - x1, y2 - y1)
    v, idx = scores.sort(0)  # sort in ascending order
    # I = I[v >= 0.01]
    idx = idx[-top_k:]  # indices of the top-k largest vals
    xx1 = boxes.new()
    yy1 = boxes.new()
    xx2 = boxes.new()
    yy2 = boxes.new()
    w = boxes.new()
    h = boxes.new()

    # keep = torch.Tensor()
    count = 0
    while idx.numel() > 0:
        i = idx[-1]  # index of current largest val
        # keep.append(i)
        keep[count] = i
        count += 1
        if idx.size(0) == 1:
            break
        idx = idx[:-1]  # remove kept element from view
        # load bboxes of next highest vals
        torch.index_select(x1, 0, idx, out=xx1)
        torch.index_select(y1, 0, idx, out=yy1)
        torch.index_select(x2, 0, idx, out=xx2)
        torch.index_select(y2, 0, idx, out=yy2)
        # store element-wise max with next highest score
        xx1 = torch.clamp(xx1, min=x1[i])
        yy1 = torch.clamp(yy1, min=y1[i])
        xx2 = torch.clamp(xx2, max=x2[i])
        yy2 = torch.clamp(yy2, max=y2[i])
        w.resize_as_(xx2)
        h.resize_as_(yy2)
        w = xx2 - xx1
        h = yy2 - yy1
        # check sizes of xx1 and xx2.. after each iteration
        w = torch.clamp(w, min=0.0)
        h = torch.clamp(h, min=0.0)
        inter = w * h
        # IoU = i / (area(a) + area(b) - i)
        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas
        union = (rem_areas - inter) + area[i]
        IoU = inter / union  # store result in iou
        # keep only elements with an IoU <= overlap
        idx = idx[IoU.le(overlap)]
    return keep, count
|
||||
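# Hedged sanity check: on simple inputs the pure-PyTorch loop above should
# agree with torchvision's fused kernel (both use hard IoU suppression).
import torchvision

test_boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                           [1.0, 1.0, 11.0, 11.0],
                           [50.0, 50.0, 60.0, 60.0]])
test_scores = torch.tensor([0.9, 0.8, 0.7])

kept, count = nms(test_boxes, test_scores, overlap=0.5)
print(kept[:count])                                        # tensor([0, 2])
print(torchvision.ops.nms(test_boxes, test_scores, 0.5))   # tensor([0, 2])
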
0
facelib/detection/yolov5face/__init__.py
Normal file
142
facelib/detection/yolov5face/face_detector.py
Normal file
@@ -0,0 +1,142 @@
import copy
import os
from pathlib import Path

import cv2
import numpy as np
import torch
from torch import nn

from facelib.detection.yolov5face.models.common import Conv
from facelib.detection.yolov5face.models.yolo import Model
from facelib.detection.yolov5face.utils.datasets import letterbox
from facelib.detection.yolov5face.utils.general import (
    check_img_size,
    non_max_suppression_face,
    scale_coords,
    scale_coords_landmarks,
)

IS_HIGH_VERSION = tuple(map(int, torch.__version__.split('+')[0].split('.'))) >= (1, 9, 0)


def isListempty(inList):
    if isinstance(inList, list):  # Is a list
        return all(map(isListempty, inList))
    return False  # Not a list

class YoloDetector:
    def __init__(
        self,
        config_name,
        min_face=10,
        target_size=None,
        device='cuda',
    ):
        """
        config_name: name of .yaml config with network configuration from models/ folder.
        min_face : minimal face size in pixels.
        target_size : target size of smaller image axis (choose lower for faster work). e.g. 480, 720, 1080.
                      None for original resolution.
        """
        self._class_path = Path(__file__).parent.absolute()
        self.target_size = target_size
        self.min_face = min_face
        self.detector = Model(cfg=config_name)
        self.device = device

    def _preprocess(self, imgs):
        """
        Preprocessing image before passing through the network. Resize and conversion to torch tensor.
        """
        pp_imgs = []
        for img in imgs:
            h0, w0 = img.shape[:2]  # orig hw
            if self.target_size:
                r = self.target_size / min(h0, w0)  # resize image to img_size
                if r < 1:
                    img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=cv2.INTER_LINEAR)

            imgsz = check_img_size(max(img.shape[:2]), s=self.detector.stride.max())  # check img_size
            img = letterbox(img, new_shape=imgsz)[0]
            pp_imgs.append(img)
        pp_imgs = np.array(pp_imgs)
        pp_imgs = pp_imgs.transpose(0, 3, 1, 2)
        pp_imgs = torch.from_numpy(pp_imgs).to(self.device)
        pp_imgs = pp_imgs.float()  # uint8 to fp16/32
        return pp_imgs / 255.0  # 0 - 255 to 0.0 - 1.0

    def _postprocess(self, imgs, origimgs, pred, conf_thres, iou_thres):
        """
        Postprocessing of raw pytorch model output.
        Returns:
            bboxes: list of arrays with 4 coordinates of bounding boxes with format x1,y1,x2,y2.
            points: list of arrays with coordinates of 5 facial keypoints (eyes, nose, lips corners).
        """
        bboxes = [[] for _ in range(len(origimgs))]
        landmarks = [[] for _ in range(len(origimgs))]

        pred = non_max_suppression_face(pred, conf_thres, iou_thres)

        for image_id, origimg in enumerate(origimgs):
            img_shape = origimg.shape
            image_height, image_width = img_shape[:2]
            gn = torch.tensor(img_shape)[[1, 0, 1, 0]]  # normalization gain whwh
            gn_lks = torch.tensor(img_shape)[[1, 0, 1, 0, 1, 0, 1, 0, 1, 0]]  # normalization gain landmarks
            det = pred[image_id].cpu()
            scale_coords(imgs[image_id].shape[1:], det[:, :4], img_shape).round()
            scale_coords_landmarks(imgs[image_id].shape[1:], det[:, 5:15], img_shape).round()

            for j in range(det.size()[0]):
                box = (det[j, :4].view(1, 4) / gn).view(-1).tolist()
                box = list(
                    map(int, [box[0] * image_width, box[1] * image_height, box[2] * image_width, box[3] * image_height])
                )
                if box[3] - box[1] < self.min_face:
                    continue
                lm = (det[j, 5:15].view(1, 10) / gn_lks).view(-1).tolist()
                # even indices are x coordinates, odd indices are y coordinates
                lm = list(map(int, [v * image_width if k % 2 == 0 else v * image_height for k, v in enumerate(lm)]))
                lm = [lm[i: i + 2] for i in range(0, len(lm), 2)]
                bboxes[image_id].append(box)
                landmarks[image_id].append(lm)
        return bboxes, landmarks

    def detect_faces(self, imgs, conf_thres=0.7, iou_thres=0.5):
        """
        Get bbox coordinates and keypoints of faces on original image.
        Params:
            imgs: image or list of images to detect faces on, in BGR order (converted to RGB order for inference)
            conf_thres: confidence threshold for each prediction
            iou_thres: threshold for NMS (filter of intersecting bboxes)
        Returns:
            bboxes: list of arrays with 4 coordinates of bounding boxes with format x1,y1,x2,y2.
            points: list of arrays with coordinates of 5 facial keypoints (eyes, nose, lips corners).
        """
        # Pass input images through face detector
        images = imgs if isinstance(imgs, list) else [imgs]
        images = [cv2.cvtColor(img, cv2.COLOR_BGR2RGB) for img in images]
        origimgs = copy.deepcopy(images)

        images = self._preprocess(images)

        if IS_HIGH_VERSION:
            with torch.inference_mode():  # for pytorch>=1.9
                pred = self.detector(images)[0]
        else:
            with torch.no_grad():  # for pytorch<1.9
                pred = self.detector(images)[0]

        bboxes, points = self._postprocess(images, origimgs, pred, conf_thres, iou_thres)

        # return bboxes, points
        if not isListempty(points):
            bboxes = np.array(bboxes).reshape(-1, 4)
            points = np.array(points).reshape(-1, 10)
            padding = bboxes[:, 0].reshape(-1, 1)  # dummy score column so the layout matches the retinaface output
            return np.concatenate((bboxes, padding, points), axis=1)
        else:
            return None

    def __call__(self, *args):
        # the original delegated to self.predict(), which is not defined on this class
        return self.detect_faces(*args)
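# Hedged usage sketch: the config path matches the repo layout used elsewhere
# in this commit, but 'test.jpg' is a placeholder and the detector weights
# still have to be loaded into detector.detector before results are useful.
import cv2

detector = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5n.yaml', device='cpu')
img = cv2.imread('test.jpg')  # BGR; detect_faces converts to RGB internally
dets = detector.detect_faces(img, conf_thres=0.7, iou_thres=0.5)
# dets is None when no face is found, otherwise an (N, 15) array laid out as
# x1, y1, x2, y2, dummy score column, then five (x, y) landmark pairs.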
0
facelib/detection/yolov5face/models/__init__.py
Normal file
299
facelib/detection/yolov5face/models/common.py
Normal file
@@ -0,0 +1,299 @@
# This file contains modules common to various models

import math

import numpy as np
import torch
from torch import nn

from facelib.detection.yolov5face.utils.datasets import letterbox
from facelib.detection.yolov5face.utils.general import (
    make_divisible,
    non_max_suppression,
    scale_coords,
    xyxy2xywh,
)


def autopad(k, p=None):  # kernel, padding
    # Pad to 'same'
    if p is None:
        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
    return p

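# 'same'-padding examples: at stride 1 these keep the spatial size unchanged.
print(autopad(3))        # 1
print(autopad(5))        # 2
print(autopad((3, 5)))   # [1, 2]
print(autopad(3, 0))     # 0, an explicit padding wins
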
def channel_shuffle(x, groups):
    batchsize, num_channels, height, width = x.data.size()
    channels_per_group = torch.div(num_channels, groups, rounding_mode="trunc")

    # reshape
    x = x.view(batchsize, groups, channels_per_group, height, width)
    x = torch.transpose(x, 1, 2).contiguous()

    # flatten
    return x.view(batchsize, -1, height, width)

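# Tiny check: with 2 groups over channels [0, 1, 2, 3], shuffling interleaves
# the groups to [0, 2, 1, 3], letting the next grouped conv mix information.
x = torch.arange(4.0).view(1, 4, 1, 1)
print(channel_shuffle(x, 2).view(-1))  # tensor([0., 2., 1., 3.])
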
def DWConv(c1, c2, k=1, s=1, act=True):
    # Depthwise convolution
    return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)


class Conv(nn.Module):
    # Standard convolution
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

    def fuseforward(self, x):
        return self.act(self.conv(x))


class StemBlock(nn.Module):
    def __init__(self, c1, c2, k=3, s=2, p=None, g=1, act=True):
        super().__init__()
        self.stem_1 = Conv(c1, c2, k, s, p, g, act)
        self.stem_2a = Conv(c2, c2 // 2, 1, 1, 0)
        self.stem_2b = Conv(c2 // 2, c2, 3, 2, 1)
        self.stem_2p = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.stem_3 = Conv(c2 * 2, c2, 1, 1, 0)

    def forward(self, x):
        stem_1_out = self.stem_1(x)
        stem_2a_out = self.stem_2a(stem_1_out)
        stem_2b_out = self.stem_2b(stem_2a_out)
        stem_2p_out = self.stem_2p(stem_1_out)
        return self.stem_3(torch.cat((stem_2b_out, stem_2p_out), 1))


class Bottleneck(nn.Module):
    # Standard bottleneck
    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_, c2, 3, 1, g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class BottleneckCSP(nn.Module):
    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.LeakyReLU(0.1, inplace=True)
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        y1 = self.cv3(self.m(self.cv1(x)))
        y2 = self.cv2(x)
        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))


class C3(nn.Module):
    # CSP Bottleneck with 3 convolutions
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(2 * c_, c2, 1)  # act=FReLU(c2)
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))

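# Shape sanity check for the CSP blocks above: spatial size is preserved,
# only the channel count changes.
m = C3(64, 128, n=2)
print(m(torch.randn(1, 64, 32, 32)).shape)  # torch.Size([1, 128, 32, 32])
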
class ShuffleV2Block(nn.Module):
    def __init__(self, inp, oup, stride):
        super().__init__()

        if not 1 <= stride <= 3:
            raise ValueError("illegal stride value")
        self.stride = stride

        branch_features = oup // 2

        if self.stride > 1:
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(branch_features),
                nn.SiLU(),
            )
        else:
            self.branch1 = nn.Sequential()

        self.branch2 = nn.Sequential(
            nn.Conv2d(
                inp if (self.stride > 1) else branch_features,
                branch_features,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.BatchNorm2d(branch_features),
            nn.SiLU(),
            self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.SiLU(),
        )

    @staticmethod
    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)

    def forward(self, x):
        if self.stride == 1:
            x1, x2 = x.chunk(2, dim=1)
            out = torch.cat((x1, self.branch2(x2)), dim=1)
        else:
            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
        out = channel_shuffle(out, 2)
        return out


class SPP(nn.Module):
    # Spatial pyramid pooling layer used in YOLOv3-SPP
    def __init__(self, c1, c2, k=(5, 9, 13)):
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])

    def forward(self, x):
        x = self.cv1(x)
        return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))


class Focus(nn.Module):
    # Focus wh information into c-space
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super().__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)

    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
        return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))

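# The strided slicing in Focus is a space-to-depth move: every 2x2 pixel
# block becomes 4 channels, halving resolution before the first convolution.
focus = Focus(3, 32, k=3)
print(focus(torch.randn(1, 3, 64, 64)).shape)  # torch.Size([1, 32, 32, 32])
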
class Concat(nn.Module):
    # Concatenate a list of tensors along dimension
    def __init__(self, dimension=1):
        super().__init__()
        self.d = dimension

    def forward(self, x):
        return torch.cat(x, self.d)


class NMS(nn.Module):
    # Non-Maximum Suppression (NMS) module
    conf = 0.25  # confidence threshold
    iou = 0.45  # IoU threshold
    classes = None  # (optional list) filter by class

    def forward(self, x):
        return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)


class AutoShape(nn.Module):
    # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
    img_size = 640  # inference size (pixels)
    conf = 0.25  # NMS confidence threshold
    iou = 0.45  # NMS IoU threshold
    classes = None  # (optional list) filter by class

    def __init__(self, model):
        super().__init__()
        self.model = model.eval()

    def autoshape(self):
        print("autoShape already enabled, skipping... ")  # model already converted to model.autoshape()
        return self

    def forward(self, imgs, size=640, augment=False, profile=False):
        # Inference from various sources. For height=720, width=1280, RGB images example inputs are:
        #   OpenCV:   = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(720,1280,3)
        #   PIL:      = Image.open('image.jpg')  # HWC x(720,1280,3)
        #   numpy:    = np.zeros((720,1280,3))  # HWC
        #   torch:    = torch.zeros(16,3,720,1280)  # BCHW
        #   multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images

        p = next(self.model.parameters())  # for device and type
        if isinstance(imgs, torch.Tensor):  # torch
            return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference

        # Pre-process
        n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs])  # number of images, list of images
        shape0, shape1 = [], []  # image and inference shapes
        for i, im in enumerate(imgs):
            im = np.array(im)  # to numpy
            if im.shape[0] < 5:  # image in CHW
                im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
            im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3)  # enforce 3ch input
            s = im.shape[:2]  # HWC
            shape0.append(s)  # image shape
            g = size / max(s)  # gain
            shape1.append([y * g for y in s])
            imgs[i] = im  # update
        shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)]  # inference shape
        x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad
        x = np.stack(x, 0) if n > 1 else x[0][None]  # stack
        x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
        x = torch.from_numpy(x).to(p.device).type_as(p) / 255.0  # uint8 to fp16/32

        # Inference
        with torch.no_grad():
            y = self.model(x, augment, profile)[0]  # forward
        y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)  # NMS

        # Post-process
        for i in range(n):
            scale_coords(shape1, y[i][:, :4], shape0[i])

        return Detections(imgs, y, self.names)


class Detections:
    # detections class for YOLOv5 inference results
    def __init__(self, imgs, pred, names=None):
        super().__init__()
        d = pred[0].device  # device
        gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1.0, 1.0], device=d) for im in imgs]  # normalizations
        self.imgs = imgs  # list of images as numpy arrays
        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
        self.names = names  # class names
        self.xyxy = pred  # xyxy pixels
        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
        self.n = len(self.pred)

    def __len__(self):
        return self.n

    def tolist(self):
        # return a list of Detections objects, i.e. 'for result in results.tolist():'
        x = [Detections([self.imgs[i]], [self.pred[i]], self.names) for i in range(self.n)]
        for d in x:
            for k in ["imgs", "pred", "xyxy", "xyxyn", "xywh", "xywhn"]:
                setattr(d, k, getattr(d, k)[0])  # pop out of list
        return x
45
facelib/detection/yolov5face/models/experimental.py
Normal file
@@ -0,0 +1,45 @@
# This file contains experimental modules

import numpy as np
import torch
from torch import nn

from facelib.detection.yolov5face.models.common import Conv


class CrossConv(nn.Module):
    # Cross Convolution Downsample
    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, (1, k), (1, s))
        self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class MixConv2d(nn.Module):
    # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
        super().__init__()
        groups = len(k)
        if equal_ch:  # equal c_ per group
            i = torch.linspace(0, groups - 1e-6, c2).floor()  # c2 indices
            c_ = [(i == g).sum() for g in range(groups)]  # intermediate channels
        else:  # equal weight.numel() per group
            b = [c2] + [0] * groups
            a = np.eye(groups + 1, groups, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
        self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, x):
        return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
235
facelib/detection/yolov5face/models/yolo.py
Normal file
@@ -0,0 +1,235 @@
import math
from copy import deepcopy
from pathlib import Path

import torch
import yaml  # for torch hub
from torch import nn

from facelib.detection.yolov5face.models.common import (
    C3,
    NMS,
    SPP,
    AutoShape,
    Bottleneck,
    BottleneckCSP,
    Concat,
    Conv,
    DWConv,
    Focus,
    ShuffleV2Block,
    StemBlock,
)
from facelib.detection.yolov5face.models.experimental import CrossConv, MixConv2d
from facelib.detection.yolov5face.utils.autoanchor import check_anchor_order
from facelib.detection.yolov5face.utils.general import make_divisible
from facelib.detection.yolov5face.utils.torch_utils import copy_attr, fuse_conv_and_bn


class Detect(nn.Module):
    stride = None  # strides computed during build
    export = False  # onnx export

    def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
        super().__init__()
        self.nc = nc  # number of classes
        self.no = nc + 5 + 10  # number of outputs per anchor

        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [torch.zeros(1)] * self.nl  # init grid
        a = torch.tensor(anchors).float().view(self.nl, -1, 2)
        self.register_buffer("anchors", a)  # shape(nl,na,2)
        self.register_buffer("anchor_grid", a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv

    def forward(self, x):
        z = []  # inference output
        if self.export:
            for i in range(self.nl):
                x[i] = self.m[i](x[i])
            return x
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()

            if not self.training:  # inference
                if self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i] = self._make_grid(nx, ny).to(x[i].device)

                y = torch.full_like(x[i], 0)
                y[..., [0, 1, 2, 3, 4, 15]] = x[i][..., [0, 1, 2, 3, 4, 15]].sigmoid()
                y[..., 5:15] = x[i][..., 5:15]

                y[..., 0:2] = (y[..., 0:2] * 2.0 - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
                y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh

                y[..., 5:7] = (
                    y[..., 5:7] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]
                )  # landmark x1 y1
                y[..., 7:9] = (
                    y[..., 7:9] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]
                )  # landmark x2 y2
                y[..., 9:11] = (
                    y[..., 9:11] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]
                )  # landmark x3 y3
                y[..., 11:13] = (
                    y[..., 11:13] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]
                )  # landmark x4 y4
                y[..., 13:15] = (
                    y[..., 13:15] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]
                )  # landmark x5 y5

                z.append(y.view(bs, -1, self.no))

        return x if self.training else (torch.cat(z, 1), x)

    @staticmethod
    def _make_grid(nx=20, ny=20):
        # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)], indexing="ij")  # for pytorch>=1.10
        yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
        return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()

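# The grid holds the per-cell (x, y) offsets added to the sigmoid outputs in
# forward(); for a 2x2 feature map:
print(Detect._make_grid(2, 2).view(-1, 2))
# tensor([[0., 0.],
#         [1., 0.],
#         [0., 1.],
#         [1., 1.]])
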
class Model(nn.Module):
    def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None):  # model, input channels, number of classes
        super().__init__()
        self.yaml_file = Path(cfg).name
        with Path(cfg).open(encoding="utf8") as f:
            self.yaml = yaml.safe_load(f)  # model dict

        # Define model
        ch = self.yaml["ch"] = self.yaml.get("ch", ch)  # input channels
        if nc and nc != self.yaml["nc"]:
            self.yaml["nc"] = nc  # override yaml value

        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
        self.names = [str(i) for i in range(self.yaml["nc"])]  # default names

        # Build strides, anchors
        m = self.model[-1]  # Detect()
        if isinstance(m, Detect):
            s = 128  # 2x min stride
            m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
            m.anchors /= m.stride.view(-1, 1, 1)
            check_anchor_order(m)
            self.stride = m.stride
            self._initialize_biases()  # only run once

    def forward(self, x):
        return self.forward_once(x)  # single-scale inference, train

    def forward_once(self, x):
        y = []  # outputs
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers

            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output

        return x

    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
        # https://arxiv.org/abs/1708.02002 section 3.3
        m = self.model[-1]  # Detect() module
        for mi, s in zip(m.m, m.stride):  # from
            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
            b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)

    def _print_biases(self):
        m = self.model[-1]  # Detect() module
        for mi in m.m:  # from
            b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
            print(("%6g Conv2d.bias:" + "%10.3g" * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))

    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
        print("Fusing layers... ")
        for m in self.model.modules():
            if isinstance(m, Conv) and hasattr(m, "bn"):
                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
                delattr(m, "bn")  # remove batchnorm
                m.forward = m.fuseforward  # update forward
            elif type(m) is nn.Upsample:
                m.recompute_scale_factor = None  # torch 1.11.0 compatibility
        return self

    def nms(self, mode=True):  # add or remove NMS module
        present = isinstance(self.model[-1], NMS)  # last layer is NMS
        if mode and not present:
            print("Adding NMS... ")
            m = NMS()  # module
            m.f = -1  # from
            m.i = self.model[-1].i + 1  # index
            self.model.add_module(name=str(m.i), module=m)  # add
            self.eval()
        elif not mode and present:
            print("Removing NMS... ")
            self.model = self.model[:-1]  # remove
        return self

    def autoshape(self):  # add autoShape module
        print("Adding autoShape... ")
        m = AutoShape(self)  # wrap model
        copy_attr(m, self, include=("yaml", "nc", "hyp", "names", "stride"), exclude=())  # copy attributes
        return m

def parse_model(d, ch):  # model_dict, input_channels(3)
    anchors, nc, gd, gw = d["anchors"], d["nc"], d["depth_multiple"], d["width_multiple"]
    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)

    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    for i, (f, n, m, args) in enumerate(d["backbone"] + d["head"]):  # from, number, module, args
        m = eval(m) if isinstance(m, str) else m  # eval strings
        for j, a in enumerate(args):
            try:
                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
            except NameError:
                pass  # leave non-evaluable strings (e.g. 'nearest') as-is

        n = max(round(n * gd), 1) if n > 1 else n  # depth gain
        if m in [
            Conv,
            Bottleneck,
            SPP,
            DWConv,
            MixConv2d,
            Focus,
            CrossConv,
            BottleneckCSP,
            C3,
            ShuffleV2Block,
            StemBlock,
        ]:
            c1, c2 = ch[f], args[0]

            c2 = make_divisible(c2 * gw, 8) if c2 != no else c2

            args = [c1, c2, *args[1:]]
            if m in [BottleneckCSP, C3]:
                args.insert(2, n)
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
        elif m is Detect:
            args.append([ch[x + 1] for x in f])
            if isinstance(args[1], int):  # number of anchors
                args[1] = [list(range(args[1] * 2))] * len(f)
        else:
            c2 = ch[f]

        m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace("__main__.", "")  # module type
        np = sum(x.numel() for x in m_.parameters())  # number params
        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        ch.append(c2)
    return nn.Sequential(*layers), sorted(save)
47
facelib/detection/yolov5face/models/yolov5l.yaml
Normal file
@@ -0,0 +1,47 @@
# parameters
nc: 1  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [4,5, 8,10, 13,16]  # P3/8
  - [23,29, 43,55, 73,105]  # P4/16
  - [146,217, 231,300, 335,433]  # P5/32

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, StemBlock, [64, 3, 2]],    # 0-P1/2
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],        # 2-P3/8
   [-1, 9, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],        # 4-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],       # 6-P5/32
   [-1, 1, SPP, [1024, [3,5,7]]],
   [-1, 3, C3, [1024, False]],        # 8
  ]

# YOLOv5 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 5], 1, Concat, [1]],         # cat backbone P4
   [-1, 3, C3, [512, False]],         # 12

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 3], 1, Concat, [1]],         # cat backbone P3
   [-1, 3, C3, [256, False]],         # 16 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 13], 1, Concat, [1]],        # cat head P4
   [-1, 3, C3, [512, False]],         # 19 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 9], 1, Concat, [1]],         # cat head P5
   [-1, 3, C3, [1024, False]],        # 22 (P5/32-large)

   [[16, 19, 22], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
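# Hedged sketch: building the network straight from this config with the
# Model class from models/yolo.py above. The dummy forward in __init__
# computes the strides; in training mode the forward returns the three raw
# detection maps.
import torch

model = Model(cfg='facelib/detection/yolov5face/models/yolov5l.yaml')
print(model.stride)                          # tensor([ 8., 16., 32.])
maps = model(torch.zeros(1, 3, 256, 256))    # list of 3 tensors, one per scale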
45
facelib/detection/yolov5face/models/yolov5n.yaml
Normal file
@@ -0,0 +1,45 @@
# parameters
nc: 1  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [4,5, 8,10, 13,16]  # P3/8
  - [23,29, 43,55, 73,105]  # P4/16
  - [146,217, 231,300, 335,433]  # P5/32

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, StemBlock, [32, 3, 2]],       # 0-P2/4
   [-1, 1, ShuffleV2Block, [128, 2]],    # 1-P3/8
   [-1, 3, ShuffleV2Block, [128, 1]],    # 2
   [-1, 1, ShuffleV2Block, [256, 2]],    # 3-P4/16
   [-1, 7, ShuffleV2Block, [256, 1]],    # 4
   [-1, 1, ShuffleV2Block, [512, 2]],    # 5-P5/32
   [-1, 3, ShuffleV2Block, [512, 1]],    # 6
  ]

# YOLOv5 head
head:
  [[-1, 1, Conv, [128, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],            # cat backbone P4
   [-1, 1, C3, [128, False]],            # 10

   [-1, 1, Conv, [128, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 2], 1, Concat, [1]],            # cat backbone P3
   [-1, 1, C3, [128, False]],            # 14 (P3/8-small)

   [-1, 1, Conv, [128, 3, 2]],
   [[-1, 11], 1, Concat, [1]],           # cat head P4
   [-1, 1, C3, [128, False]],            # 17 (P4/16-medium)

   [-1, 1, Conv, [128, 3, 2]],
   [[-1, 7], 1, Concat, [1]],            # cat head P5
   [-1, 1, C3, [128, False]],            # 20 (P5/32-large)

   [[14, 17, 20], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
0
facelib/detection/yolov5face/utils/__init__.py
Normal file
12
facelib/detection/yolov5face/utils/autoanchor.py
Normal file
@@ -0,0 +1,12 @@
# Auto-anchor utils


def check_anchor_order(m):
    # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
    a = m.anchor_grid.prod(-1).view(-1)  # anchor area
    da = a[-1] - a[0]  # delta a
    ds = m.stride[-1] - m.stride[0]  # delta s
    if da.sign() != ds.sign():  # order mismatch: anchor areas and strides grow in opposite directions
        print("Reversing anchor order")
        m.anchors[:] = m.anchors.flip(0)
        m.anchor_grid[:] = m.anchor_grid.flip(0)
35
facelib/detection/yolov5face/utils/datasets.py
Normal file
@@ -0,0 +1,35 @@
import cv2
import numpy as np


def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scale_fill=False, scaleup=True):
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
    elif scale_fill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
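# Usage sketch: pad a 480x640 frame to a stride-64 rectangle without
# distorting the aspect ratio.
img = np.zeros((480, 640, 3), dtype=np.uint8)
padded, ratio, (dw, dh) = letterbox(img, new_shape=640)
print(padded.shape, ratio, (dw, dh))  # (512, 640, 3) (1.0, 1.0) (0.0, 16.0)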
5
facelib/detection/yolov5face/utils/extract_ckpt.py
Normal file
@@ -0,0 +1,5 @@
import sys

import torch

sys.path.insert(0, './facelib/detection/yolov5face')
model = torch.load('facelib/detection/yolov5face/yolov5n-face.pt', map_location='cpu')['model']
torch.save(model.state_dict(), 'weights/facelib/yolov5n-face.pth')
271
facelib/detection/yolov5face/utils/general.py
Normal file
@@ -0,0 +1,271 @@
import math
import time

import numpy as np
import torch
import torchvision


def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
    # if new_size != img_size:
    #     print(f"WARNING: --img-size {img_size:g} must be multiple of max stride {s:g}, updating to {new_size:g}")
    return new_size


def make_divisible(x, divisor):
    # Returns x evenly divisible by divisor
    return math.ceil(x / divisor) * divisor

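# Examples: both helpers round up to the next stride multiple.
print(make_divisible(100, 32))  # 128
print(check_img_size(641))      # 672
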
def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y

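# The two conversions are inverses of each other (up to float error):
b = torch.tensor([[10.0, 20.0, 50.0, 80.0]])  # x1, y1, x2, y2
print(xyxy2xywh(b))                           # tensor([[30., 50., 40., 60.]])
print(xywh2xyxy(xyxy2xywh(b)))                # back to the original corners
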
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords


def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2

def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)

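# Worked example: a 5x5 (25-unit) intersection over a 175-unit union is 1/7.
a = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
b = torch.tensor([[5.0, 5.0, 15.0, 15.0],
                  [20.0, 20.0, 30.0, 30.0]])
print(box_iou(a, b))  # tensor([[0.1429, 0.0000]])
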
def non_max_suppression_face(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
    """Performs Non-Maximum Suppression (NMS) on inference results
    Returns:
        detections with shape: nx16 (x1, y1, x2, y2, conf, 10 landmark coords, cls)
    """

    nc = prediction.shape[2] - 15  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    max_wh = 4096  # (pixels) maximum box width and height
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 16), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            label = labels[xi]
            v = torch.zeros((len(label), nc + 15), device=x.device)
            v[:, :4] = label[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(label)), label[:, 0].long() + 15] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 15:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx16 (xyxy, conf, landmarks, cls)
        if multi_label:
            i, j = (x[:, 15:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 15, None], x[i, 5:15], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 15:].max(1, keepdim=True)
            x = torch.cat((box, conf, x[:, 5:15], j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class (class column sits at index 15 in this nx16 layout)
        if classes is not None:
            x = x[(x[:, 15:16] == torch.tensor(classes, device=x.device)).any(1)]

        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Batched NMS
        c = x[:, 15:16] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS

        if merge and (1 < n < 3e3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output

def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
    """Performs Non-Maximum Suppression (NMS) on inference results

    Returns:
        detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
    """

    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    max_wh = 4096  # (pixels) maximum box width and height
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            label_id = labels[xi]
            v = torch.zeros((len(label_id), nc + 5), device=x.device)
            v[:, :4] = label_id[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(label_id)), label_id[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue

        x = x[x[:, 4].argsort(descending=True)]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if merge and (1 < n < 3e3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f"WARNING: NMS time limit {time_limit}s exceeded")
            break  # time limit exceeded

    return output

def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale landmark coords (5 x/y pairs) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2, 4, 6, 8]] -= pad[0]  # x padding
    coords[:, [1, 3, 5, 7, 9]] -= pad[1]  # y padding
    coords[:, :10] /= gain
    coords[:, 0].clamp_(0, img0_shape[1])  # x1
    coords[:, 1].clamp_(0, img0_shape[0])  # y1
    coords[:, 2].clamp_(0, img0_shape[1])  # x2
    coords[:, 3].clamp_(0, img0_shape[0])  # y2
    coords[:, 4].clamp_(0, img0_shape[1])  # x3
    coords[:, 5].clamp_(0, img0_shape[0])  # y3
    coords[:, 6].clamp_(0, img0_shape[1])  # x4
    coords[:, 7].clamp_(0, img0_shape[0])  # y4
    coords[:, 8].clamp_(0, img0_shape[1])  # x5
    coords[:, 9].clamp_(0, img0_shape[0])  # y5
    return coords
40
facelib/detection/yolov5face/utils/torch_utils.py
Normal file
@@ -0,0 +1,40 @@
import torch
from torch import nn


def fuse_conv_and_bn(conv, bn):
    # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    fusedconv = (
        nn.Conv2d(
            conv.in_channels,
            conv.out_channels,
            kernel_size=conv.kernel_size,
            stride=conv.stride,
            padding=conv.padding,
            groups=conv.groups,
            bias=True,
        )
        .requires_grad_(False)
        .to(conv.weight.device)
    )

    # prepare filters
    w_conv = conv.weight.clone().view(conv.out_channels, -1)
    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))

    # prepare spatial bias
    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)

    return fusedconv

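# Hedged check: fusing must leave the layer's function unchanged in eval
# mode (the algebra relies on BN using its running statistics).
conv = nn.Conv2d(8, 16, 3, padding=1, bias=False)
bn = nn.BatchNorm2d(16).eval()
x = torch.randn(1, 8, 32, 32)
with torch.no_grad():
    same = torch.allclose(bn(conv(x)), fuse_conv_and_bn(conv, bn)(x), atol=1e-5)
print(same)  # True
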
def copy_attr(a, b, include=(), exclude=()):
    # Copy attributes from b to a, options to only include [...] and to exclude [...]
    for k, v in b.__dict__.items():
        if (include and k not in include) or k.startswith("_") or k in exclude:
            continue

        setattr(a, k, v)