trellis/models/sparse_elastic_mixin.py (new file, 24 lines)
@@ -0,0 +1,24 @@
from contextlib import contextmanager
from typing import *
import math
from ..modules import sparse as sp
from ..utils.elastic_utils import ElasticModuleMixin


class SparseTransformerElasticMixin(ElasticModuleMixin):
    def _get_input_size(self, x: sp.SparseTensor, *args, **kwargs):
        return x.feats.shape[0]

    @contextmanager
    def with_mem_ratio(self, mem_ratio=1.0):
        if mem_ratio == 1.0:
            yield 1.0
            return
        num_blocks = len(self.blocks)
        num_checkpoint_blocks = min(math.ceil((1 - mem_ratio) * num_blocks) + 1, num_blocks)
        exact_mem_ratio = 1 - (num_checkpoint_blocks - 1) / num_blocks
        for i in range(num_blocks):
            self.blocks[i].use_checkpoint = i < num_checkpoint_blocks
        yield exact_mem_ratio
        for i in range(num_blocks):
            self.blocks[i].use_checkpoint = False
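SparseTransformerElasticMixin presupposes that its host model keeps its transformer layers in `self.blocks` and that each block honors a `use_checkpoint` flag; it is meant to be mixed into a sparse-transformer model class via multiple inheritance. A standalone sketch of the checkpointing arithmetic only (`checkpoint_plan` is a hypothetical helper mirroring the body of `with_mem_ratio`, not part of this commit):

import math

def checkpoint_plan(mem_ratio: float, num_blocks: int):
    # Mirror with_mem_ratio: enable gradient checkpointing on enough leading
    # blocks to cover roughly (1 - mem_ratio) of the stack, then report the
    # memory ratio actually achieved.
    num_ckpt = min(math.ceil((1 - mem_ratio) * num_blocks) + 1, num_blocks)
    exact = 1 - (num_ckpt - 1) / num_blocks
    return num_ckpt, exact

print(checkpoint_plan(0.5, 12))   # (7, 0.5): 7 of 12 blocks recompute activations
print(checkpoint_plan(0.25, 12))  # (10, 0.25)
# mem_ratio == 1.0 never reaches this arithmetic: with_mem_ratio short-circuits
# and leaves every block untouched.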
trellis/models/sparse_structure_flow.py (new file, 200 lines)
@@ -0,0 +1,200 @@
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..modules.utils import convert_module_to_f16, convert_module_to_f32
from ..modules.transformer import AbsolutePositionEmbedder, ModulatedTransformerCrossBlock
from ..modules.spatial import patchify, unpatchify


class TimestepEmbedder(nn.Module):
    """
    Embeds scalar timesteps into vector representations.
    """
    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size, bias=True),
        )
        self.frequency_embedding_size = frequency_embedding_size

    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        """
        Create sinusoidal timestep embeddings.

        Args:
            t: a 1-D Tensor of N indices, one per batch element.
                These may be fractional.
            dim: the dimension of the output.
            max_period: controls the minimum frequency of the embeddings.

        Returns:
            an (N, D) Tensor of positional embeddings.
        """
        # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
        half = dim // 2
        freqs = torch.exp(
            -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
        ).to(device=t.device)
        args = t[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
        return embedding

    def forward(self, t):
        t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
        t_emb = self.mlp(t_freq)
        return t_emb


class SparseStructureFlowModel(nn.Module):
    def __init__(
        self,
        resolution: int,
        in_channels: int,
        model_channels: int,
        cond_channels: int,
        out_channels: int,
        num_blocks: int,
        num_heads: Optional[int] = None,
        num_head_channels: Optional[int] = 64,
        mlp_ratio: float = 4,
        patch_size: int = 2,
        pe_mode: Literal["ape", "rope"] = "ape",
        use_fp16: bool = False,
        use_checkpoint: bool = False,
        share_mod: bool = False,
        qk_rms_norm: bool = False,
        qk_rms_norm_cross: bool = False,
    ):
        super().__init__()
        self.resolution = resolution
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.cond_channels = cond_channels
        self.out_channels = out_channels
        self.num_blocks = num_blocks
        self.num_heads = num_heads or model_channels // num_head_channels
        self.mlp_ratio = mlp_ratio
        self.patch_size = patch_size
        self.pe_mode = pe_mode
        self.use_fp16 = use_fp16
        self.use_checkpoint = use_checkpoint
        self.share_mod = share_mod
        self.qk_rms_norm = qk_rms_norm
        self.qk_rms_norm_cross = qk_rms_norm_cross
        self.dtype = torch.float16 if use_fp16 else torch.float32

        self.t_embedder = TimestepEmbedder(model_channels)
        if share_mod:
            self.adaLN_modulation = nn.Sequential(
                nn.SiLU(),
                nn.Linear(model_channels, 6 * model_channels, bias=True)
            )

        if pe_mode == "ape":
            pos_embedder = AbsolutePositionEmbedder(model_channels, 3)
            coords = torch.meshgrid(*[torch.arange(res, device=self.device) for res in [resolution // patch_size] * 3], indexing='ij')
            coords = torch.stack(coords, dim=-1).reshape(-1, 3)
            pos_emb = pos_embedder(coords)
            self.register_buffer("pos_emb", pos_emb)

        self.input_layer = nn.Linear(in_channels * patch_size**3, model_channels)

        self.blocks = nn.ModuleList([
            ModulatedTransformerCrossBlock(
                model_channels,
                cond_channels,
                num_heads=self.num_heads,
                mlp_ratio=self.mlp_ratio,
                attn_mode='full',
                use_checkpoint=self.use_checkpoint,
                use_rope=(pe_mode == "rope"),
                share_mod=share_mod,
                qk_rms_norm=self.qk_rms_norm,
                qk_rms_norm_cross=self.qk_rms_norm_cross,
            )
            for _ in range(num_blocks)
        ])

        self.out_layer = nn.Linear(model_channels, out_channels * patch_size**3)

        self.initialize_weights()
        if use_fp16:
            self.convert_to_fp16()

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.blocks.apply(convert_module_to_f32)

    def initialize_weights(self) -> None:
        # Initialize transformer layers:
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

        # Initialize timestep embedding MLP:
        nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
        nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)

        # Zero-out adaLN modulation layers in DiT blocks:
        if self.share_mod:
            nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
            nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
        else:
            for block in self.blocks:
                nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
                nn.init.constant_(block.adaLN_modulation[-1].bias, 0)

        # Zero-out output layers:
        nn.init.constant_(self.out_layer.weight, 0)
        nn.init.constant_(self.out_layer.bias, 0)

    def forward(self, x: torch.Tensor, t: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        assert [*x.shape] == [x.shape[0], self.in_channels, *[self.resolution] * 3], \
            f"Input shape mismatch, got {x.shape}, expected {[x.shape[0], self.in_channels, *[self.resolution] * 3]}"

        h = patchify(x, self.patch_size)
        h = h.view(*h.shape[:2], -1).permute(0, 2, 1).contiguous()

        h = self.input_layer(h)
        h = h + self.pos_emb[None]
        t_emb = self.t_embedder(t)
        if self.share_mod:
            t_emb = self.adaLN_modulation(t_emb)
        t_emb = t_emb.type(self.dtype)
        h = h.type(self.dtype)
        cond = cond.type(self.dtype)
        for block in self.blocks:
            h = block(h, t_emb, cond)
        h = h.type(x.dtype)
        h = F.layer_norm(h, h.shape[-1:])
        h = self.out_layer(h)

        h = h.permute(0, 2, 1).view(h.shape[0], h.shape[2], *[self.resolution // self.patch_size] * 3)
        h = unpatchify(h, self.patch_size).contiguous()

        return h
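SparseStructureFlowModel is a DiT-style flow transformer over a dense 3D latent grid: the input is patchified into tokens, position-embedded, modulated by the sinusoidal timestep embedding, and cross-attends to a sequence of conditioning tokens. A minimal forward-pass sketch, assuming the trellis package is importable; the hyperparameters and the 77-token conditioning shape are illustrative assumptions, not values from this commit:

import torch
from trellis.models.sparse_structure_flow import SparseStructureFlowModel

model = SparseStructureFlowModel(
    resolution=16,        # input grid is 16^3
    in_channels=8,
    model_channels=256,   # 256 / 64 head channels -> 4 heads
    cond_channels=512,
    out_channels=8,
    num_blocks=4,
)

x = torch.randn(2, 8, 16, 16, 16)         # noisy latent grid, (N, C_in, R, R, R)
t = torch.randint(0, 1000, (2,)).float()  # one timestep per sample
cond = torch.randn(2, 77, 512)            # conditioning tokens, (N, L, C_cond)

v = model(x, t, cond)  # predicted flow field, same shape as x
assert v.shape == x.shape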
trellis/models/sparse_structure_vae.py (new file, 306 lines)
@@ -0,0 +1,306 @@
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..modules.norm import GroupNorm32, ChannelLayerNorm32
from ..modules.spatial import pixel_shuffle_3d
from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32


def norm_layer(norm_type: str, *args, **kwargs) -> nn.Module:
    """
    Return a normalization layer.
    """
    if norm_type == "group":
        return GroupNorm32(32, *args, **kwargs)
    elif norm_type == "layer":
        return ChannelLayerNorm32(*args, **kwargs)
    else:
        raise ValueError(f"Invalid norm type {norm_type}")


class ResBlock3d(nn.Module):
    def __init__(
        self,
        channels: int,
        out_channels: Optional[int] = None,
        norm_type: Literal["group", "layer"] = "layer",
    ):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels

        self.norm1 = norm_layer(norm_type, channels)
        self.norm2 = norm_layer(norm_type, self.out_channels)
        self.conv1 = nn.Conv3d(channels, self.out_channels, 3, padding=1)
        self.conv2 = zero_module(nn.Conv3d(self.out_channels, self.out_channels, 3, padding=1))
        self.skip_connection = nn.Conv3d(channels, self.out_channels, 1) if channels != self.out_channels else nn.Identity()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h = self.norm1(x)
        h = F.silu(h)
        h = self.conv1(h)
        h = self.norm2(h)
        h = F.silu(h)
        h = self.conv2(h)
        h = h + self.skip_connection(x)
        return h


class DownsampleBlock3d(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        mode: Literal["conv", "avgpool"] = "conv",
    ):
        assert mode in ["conv", "avgpool"], f"Invalid mode {mode}"

        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        if mode == "conv":
            self.conv = nn.Conv3d(in_channels, out_channels, 2, stride=2)
        elif mode == "avgpool":
            assert in_channels == out_channels, "Pooling mode requires in_channels to be equal to out_channels"

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if hasattr(self, "conv"):
            return self.conv(x)
        else:
            return F.avg_pool3d(x, 2)


class UpsampleBlock3d(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        mode: Literal["conv", "nearest"] = "conv",
    ):
        assert mode in ["conv", "nearest"], f"Invalid mode {mode}"

        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        if mode == "conv":
            self.conv = nn.Conv3d(in_channels, out_channels*8, 3, padding=1)
        elif mode == "nearest":
            assert in_channels == out_channels, "Nearest mode requires in_channels to be equal to out_channels"

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if hasattr(self, "conv"):
            x = self.conv(x)
            return pixel_shuffle_3d(x, 2)
        else:
            return F.interpolate(x, scale_factor=2, mode="nearest")


class SparseStructureEncoder(nn.Module):
    """
    Encoder for Sparse Structure (\mathcal{E}_S in the paper Sec. 3.3).

    Args:
        in_channels (int): Channels of the input.
        latent_channels (int): Channels of the latent representation.
        num_res_blocks (int): Number of residual blocks at each resolution.
        channels (List[int]): Channels of the encoder blocks.
        num_res_blocks_middle (int): Number of residual blocks in the middle.
        norm_type (Literal["group", "layer"]): Type of normalization layer.
        use_fp16 (bool): Whether to use FP16.
    """
    def __init__(
        self,
        in_channels: int,
        latent_channels: int,
        num_res_blocks: int,
        channels: List[int],
        num_res_blocks_middle: int = 2,
        norm_type: Literal["group", "layer"] = "layer",
        use_fp16: bool = False,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.latent_channels = latent_channels
        self.num_res_blocks = num_res_blocks
        self.channels = channels
        self.num_res_blocks_middle = num_res_blocks_middle
        self.norm_type = norm_type
        self.use_fp16 = use_fp16
        self.dtype = torch.float16 if use_fp16 else torch.float32

        self.input_layer = nn.Conv3d(in_channels, channels[0], 3, padding=1)

        self.blocks = nn.ModuleList([])
        for i, ch in enumerate(channels):
            self.blocks.extend([
                ResBlock3d(ch, ch)
                for _ in range(num_res_blocks)
            ])
            if i < len(channels) - 1:
                self.blocks.append(
                    DownsampleBlock3d(ch, channels[i+1])
                )

        self.middle_block = nn.Sequential(*[
            ResBlock3d(channels[-1], channels[-1])
            for _ in range(num_res_blocks_middle)
        ])

        self.out_layer = nn.Sequential(
            norm_layer(norm_type, channels[-1]),
            nn.SiLU(),
            nn.Conv3d(channels[-1], latent_channels*2, 3, padding=1)
        )

        if use_fp16:
            self.convert_to_fp16()

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.use_fp16 = True
        self.dtype = torch.float16
        self.blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.use_fp16 = False
        self.dtype = torch.float32
        self.blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)

    def forward(self, x: torch.Tensor, sample_posterior: bool = False, return_raw: bool = False) -> torch.Tensor:
        h = self.input_layer(x)
        h = h.type(self.dtype)

        for block in self.blocks:
            h = block(h)
        h = self.middle_block(h)

        h = h.type(x.dtype)
        h = self.out_layer(h)

        mean, logvar = h.chunk(2, dim=1)

        if sample_posterior:
            std = torch.exp(0.5 * logvar)
            z = mean + std * torch.randn_like(std)
        else:
            z = mean

        if return_raw:
            return z, mean, logvar
        return z


class SparseStructureDecoder(nn.Module):
    """
    Decoder for Sparse Structure (\mathcal{D}_S in the paper Sec. 3.3).

    Args:
        out_channels (int): Channels of the output.
        latent_channels (int): Channels of the latent representation.
        num_res_blocks (int): Number of residual blocks at each resolution.
        channels (List[int]): Channels of the decoder blocks.
        num_res_blocks_middle (int): Number of residual blocks in the middle.
        norm_type (Literal["group", "layer"]): Type of normalization layer.
        use_fp16 (bool): Whether to use FP16.
    """
    def __init__(
        self,
        out_channels: int,
        latent_channels: int,
        num_res_blocks: int,
        channels: List[int],
        num_res_blocks_middle: int = 2,
        norm_type: Literal["group", "layer"] = "layer",
        use_fp16: bool = False,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.latent_channels = latent_channels
        self.num_res_blocks = num_res_blocks
        self.channels = channels
        self.num_res_blocks_middle = num_res_blocks_middle
        self.norm_type = norm_type
        self.use_fp16 = use_fp16
        self.dtype = torch.float16 if use_fp16 else torch.float32

        self.input_layer = nn.Conv3d(latent_channels, channels[0], 3, padding=1)

        self.middle_block = nn.Sequential(*[
            ResBlock3d(channels[0], channels[0])
            for _ in range(num_res_blocks_middle)
        ])

        self.blocks = nn.ModuleList([])
        for i, ch in enumerate(channels):
            self.blocks.extend([
                ResBlock3d(ch, ch)
                for _ in range(num_res_blocks)
            ])
            if i < len(channels) - 1:
                self.blocks.append(
                    UpsampleBlock3d(ch, channels[i+1])
                )

        self.out_layer = nn.Sequential(
            norm_layer(norm_type, channels[-1]),
            nn.SiLU(),
            nn.Conv3d(channels[-1], out_channels, 3, padding=1)
        )

        if use_fp16:
            self.convert_to_fp16()

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.use_fp16 = True
        self.dtype = torch.float16
        self.blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.use_fp16 = False
        self.dtype = torch.float32
        self.blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h = self.input_layer(x)

        h = h.type(self.dtype)

        h = self.middle_block(h)
        for block in self.blocks:
            h = block(h)

        h = h.type(x.dtype)
        h = self.out_layer(h)
        return h
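The encoder halves the grid resolution after every channel stage except the last and emits the mean and log-variance of a Gaussian posterior (latent_channels*2 output channels); the decoder mirrors it with pixel-shuffle upsampling. A round-trip sketch with made-up channel settings, assuming the trellis package is importable:

import torch
from trellis.models.sparse_structure_vae import SparseStructureEncoder, SparseStructureDecoder

# Hypothetical configuration: two downsamples (64 -> 32 -> 16) and two matching upsamples.
enc = SparseStructureEncoder(in_channels=1, latent_channels=8,
                             num_res_blocks=2, channels=[32, 64, 128])
dec = SparseStructureDecoder(out_channels=1, latent_channels=8,
                             num_res_blocks=2, channels=[128, 64, 32])

vox = torch.randn(2, 1, 64, 64, 64)               # dense occupancy grid, (N, C, R, R, R)
z, mean, logvar = enc(vox, sample_posterior=True, return_raw=True)
assert z.shape == (2, 8, 16, 16, 16)              # latent grid after two downsamples
recon = dec(z)
assert recon.shape == vox.shape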