From 07ed844f3192cd1e77469ef86983c7f06befc68c Mon Sep 17 00:00:00 2001
From: zcr
Date: Tue, 17 Mar 2026 11:30:00 +0800
Subject: [PATCH] Add sparse structure datasets, models, and VAE trainer

---
 trellis/datasets/sparse_feat2render.py       | 134 ++++++++
 trellis/datasets/sparse_structure_latent.py  | 188 ++++++++++++
 trellis/models/sparse_elastic_mixin.py       |  24 ++
 trellis/models/sparse_structure_flow.py      | 200 ++++++++++++
 trellis/models/sparse_structure_vae.py       | 306 +++++++++++++++++++
 trellis/trainers/vae/sparse_structure_vae.py | 130 ++++++++
 6 files changed, 982 insertions(+)
 create mode 100644 trellis/datasets/sparse_feat2render.py
 create mode 100644 trellis/datasets/sparse_structure_latent.py
 create mode 100644 trellis/models/sparse_elastic_mixin.py
 create mode 100644 trellis/models/sparse_structure_flow.py
 create mode 100644 trellis/models/sparse_structure_vae.py
 create mode 100644 trellis/trainers/vae/sparse_structure_vae.py

diff --git a/trellis/datasets/sparse_feat2render.py b/trellis/datasets/sparse_feat2render.py
new file mode 100644
index 0000000..65bf488
--- /dev/null
+++ b/trellis/datasets/sparse_feat2render.py
@@ -0,0 +1,134 @@
+import os
+from PIL import Image
+import json
+import numpy as np
+import pandas as pd
+import torch
+import utils3d.torch
+from ..modules.sparse.basic import SparseTensor
+from .components import StandardDatasetBase
+
+
+class SparseFeat2Render(StandardDatasetBase):
+    """
+    SparseFeat2Render dataset.
+
+    Args:
+        roots (str): paths to the dataset
+        image_size (int): size of the image
+        model (str): model name
+        resolution (int): resolution of the data
+        min_aesthetic_score (float): minimum aesthetic score
+        max_num_voxels (int): maximum number of voxels
+    """
+    def __init__(
+        self,
+        roots: str,
+        image_size: int,
+        model: str = 'dinov2_vitl14_reg',
+        resolution: int = 64,
+        min_aesthetic_score: float = 5.0,
+        max_num_voxels: int = 32768,
+    ):
+        self.image_size = image_size
+        self.model = model
+        self.resolution = resolution
+        self.min_aesthetic_score = min_aesthetic_score
+        self.max_num_voxels = max_num_voxels
+        self.value_range = (0, 1)
+
+        super().__init__(roots)
+
+    def filter_metadata(self, metadata):
+        stats = {}
+        metadata = metadata[metadata[f'feature_{self.model}']]
+        stats['With features'] = len(metadata)
+        metadata = metadata[metadata['aesthetic_score'] >= self.min_aesthetic_score]
+        stats[f'Aesthetic score >= {self.min_aesthetic_score}'] = len(metadata)
+        metadata = metadata[metadata['num_voxels'] <= self.max_num_voxels]
+        stats[f'Num voxels <= {self.max_num_voxels}'] = len(metadata)
+        return metadata, stats
+
+    def _get_image(self, root, instance):
+        with open(os.path.join(root, 'renders', instance, 'transforms.json')) as f:
+            metadata = json.load(f)
+        n_views = len(metadata['frames'])
+        view = np.random.randint(n_views)
+        metadata = metadata['frames'][view]
+        fov = metadata['camera_angle_x']
+        intrinsics = utils3d.torch.intrinsics_from_fov_xy(torch.tensor(fov), torch.tensor(fov))
+        c2w = torch.tensor(metadata['transform_matrix'])
+        c2w[:3, 1:3] *= -1
+        extrinsics = torch.inverse(c2w)
+
+        image_path = os.path.join(root, 'renders', instance, metadata['file_path'])
+        image = Image.open(image_path)
+        alpha = image.getchannel(3)
+        image = image.convert('RGB')
+        image = image.resize((self.image_size, self.image_size), Image.Resampling.LANCZOS)
+        alpha = alpha.resize((self.image_size, self.image_size), Image.Resampling.LANCZOS)
+        image = torch.tensor(np.array(image)).permute(2, 0, 1).float() / 255.0
+        alpha = torch.tensor(np.array(alpha)).float() / 255.0
+
+        return {
+            'image': image,
+            'alpha': alpha,
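+            # NOTE: the sign flip on c2w[:3, 1:3] above converts the
+            # OpenGL/Blender camera frame stored in transforms.json to the
+            # OpenCV convention, so 'extrinsics' (inverse of c2w) is a
+            # world-to-camera matrix in OpenCV coordinates.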
+ 'extrinsics': extrinsics, + 'intrinsics': intrinsics, + } + + def _get_feat(self, root, instance): + DATA_RESOLUTION = 64 + feats_path = os.path.join(root, 'features', self.model, f'{instance}.npz') + feats = np.load(feats_path, allow_pickle=True) + coords = torch.tensor(feats['indices']).int() + feats = torch.tensor(feats['patchtokens']).float() + + if self.resolution != DATA_RESOLUTION: + factor = DATA_RESOLUTION // self.resolution + coords = coords // factor + coords, idx = coords.unique(return_inverse=True, dim=0) + feats = torch.scatter_reduce( + torch.zeros(coords.shape[0], feats.shape[1], device=feats.device), + dim=0, + index=idx.unsqueeze(-1).expand(-1, feats.shape[1]), + src=feats, + reduce='mean' + ) + + return { + 'coords': coords, + 'feats': feats, + } + + @torch.no_grad() + def visualize_sample(self, sample: dict): + return sample['image'] + + @staticmethod + def collate_fn(batch): + pack = {} + coords = [] + for i, b in enumerate(batch): + coords.append(torch.cat([torch.full((b['coords'].shape[0], 1), i, dtype=torch.int32), b['coords']], dim=-1)) + coords = torch.cat(coords) + feats = torch.cat([b['feats'] for b in batch]) + pack['feats'] = SparseTensor( + coords=coords, + feats=feats, + ) + + pack['image'] = torch.stack([b['image'] for b in batch]) + pack['alpha'] = torch.stack([b['alpha'] for b in batch]) + pack['extrinsics'] = torch.stack([b['extrinsics'] for b in batch]) + pack['intrinsics'] = torch.stack([b['intrinsics'] for b in batch]) + + return pack + + def get_instance(self, root, instance): + image = self._get_image(root, instance) + feat = self._get_feat(root, instance) + return { + **image, + **feat, + } diff --git a/trellis/datasets/sparse_structure_latent.py b/trellis/datasets/sparse_structure_latent.py new file mode 100644 index 0000000..af4c5d1 --- /dev/null +++ b/trellis/datasets/sparse_structure_latent.py @@ -0,0 +1,188 @@ +import os +import json +from typing import * +import numpy as np +import torch +import utils3d +from ..representations.octree import DfsOctree as Octree +from ..renderers import OctreeRenderer +from .components import StandardDatasetBase, TextConditionedMixin, ImageConditionedMixin +from .. 
import models + + +class SparseStructureLatentVisMixin: + def __init__( + self, + *args, + pretrained_ss_dec: str = 'microsoft/TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16', + ss_dec_path: Optional[str] = None, + ss_dec_ckpt: Optional[str] = None, + **kwargs + ): + super().__init__(*args, **kwargs) + self.ss_dec = None + self.pretrained_ss_dec = pretrained_ss_dec + self.ss_dec_path = ss_dec_path + self.ss_dec_ckpt = ss_dec_ckpt + + def _loading_ss_dec(self): + if self.ss_dec is not None: + return + if self.ss_dec_path is not None: + cfg = json.load(open(os.path.join(self.ss_dec_path, 'config.json'), 'r')) + decoder = getattr(models, cfg['models']['decoder']['name'])(**cfg['models']['decoder']['args']) + ckpt_path = os.path.join(self.ss_dec_path, 'ckpts', f'decoder_{self.ss_dec_ckpt}.pt') + decoder.load_state_dict(torch.load(ckpt_path, map_location='cpu', weights_only=True)) + else: + decoder = models.from_pretrained(self.pretrained_ss_dec) + self.ss_dec = decoder.cuda().eval() + + def _delete_ss_dec(self): + del self.ss_dec + self.ss_dec = None + + @torch.no_grad() + def decode_latent(self, z, batch_size=4): + self._loading_ss_dec() + ss = [] + if self.normalization is not None: + z = z * self.std.to(z.device) + self.mean.to(z.device) + for i in range(0, z.shape[0], batch_size): + ss.append(self.ss_dec(z[i:i+batch_size])) + ss = torch.cat(ss, dim=0) + self._delete_ss_dec() + return ss + + @torch.no_grad() + def visualize_sample(self, x_0: Union[torch.Tensor, dict]): + x_0 = x_0 if isinstance(x_0, torch.Tensor) else x_0['x_0'] + x_0 = self.decode_latent(x_0.cuda()) + + renderer = OctreeRenderer() + renderer.rendering_options.resolution = 512 + renderer.rendering_options.near = 0.8 + renderer.rendering_options.far = 1.6 + renderer.rendering_options.bg_color = (0, 0, 0) + renderer.rendering_options.ssaa = 4 + renderer.pipe.primitive = 'voxel' + + # Build camera + yaws = [0, np.pi / 2, np.pi, 3 * np.pi / 2] + yaws_offset = np.random.uniform(-np.pi / 4, np.pi / 4) + yaws = [y + yaws_offset for y in yaws] + pitch = [np.random.uniform(-np.pi / 4, np.pi / 4) for _ in range(4)] + + exts = [] + ints = [] + for yaw, pitch in zip(yaws, pitch): + orig = torch.tensor([ + np.sin(yaw) * np.cos(pitch), + np.cos(yaw) * np.cos(pitch), + np.sin(pitch), + ]).float().cuda() * 2 + fov = torch.deg2rad(torch.tensor(30)).cuda() + extrinsics = utils3d.torch.extrinsics_look_at(orig, torch.tensor([0, 0, 0]).float().cuda(), torch.tensor([0, 0, 1]).float().cuda()) + intrinsics = utils3d.torch.intrinsics_from_fov_xy(fov, fov) + exts.append(extrinsics) + ints.append(intrinsics) + + images = [] + + # Build each representation + x_0 = x_0.cuda() + for i in range(x_0.shape[0]): + representation = Octree( + depth=10, + aabb=[-0.5, -0.5, -0.5, 1, 1, 1], + device='cuda', + primitive='voxel', + sh_degree=0, + primitive_config={'solid': True}, + ) + coords = torch.nonzero(x_0[i, 0] > 0, as_tuple=False) + resolution = x_0.shape[-1] + representation.position = coords.float() / resolution + representation.depth = torch.full((representation.position.shape[0], 1), int(np.log2(resolution)), dtype=torch.uint8, device='cuda') + + image = torch.zeros(3, 1024, 1024).cuda() + tile = [2, 2] + for j, (ext, intr) in enumerate(zip(exts, ints)): + res = renderer.render(representation, ext, intr, colors_overwrite=representation.position) + image[:, 512 * (j // tile[1]):512 * (j // tile[1] + 1), 512 * (j % tile[1]):512 * (j % tile[1] + 1)] = res['color'] + images.append(image) + + return torch.stack(images) + + +class 
SparseStructureLatent(SparseStructureLatentVisMixin, StandardDatasetBase): + """ + Sparse structure latent dataset + + Args: + roots (str): path to the dataset + latent_model (str): name of the latent model + min_aesthetic_score (float): minimum aesthetic score + normalization (dict): normalization stats + pretrained_ss_dec (str): name of the pretrained sparse structure decoder + ss_dec_path (str): path to the sparse structure decoder, if given, will override the pretrained_ss_dec + ss_dec_ckpt (str): name of the sparse structure decoder checkpoint + """ + def __init__(self, + roots: str, + *, + latent_model: str, + min_aesthetic_score: float = 5.0, + normalization: Optional[dict] = None, + pretrained_ss_dec: str = 'microsoft/TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16', + ss_dec_path: Optional[str] = None, + ss_dec_ckpt: Optional[str] = None, + ): + self.latent_model = latent_model + self.min_aesthetic_score = min_aesthetic_score + self.normalization = normalization + self.value_range = (0, 1) + + super().__init__( + roots, + pretrained_ss_dec=pretrained_ss_dec, + ss_dec_path=ss_dec_path, + ss_dec_ckpt=ss_dec_ckpt, + ) + + if self.normalization is not None: + self.mean = torch.tensor(self.normalization['mean']).reshape(-1, 1, 1, 1) + self.std = torch.tensor(self.normalization['std']).reshape(-1, 1, 1, 1) + + def filter_metadata(self, metadata): + stats = {} + metadata = metadata[metadata[f'ss_latent_{self.latent_model}']] + stats['With sparse structure latents'] = len(metadata) + metadata = metadata[metadata['aesthetic_score'] >= self.min_aesthetic_score] + stats[f'Aesthetic score >= {self.min_aesthetic_score}'] = len(metadata) + return metadata, stats + + def get_instance(self, root, instance): + latent = np.load(os.path.join(root, 'ss_latents', self.latent_model, f'{instance}.npz')) + z = torch.tensor(latent['mean']).float() + if self.normalization is not None: + z = (z - self.mean) / self.std + + pack = { + 'x_0': z, + } + return pack + + +class TextConditionedSparseStructureLatent(TextConditionedMixin, SparseStructureLatent): + """ + Text-conditioned sparse structure dataset + """ + pass + + +class ImageConditionedSparseStructureLatent(ImageConditionedMixin, SparseStructureLatent): + """ + Image-conditioned sparse structure dataset + """ + pass + \ No newline at end of file diff --git a/trellis/models/sparse_elastic_mixin.py b/trellis/models/sparse_elastic_mixin.py new file mode 100644 index 0000000..66d204c --- /dev/null +++ b/trellis/models/sparse_elastic_mixin.py @@ -0,0 +1,24 @@ +from contextlib import contextmanager +from typing import * +import math +from ..modules import sparse as sp +from ..utils.elastic_utils import ElasticModuleMixin + + +class SparseTransformerElasticMixin(ElasticModuleMixin): + def _get_input_size(self, x: sp.SparseTensor, *args, **kwargs): + return x.feats.shape[0] + + @contextmanager + def with_mem_ratio(self, mem_ratio=1.0): + if mem_ratio == 1.0: + yield 1.0 + return + num_blocks = len(self.blocks) + num_checkpoint_blocks = min(math.ceil((1 - mem_ratio) * num_blocks) + 1, num_blocks) + exact_mem_ratio = 1 - (num_checkpoint_blocks - 1) / num_blocks + for i in range(num_blocks): + self.blocks[i].use_checkpoint = i < num_checkpoint_blocks + yield exact_mem_ratio + for i in range(num_blocks): + self.blocks[i].use_checkpoint = False diff --git a/trellis/models/sparse_structure_flow.py b/trellis/models/sparse_structure_flow.py new file mode 100644 index 0000000..aee71a9 --- /dev/null +++ b/trellis/models/sparse_structure_flow.py @@ -0,0 +1,200 @@ +from 
typing import * +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from ..modules.utils import convert_module_to_f16, convert_module_to_f32 +from ..modules.transformer import AbsolutePositionEmbedder, ModulatedTransformerCrossBlock +from ..modules.spatial import patchify, unpatchify + + +class TimestepEmbedder(nn.Module): + """ + Embeds scalar timesteps into vector representations. + """ + def __init__(self, hidden_size, frequency_embedding_size=256): + super().__init__() + self.mlp = nn.Sequential( + nn.Linear(frequency_embedding_size, hidden_size, bias=True), + nn.SiLU(), + nn.Linear(hidden_size, hidden_size, bias=True), + ) + self.frequency_embedding_size = frequency_embedding_size + + @staticmethod + def timestep_embedding(t, dim, max_period=10000): + """ + Create sinusoidal timestep embeddings. + + Args: + t: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + dim: the dimension of the output. + max_period: controls the minimum frequency of the embeddings. + + Returns: + an (N, D) Tensor of positional embeddings. + """ + # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py + half = dim // 2 + freqs = torch.exp( + -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(device=t.device) + args = t[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + return embedding + + def forward(self, t): + t_freq = self.timestep_embedding(t, self.frequency_embedding_size) + t_emb = self.mlp(t_freq) + return t_emb + + +class SparseStructureFlowModel(nn.Module): + def __init__( + self, + resolution: int, + in_channels: int, + model_channels: int, + cond_channels: int, + out_channels: int, + num_blocks: int, + num_heads: Optional[int] = None, + num_head_channels: Optional[int] = 64, + mlp_ratio: float = 4, + patch_size: int = 2, + pe_mode: Literal["ape", "rope"] = "ape", + use_fp16: bool = False, + use_checkpoint: bool = False, + share_mod: bool = False, + qk_rms_norm: bool = False, + qk_rms_norm_cross: bool = False, + ): + super().__init__() + self.resolution = resolution + self.in_channels = in_channels + self.model_channels = model_channels + self.cond_channels = cond_channels + self.out_channels = out_channels + self.num_blocks = num_blocks + self.num_heads = num_heads or model_channels // num_head_channels + self.mlp_ratio = mlp_ratio + self.patch_size = patch_size + self.pe_mode = pe_mode + self.use_fp16 = use_fp16 + self.use_checkpoint = use_checkpoint + self.share_mod = share_mod + self.qk_rms_norm = qk_rms_norm + self.qk_rms_norm_cross = qk_rms_norm_cross + self.dtype = torch.float16 if use_fp16 else torch.float32 + + self.t_embedder = TimestepEmbedder(model_channels) + if share_mod: + self.adaLN_modulation = nn.Sequential( + nn.SiLU(), + nn.Linear(model_channels, 6 * model_channels, bias=True) + ) + + if pe_mode == "ape": + pos_embedder = AbsolutePositionEmbedder(model_channels, 3) + coords = torch.meshgrid(*[torch.arange(res, device=self.device) for res in [resolution // patch_size] * 3], indexing='ij') + coords = torch.stack(coords, dim=-1).reshape(-1, 3) + pos_emb = pos_embedder(coords) + self.register_buffer("pos_emb", pos_emb) + + self.input_layer = nn.Linear(in_channels * patch_size**3, model_channels) + + self.blocks = nn.ModuleList([ + ModulatedTransformerCrossBlock( + model_channels, + cond_channels, + 
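+                # Each entry is a DiT-style transformer layer: full
+                # self-attention, cross-attention against the conditioning
+                # tokens, and adaLN modulation driven by the timestep
+                # embedding (a single shared modulation head when share_mod
+                # is set).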
num_heads=self.num_heads, + mlp_ratio=self.mlp_ratio, + attn_mode='full', + use_checkpoint=self.use_checkpoint, + use_rope=(pe_mode == "rope"), + share_mod=share_mod, + qk_rms_norm=self.qk_rms_norm, + qk_rms_norm_cross=self.qk_rms_norm_cross, + ) + for _ in range(num_blocks) + ]) + + self.out_layer = nn.Linear(model_channels, out_channels * patch_size**3) + + self.initialize_weights() + if use_fp16: + self.convert_to_fp16() + + @property + def device(self) -> torch.device: + """ + Return the device of the model. + """ + return next(self.parameters()).device + + def convert_to_fp16(self) -> None: + """ + Convert the torso of the model to float16. + """ + self.blocks.apply(convert_module_to_f16) + + def convert_to_fp32(self) -> None: + """ + Convert the torso of the model to float32. + """ + self.blocks.apply(convert_module_to_f32) + + def initialize_weights(self) -> None: + # Initialize transformer layers: + def _basic_init(module): + if isinstance(module, nn.Linear): + torch.nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.constant_(module.bias, 0) + self.apply(_basic_init) + + # Initialize timestep embedding MLP: + nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02) + nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02) + + # Zero-out adaLN modulation layers in DiT blocks: + if self.share_mod: + nn.init.constant_(self.adaLN_modulation[-1].weight, 0) + nn.init.constant_(self.adaLN_modulation[-1].bias, 0) + else: + for block in self.blocks: + nn.init.constant_(block.adaLN_modulation[-1].weight, 0) + nn.init.constant_(block.adaLN_modulation[-1].bias, 0) + + # Zero-out output layers: + nn.init.constant_(self.out_layer.weight, 0) + nn.init.constant_(self.out_layer.bias, 0) + + def forward(self, x: torch.Tensor, t: torch.Tensor, cond: torch.Tensor) -> torch.Tensor: + assert [*x.shape] == [x.shape[0], self.in_channels, *[self.resolution] * 3], \ + f"Input shape mismatch, got {x.shape}, expected {[x.shape[0], self.in_channels, *[self.resolution] * 3]}" + + h = patchify(x, self.patch_size) + h = h.view(*h.shape[:2], -1).permute(0, 2, 1).contiguous() + + h = self.input_layer(h) + h = h + self.pos_emb[None] + t_emb = self.t_embedder(t) + if self.share_mod: + t_emb = self.adaLN_modulation(t_emb) + t_emb = t_emb.type(self.dtype) + h = h.type(self.dtype) + cond = cond.type(self.dtype) + for block in self.blocks: + h = block(h, t_emb, cond) + h = h.type(x.dtype) + h = F.layer_norm(h, h.shape[-1:]) + h = self.out_layer(h) + + h = h.permute(0, 2, 1).view(h.shape[0], h.shape[2], *[self.resolution // self.patch_size] * 3) + h = unpatchify(h, self.patch_size).contiguous() + + return h diff --git a/trellis/models/sparse_structure_vae.py b/trellis/models/sparse_structure_vae.py new file mode 100644 index 0000000..c3e0913 --- /dev/null +++ b/trellis/models/sparse_structure_vae.py @@ -0,0 +1,306 @@ +from typing import * +import torch +import torch.nn as nn +import torch.nn.functional as F +from ..modules.norm import GroupNorm32, ChannelLayerNorm32 +from ..modules.spatial import pixel_shuffle_3d +from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32 + + +def norm_layer(norm_type: str, *args, **kwargs) -> nn.Module: + """ + Return a normalization layer. 
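+
+    Args:
+        norm_type: "group" for GroupNorm32 (with 32 groups) or "layer" for
+            ChannelLayerNorm32.
+        *args, **kwargs: forwarded to the chosen normalization layer,
+            typically the number of channels.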
+ """ + if norm_type == "group": + return GroupNorm32(32, *args, **kwargs) + elif norm_type == "layer": + return ChannelLayerNorm32(*args, **kwargs) + else: + raise ValueError(f"Invalid norm type {norm_type}") + + +class ResBlock3d(nn.Module): + def __init__( + self, + channels: int, + out_channels: Optional[int] = None, + norm_type: Literal["group", "layer"] = "layer", + ): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + + self.norm1 = norm_layer(norm_type, channels) + self.norm2 = norm_layer(norm_type, self.out_channels) + self.conv1 = nn.Conv3d(channels, self.out_channels, 3, padding=1) + self.conv2 = zero_module(nn.Conv3d(self.out_channels, self.out_channels, 3, padding=1)) + self.skip_connection = nn.Conv3d(channels, self.out_channels, 1) if channels != self.out_channels else nn.Identity() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + h = self.norm1(x) + h = F.silu(h) + h = self.conv1(h) + h = self.norm2(h) + h = F.silu(h) + h = self.conv2(h) + h = h + self.skip_connection(x) + return h + + +class DownsampleBlock3d(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + mode: Literal["conv", "avgpool"] = "conv", + ): + assert mode in ["conv", "avgpool"], f"Invalid mode {mode}" + + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + + if mode == "conv": + self.conv = nn.Conv3d(in_channels, out_channels, 2, stride=2) + elif mode == "avgpool": + assert in_channels == out_channels, "Pooling mode requires in_channels to be equal to out_channels" + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if hasattr(self, "conv"): + return self.conv(x) + else: + return F.avg_pool3d(x, 2) + + +class UpsampleBlock3d(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + mode: Literal["conv", "nearest"] = "conv", + ): + assert mode in ["conv", "nearest"], f"Invalid mode {mode}" + + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + + if mode == "conv": + self.conv = nn.Conv3d(in_channels, out_channels*8, 3, padding=1) + elif mode == "nearest": + assert in_channels == out_channels, "Nearest mode requires in_channels to be equal to out_channels" + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if hasattr(self, "conv"): + x = self.conv(x) + return pixel_shuffle_3d(x, 2) + else: + return F.interpolate(x, scale_factor=2, mode="nearest") + + +class SparseStructureEncoder(nn.Module): + """ + Encoder for Sparse Structure (\mathcal{E}_S in the paper Sec. 3.3). + + Args: + in_channels (int): Channels of the input. + latent_channels (int): Channels of the latent representation. + num_res_blocks (int): Number of residual blocks at each resolution. + channels (List[int]): Channels of the encoder blocks. + num_res_blocks_middle (int): Number of residual blocks in the middle. + norm_type (Literal["group", "layer"]): Type of normalization layer. + use_fp16 (bool): Whether to use FP16. 
+ """ + def __init__( + self, + in_channels: int, + latent_channels: int, + num_res_blocks: int, + channels: List[int], + num_res_blocks_middle: int = 2, + norm_type: Literal["group", "layer"] = "layer", + use_fp16: bool = False, + ): + super().__init__() + self.in_channels = in_channels + self.latent_channels = latent_channels + self.num_res_blocks = num_res_blocks + self.channels = channels + self.num_res_blocks_middle = num_res_blocks_middle + self.norm_type = norm_type + self.use_fp16 = use_fp16 + self.dtype = torch.float16 if use_fp16 else torch.float32 + + self.input_layer = nn.Conv3d(in_channels, channels[0], 3, padding=1) + + self.blocks = nn.ModuleList([]) + for i, ch in enumerate(channels): + self.blocks.extend([ + ResBlock3d(ch, ch) + for _ in range(num_res_blocks) + ]) + if i < len(channels) - 1: + self.blocks.append( + DownsampleBlock3d(ch, channels[i+1]) + ) + + self.middle_block = nn.Sequential(*[ + ResBlock3d(channels[-1], channels[-1]) + for _ in range(num_res_blocks_middle) + ]) + + self.out_layer = nn.Sequential( + norm_layer(norm_type, channels[-1]), + nn.SiLU(), + nn.Conv3d(channels[-1], latent_channels*2, 3, padding=1) + ) + + if use_fp16: + self.convert_to_fp16() + + @property + def device(self) -> torch.device: + """ + Return the device of the model. + """ + return next(self.parameters()).device + + def convert_to_fp16(self) -> None: + """ + Convert the torso of the model to float16. + """ + self.use_fp16 = True + self.dtype = torch.float16 + self.blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + + def convert_to_fp32(self) -> None: + """ + Convert the torso of the model to float32. + """ + self.use_fp16 = False + self.dtype = torch.float32 + self.blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + + def forward(self, x: torch.Tensor, sample_posterior: bool = False, return_raw: bool = False) -> torch.Tensor: + h = self.input_layer(x) + h = h.type(self.dtype) + + for block in self.blocks: + h = block(h) + h = self.middle_block(h) + + h = h.type(x.dtype) + h = self.out_layer(h) + + mean, logvar = h.chunk(2, dim=1) + + if sample_posterior: + std = torch.exp(0.5 * logvar) + z = mean + std * torch.randn_like(std) + else: + z = mean + + if return_raw: + return z, mean, logvar + return z + + +class SparseStructureDecoder(nn.Module): + """ + Decoder for Sparse Structure (\mathcal{D}_S in the paper Sec. 3.3). + + Args: + out_channels (int): Channels of the output. + latent_channels (int): Channels of the latent representation. + num_res_blocks (int): Number of residual blocks at each resolution. + channels (List[int]): Channels of the decoder blocks. + num_res_blocks_middle (int): Number of residual blocks in the middle. + norm_type (Literal["group", "layer"]): Type of normalization layer. + use_fp16 (bool): Whether to use FP16. 
+ """ + def __init__( + self, + out_channels: int, + latent_channels: int, + num_res_blocks: int, + channels: List[int], + num_res_blocks_middle: int = 2, + norm_type: Literal["group", "layer"] = "layer", + use_fp16: bool = False, + ): + super().__init__() + self.out_channels = out_channels + self.latent_channels = latent_channels + self.num_res_blocks = num_res_blocks + self.channels = channels + self.num_res_blocks_middle = num_res_blocks_middle + self.norm_type = norm_type + self.use_fp16 = use_fp16 + self.dtype = torch.float16 if use_fp16 else torch.float32 + + self.input_layer = nn.Conv3d(latent_channels, channels[0], 3, padding=1) + + self.middle_block = nn.Sequential(*[ + ResBlock3d(channels[0], channels[0]) + for _ in range(num_res_blocks_middle) + ]) + + self.blocks = nn.ModuleList([]) + for i, ch in enumerate(channels): + self.blocks.extend([ + ResBlock3d(ch, ch) + for _ in range(num_res_blocks) + ]) + if i < len(channels) - 1: + self.blocks.append( + UpsampleBlock3d(ch, channels[i+1]) + ) + + self.out_layer = nn.Sequential( + norm_layer(norm_type, channels[-1]), + nn.SiLU(), + nn.Conv3d(channels[-1], out_channels, 3, padding=1) + ) + + if use_fp16: + self.convert_to_fp16() + + @property + def device(self) -> torch.device: + """ + Return the device of the model. + """ + return next(self.parameters()).device + + def convert_to_fp16(self) -> None: + """ + Convert the torso of the model to float16. + """ + self.use_fp16 = True + self.dtype = torch.float16 + self.blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + + def convert_to_fp32(self) -> None: + """ + Convert the torso of the model to float32. + """ + self.use_fp16 = False + self.dtype = torch.float32 + self.blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + h = self.input_layer(x) + + h = h.type(self.dtype) + + h = self.middle_block(h) + for block in self.blocks: + h = block(h) + + h = h.type(x.dtype) + h = self.out_layer(h) + return h diff --git a/trellis/trainers/vae/sparse_structure_vae.py b/trellis/trainers/vae/sparse_structure_vae.py new file mode 100644 index 0000000..5c0c418 --- /dev/null +++ b/trellis/trainers/vae/sparse_structure_vae.py @@ -0,0 +1,130 @@ +from typing import * +import copy +import torch +import torch.nn.functional as F +from torch.utils.data import DataLoader +from easydict import EasyDict as edict + +from ..basic import BasicTrainer + + +class SparseStructureVaeTrainer(BasicTrainer): + """ + Trainer for Sparse Structure VAE. + + Args: + models (dict[str, nn.Module]): Models to train. + dataset (torch.utils.data.Dataset): Dataset. + output_dir (str): Output directory. + load_dir (str): Load directory. + step (int): Step to load. + batch_size (int): Batch size. + batch_size_per_gpu (int): Batch size per GPU. If specified, batch_size will be ignored. + batch_split (int): Split batch with gradient accumulation. + max_steps (int): Max steps. + optimizer (dict): Optimizer config. + lr_scheduler (dict): Learning rate scheduler config. + elastic (dict): Elastic memory management config. + grad_clip (float or dict): Gradient clip config. + ema_rate (float or list): Exponential moving average rates. + fp16_mode (str): FP16 mode. + - None: No FP16. + - 'inflat_all': Hold a inflated fp32 master param for all params. + - 'amp': Automatic mixed precision. + fp16_scale_growth (float): Scale growth for FP16 gradient backpropagation. + finetune_ckpt (dict): Finetune checkpoint. 
+ log_param_stats (bool): Log parameter stats. + i_print (int): Print interval. + i_log (int): Log interval. + i_sample (int): Sample interval. + i_save (int): Save interval. + i_ddpcheck (int): DDP check interval. + + loss_type (str): Loss type. 'bce' for binary cross entropy, 'l1' for L1 loss, 'dice' for Dice loss. + lambda_kl (float): KL divergence loss weight. + """ + + def __init__( + self, + *args, + loss_type='bce', + lambda_kl=1e-6, + **kwargs + ): + super().__init__(*args, **kwargs) + self.loss_type = loss_type + self.lambda_kl = lambda_kl + + def training_losses( + self, + ss: torch.Tensor, + **kwargs + ) -> Tuple[Dict, Dict]: + """ + Compute training losses. + + Args: + ss: The [N x 1 x H x W x D] tensor of binary sparse structure. + + Returns: + a dict with the key "loss" containing a scalar tensor. + may also contain other keys for different terms. + """ + z, mean, logvar = self.training_models['encoder'](ss.float(), sample_posterior=True, return_raw=True) + logits = self.training_models['decoder'](z) + + terms = edict(loss = 0.0) + if self.loss_type == 'bce': + terms["bce"] = F.binary_cross_entropy_with_logits(logits, ss.float(), reduction='mean') + terms["loss"] = terms["loss"] + terms["bce"] + elif self.loss_type == 'l1': + terms["l1"] = F.l1_loss(F.sigmoid(logits), ss.float(), reduction='mean') + terms["loss"] = terms["loss"] + terms["l1"] + elif self.loss_type == 'dice': + logits = F.sigmoid(logits) + terms["dice"] = 1 - (2 * (logits * ss.float()).sum() + 1) / (logits.sum() + ss.float().sum() + 1) + terms["loss"] = terms["loss"] + terms["dice"] + else: + raise ValueError(f'Invalid loss type {self.loss_type}') + terms["kl"] = 0.5 * torch.mean(mean.pow(2) + logvar.exp() - logvar - 1) + terms["loss"] = terms["loss"] + self.lambda_kl * terms["kl"] + + return terms, {} + + @torch.no_grad() + def snapshot(self, suffix=None, num_samples=64, batch_size=1, verbose=False): + super().snapshot(suffix=suffix, num_samples=num_samples, batch_size=batch_size, verbose=verbose) + + @torch.no_grad() + def run_snapshot( + self, + num_samples: int, + batch_size: int, + verbose: bool = False, + ) -> Dict: + dataloader = DataLoader( + copy.deepcopy(self.dataset), + batch_size=batch_size, + shuffle=True, + num_workers=0, + collate_fn=self.dataset.collate_fn if hasattr(self.dataset, 'collate_fn') else None, + ) + + # inference + gts = [] + recons = [] + for i in range(0, num_samples, batch_size): + batch = min(batch_size, num_samples - i) + data = next(iter(dataloader)) + args = {k: v[:batch].cuda() if isinstance(v, torch.Tensor) else v[:batch] for k, v in data.items()} + z = self.models['encoder'](args['ss'].float(), sample_posterior=False) + logits = self.models['decoder'](z) + recon = (logits > 0).long() + gts.append(args['ss']) + recons.append(recon) + + sample_dict = { + 'gt': {'value': torch.cat(gts, dim=0), 'type': 'sample'}, + 'recon': {'value': torch.cat(recons, dim=0), 'type': 'sample'}, + } + return sample_dict
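
A minimal round-trip sketch of the new sparse structure VAE, to make the
intended wiring easier to review. The resolution, channel widths, and latent
size below are illustrative assumptions, not values pinned by this patch:

    import torch
    from trellis.models.sparse_structure_vae import (
        SparseStructureEncoder,
        SparseStructureDecoder,
    )

    # Hypothetical config: a 64^3 binary occupancy grid, 8 latent channels,
    # and two downsampling stages (64 -> 32 -> 16).
    enc = SparseStructureEncoder(
        in_channels=1, latent_channels=8, num_res_blocks=2,
        channels=[32, 128, 512],
    )
    dec = SparseStructureDecoder(
        out_channels=1, latent_channels=8, num_res_blocks=2,
        channels=[512, 128, 32],
    )

    ss = (torch.rand(1, 1, 64, 64, 64) > 0.5).float()  # fake occupancy grid
    z, mean, logvar = enc(ss, sample_posterior=True, return_raw=True)
    logits = dec(z)                # raw logits at full resolution
    recon = (logits > 0).long()    # threshold, as in SparseStructureVaeTrainer
    assert z.shape == (1, 8, 16, 16, 16)
    assert recon.shape == ss.shape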