Upload 11 files
- trellis/models/__init__.py +96 -0
- trellis/models/sparse_elastic_mixin.py +24 -0
- trellis/models/sparse_structure_flow.py +200 -0
- trellis/models/sparse_structure_vae.py +306 -0
- trellis/models/structured_latent_flow.py +276 -0
- trellis/models/structured_latent_vae/__init__.py +4 -0
- trellis/models/structured_latent_vae/base.py +117 -0
- trellis/models/structured_latent_vae/decoder_gs.py +131 -0
- trellis/models/structured_latent_vae/decoder_mesh.py +176 -0
- trellis/models/structured_latent_vae/decoder_rf.py +113 -0
- trellis/models/structured_latent_vae/encoder.py +80 -0
trellis/models/__init__.py
ADDED
@@ -0,0 +1,96 @@
import importlib

__attributes = {
    'SparseStructureEncoder': 'sparse_structure_vae',
    'SparseStructureDecoder': 'sparse_structure_vae',

    'SparseStructureFlowModel': 'sparse_structure_flow',

    'SLatEncoder': 'structured_latent_vae',
    'SLatGaussianDecoder': 'structured_latent_vae',
    'SLatRadianceFieldDecoder': 'structured_latent_vae',
    'SLatMeshDecoder': 'structured_latent_vae',
    'ElasticSLatEncoder': 'structured_latent_vae',
    'ElasticSLatGaussianDecoder': 'structured_latent_vae',
    'ElasticSLatRadianceFieldDecoder': 'structured_latent_vae',
    'ElasticSLatMeshDecoder': 'structured_latent_vae',

    'SLatFlowModel': 'structured_latent_flow',
    'ElasticSLatFlowModel': 'structured_latent_flow',
}

__submodules = []

__all__ = list(__attributes.keys()) + __submodules

def __getattr__(name):
    if name not in globals():
        if name in __attributes:
            module_name = __attributes[name]
            module = importlib.import_module(f".{module_name}", __name__)
            globals()[name] = getattr(module, name)
        elif name in __submodules:
            module = importlib.import_module(f".{name}", __name__)
            globals()[name] = module
        else:
            raise AttributeError(f"module {__name__} has no attribute {name}")
    return globals()[name]


def from_pretrained(path: str, **kwargs):
    """
    Load a model from a pretrained checkpoint.

    Args:
        path: The path to the checkpoint. Can be either local path or a Hugging Face model name.
            NOTE: config file and model file should take the name f'{path}.json' and f'{path}.safetensors' respectively.
        **kwargs: Additional arguments for the model constructor.
    """
    import os
    import json
    from safetensors.torch import load_file
    is_local = os.path.exists(f"{path}.json") and os.path.exists(f"{path}.safetensors")

    if is_local:
        config_file = f"{path}.json"
        model_file = f"{path}.safetensors"
    else:
        from huggingface_hub import hf_hub_download
        path_parts = path.split('/')
        repo_id = f'{path_parts[0]}/{path_parts[1]}'
        model_name = '/'.join(path_parts[2:])
        config_file = hf_hub_download(repo_id, f"{model_name}.json")
        model_file = hf_hub_download(repo_id, f"{model_name}.safetensors")

    with open(config_file, 'r') as f:
        config = json.load(f)
    model = __getattr__(config['name'])(**config['args'], **kwargs)
    model.load_state_dict(load_file(model_file))

    return model


# For Pylance
if __name__ == '__main__':
    from .sparse_structure_vae import (
        SparseStructureEncoder,
        SparseStructureDecoder,
    )

    from .sparse_structure_flow import SparseStructureFlowModel

    from .structured_latent_vae import (
        SLatEncoder,
        SLatGaussianDecoder,
        SLatRadianceFieldDecoder,
        SLatMeshDecoder,
        ElasticSLatEncoder,
        ElasticSLatGaussianDecoder,
        ElasticSLatRadianceFieldDecoder,
        ElasticSLatMeshDecoder,
    )

    from .structured_latent_flow import (
        SLatFlowModel,
        ElasticSLatFlowModel,
    )
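Because of the lazy `__getattr__` hook above, a submodule is only imported the first time one of its classes is accessed, and `from_pretrained` accepts either a local file prefix or a Hugging Face repo path. A minimal usage sketch; the checkpoint path is a placeholder, not a released checkpoint name:

from trellis import models

FlowModel = models.SparseStructureFlowModel          # triggers import of .sparse_structure_flow

# Resolves '<prefix>.json' and '<prefix>.safetensors', locally or via hf_hub_download.
model = models.from_pretrained("some-org/some-repo/ckpts/ss_flow")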
trellis/models/sparse_elastic_mixin.py
ADDED
@@ -0,0 +1,24 @@
from contextlib import contextmanager
from typing import *
import math
from ..modules import sparse as sp
from ..utils.elastic_utils import ElasticModuleMixin


class SparseTransformerElasticMixin(ElasticModuleMixin):
    def _get_input_size(self, x: sp.SparseTensor, *args, **kwargs):
        return x.feats.shape[0]

    @contextmanager
    def with_mem_ratio(self, mem_ratio=1.0):
        if mem_ratio == 1.0:
            yield 1.0
            return
        num_blocks = len(self.blocks)
        num_checkpoint_blocks = min(math.ceil((1 - mem_ratio) * num_blocks) + 1, num_blocks)
        exact_mem_ratio = 1 - (num_checkpoint_blocks - 1) / num_blocks
        for i in range(num_blocks):
            self.blocks[i].use_checkpoint = i < num_checkpoint_blocks
        yield exact_mem_ratio
        for i in range(num_blocks):
            self.blocks[i].use_checkpoint = False
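The mixin trades compute for memory by enabling gradient checkpointing on a prefix of `self.blocks`. With 24 blocks and `mem_ratio=0.5`, for instance, `num_checkpoint_blocks = min(ceil(0.5 * 24) + 1, 24) = 13` and the yielded `exact_mem_ratio` is `1 - 12/24 = 0.5`. A hedged usage sketch, assuming `model` is one of the Elastic* classes defined in the files below and `x`, `t`, `cond` are valid inputs:

with model.with_mem_ratio(0.5) as exact_ratio:
    out = model(x, t, cond)   # the first 13 of 24 blocks recompute activations in backward
# checkpointing is switched off again when the context exits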
trellis/models/sparse_structure_flow.py
ADDED
@@ -0,0 +1,200 @@
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..modules.utils import convert_module_to_f16, convert_module_to_f32
from ..modules.transformer import AbsolutePositionEmbedder, ModulatedTransformerCrossBlock
from ..modules.spatial import patchify, unpatchify


class TimestepEmbedder(nn.Module):
    """
    Embeds scalar timesteps into vector representations.
    """
    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size, bias=True),
        )
        self.frequency_embedding_size = frequency_embedding_size

    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        """
        Create sinusoidal timestep embeddings.

        Args:
            t: a 1-D Tensor of N indices, one per batch element.
                These may be fractional.
            dim: the dimension of the output.
            max_period: controls the minimum frequency of the embeddings.

        Returns:
            an (N, D) Tensor of positional embeddings.
        """
        # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
        half = dim // 2
        freqs = torch.exp(
            -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
        ).to(device=t.device)
        args = t[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
        return embedding

    def forward(self, t):
        t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
        t_emb = self.mlp(t_freq)
        return t_emb


class SparseStructureFlowModel(nn.Module):
    def __init__(
        self,
        resolution: int,
        in_channels: int,
        model_channels: int,
        cond_channels: int,
        out_channels: int,
        num_blocks: int,
        num_heads: Optional[int] = None,
        num_head_channels: Optional[int] = 64,
        mlp_ratio: float = 4,
        patch_size: int = 2,
        pe_mode: Literal["ape", "rope"] = "ape",
        use_fp16: bool = False,
        use_checkpoint: bool = False,
        share_mod: bool = False,
        qk_rms_norm: bool = False,
        qk_rms_norm_cross: bool = False,
    ):
        super().__init__()
        self.resolution = resolution
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.cond_channels = cond_channels
        self.out_channels = out_channels
        self.num_blocks = num_blocks
        self.num_heads = num_heads or model_channels // num_head_channels
        self.mlp_ratio = mlp_ratio
        self.patch_size = patch_size
        self.pe_mode = pe_mode
        self.use_fp16 = use_fp16
        self.use_checkpoint = use_checkpoint
        self.share_mod = share_mod
        self.qk_rms_norm = qk_rms_norm
        self.qk_rms_norm_cross = qk_rms_norm_cross
        self.dtype = torch.float16 if use_fp16 else torch.float32

        self.t_embedder = TimestepEmbedder(model_channels)
        if share_mod:
            self.adaLN_modulation = nn.Sequential(
                nn.SiLU(),
                nn.Linear(model_channels, 6 * model_channels, bias=True)
            )

        if pe_mode == "ape":
            pos_embedder = AbsolutePositionEmbedder(model_channels, 3)
            coords = torch.meshgrid(*[torch.arange(res, device=self.device) for res in [resolution // patch_size] * 3], indexing='ij')
            coords = torch.stack(coords, dim=-1).reshape(-1, 3)
            pos_emb = pos_embedder(coords)
            self.register_buffer("pos_emb", pos_emb)

        self.input_layer = nn.Linear(in_channels * patch_size**3, model_channels)

        self.blocks = nn.ModuleList([
            ModulatedTransformerCrossBlock(
                model_channels,
                cond_channels,
                num_heads=self.num_heads,
                mlp_ratio=self.mlp_ratio,
                attn_mode='full',
                use_checkpoint=self.use_checkpoint,
                use_rope=(pe_mode == "rope"),
                share_mod=share_mod,
                qk_rms_norm=self.qk_rms_norm,
                qk_rms_norm_cross=self.qk_rms_norm_cross,
            )
            for _ in range(num_blocks)
        ])

        self.out_layer = nn.Linear(model_channels, out_channels * patch_size**3)

        self.initialize_weights()
        if use_fp16:
            self.convert_to_fp16()

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.blocks.apply(convert_module_to_f32)

    def initialize_weights(self) -> None:
        # Initialize transformer layers:
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

        # Initialize timestep embedding MLP:
        nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
        nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)

        # Zero-out adaLN modulation layers in DiT blocks:
        if self.share_mod:
            nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
            nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
        else:
            for block in self.blocks:
                nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
                nn.init.constant_(block.adaLN_modulation[-1].bias, 0)

        # Zero-out output layers:
        nn.init.constant_(self.out_layer.weight, 0)
        nn.init.constant_(self.out_layer.bias, 0)

    def forward(self, x: torch.Tensor, t: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        assert [*x.shape] == [x.shape[0], self.in_channels, *[self.resolution] * 3], \
            f"Input shape mismatch, got {x.shape}, expected {[x.shape[0], self.in_channels, *[self.resolution] * 3]}"

        h = patchify(x, self.patch_size)
        h = h.view(*h.shape[:2], -1).permute(0, 2, 1).contiguous()

        h = self.input_layer(h)
        h = h + self.pos_emb[None]
        t_emb = self.t_embedder(t)
        if self.share_mod:
            t_emb = self.adaLN_modulation(t_emb)
        t_emb = t_emb.type(self.dtype)
        h = h.type(self.dtype)
        cond = cond.type(self.dtype)
        for block in self.blocks:
            h = block(h, t_emb, cond)
        h = h.type(x.dtype)
        h = F.layer_norm(h, h.shape[-1:])
        h = self.out_layer(h)

        h = h.permute(0, 2, 1).view(h.shape[0], h.shape[2], *[self.resolution // self.patch_size] * 3)
        h = unpatchify(h, self.patch_size).contiguous()

        return h
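`TimestepEmbedder` turns a 1-D batch of (possibly fractional) timesteps into sinusoidal features and passes them through a two-layer MLP; `SparseStructureFlowModel.forward` then expects a dense `[B, in_channels, resolution, resolution, resolution]` grid, as its assert shows. A small self-contained check of the embedder with illustrative sizes:

import torch

emb = TimestepEmbedder(hidden_size=1024)                  # illustrative width
t = torch.tensor([0.0, 0.25, 1.0])                        # one timestep per batch element
print(TimestepEmbedder.timestep_embedding(t, 256).shape)  # torch.Size([3, 256]) sinusoidal features
print(emb(t).shape)                                       # torch.Size([3, 1024]) after the MLP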
trellis/models/sparse_structure_vae.py
ADDED
@@ -0,0 +1,306 @@
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..modules.norm import GroupNorm32, ChannelLayerNorm32
from ..modules.spatial import pixel_shuffle_3d
from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32


def norm_layer(norm_type: str, *args, **kwargs) -> nn.Module:
    """
    Return a normalization layer.
    """
    if norm_type == "group":
        return GroupNorm32(32, *args, **kwargs)
    elif norm_type == "layer":
        return ChannelLayerNorm32(*args, **kwargs)
    else:
        raise ValueError(f"Invalid norm type {norm_type}")


class ResBlock3d(nn.Module):
    def __init__(
        self,
        channels: int,
        out_channels: Optional[int] = None,
        norm_type: Literal["group", "layer"] = "layer",
    ):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels

        self.norm1 = norm_layer(norm_type, channels)
        self.norm2 = norm_layer(norm_type, self.out_channels)
        self.conv1 = nn.Conv3d(channels, self.out_channels, 3, padding=1)
        self.conv2 = zero_module(nn.Conv3d(self.out_channels, self.out_channels, 3, padding=1))
        self.skip_connection = nn.Conv3d(channels, self.out_channels, 1) if channels != self.out_channels else nn.Identity()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h = self.norm1(x)
        h = F.silu(h)
        h = self.conv1(h)
        h = self.norm2(h)
        h = F.silu(h)
        h = self.conv2(h)
        h = h + self.skip_connection(x)
        return h


class DownsampleBlock3d(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        mode: Literal["conv", "avgpool"] = "conv",
    ):
        assert mode in ["conv", "avgpool"], f"Invalid mode {mode}"

        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        if mode == "conv":
            self.conv = nn.Conv3d(in_channels, out_channels, 2, stride=2)
        elif mode == "avgpool":
            assert in_channels == out_channels, "Pooling mode requires in_channels to be equal to out_channels"

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if hasattr(self, "conv"):
            return self.conv(x)
        else:
            return F.avg_pool3d(x, 2)


class UpsampleBlock3d(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        mode: Literal["conv", "nearest"] = "conv",
    ):
        assert mode in ["conv", "nearest"], f"Invalid mode {mode}"

        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        if mode == "conv":
            self.conv = nn.Conv3d(in_channels, out_channels*8, 3, padding=1)
        elif mode == "nearest":
            assert in_channels == out_channels, "Nearest mode requires in_channels to be equal to out_channels"

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if hasattr(self, "conv"):
            x = self.conv(x)
            return pixel_shuffle_3d(x, 2)
        else:
            return F.interpolate(x, scale_factor=2, mode="nearest")


class SparseStructureEncoder(nn.Module):
    """
    Encoder for Sparse Structure (\mathcal{E}_S in the paper Sec. 3.3).

    Args:
        in_channels (int): Channels of the input.
        latent_channels (int): Channels of the latent representation.
        num_res_blocks (int): Number of residual blocks at each resolution.
        channels (List[int]): Channels of the encoder blocks.
        num_res_blocks_middle (int): Number of residual blocks in the middle.
        norm_type (Literal["group", "layer"]): Type of normalization layer.
        use_fp16 (bool): Whether to use FP16.
    """
    def __init__(
        self,
        in_channels: int,
        latent_channels: int,
        num_res_blocks: int,
        channels: List[int],
        num_res_blocks_middle: int = 2,
        norm_type: Literal["group", "layer"] = "layer",
        use_fp16: bool = False,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.latent_channels = latent_channels
        self.num_res_blocks = num_res_blocks
        self.channels = channels
        self.num_res_blocks_middle = num_res_blocks_middle
        self.norm_type = norm_type
        self.use_fp16 = use_fp16
        self.dtype = torch.float16 if use_fp16 else torch.float32

        self.input_layer = nn.Conv3d(in_channels, channels[0], 3, padding=1)

        self.blocks = nn.ModuleList([])
        for i, ch in enumerate(channels):
            self.blocks.extend([
                ResBlock3d(ch, ch)
                for _ in range(num_res_blocks)
            ])
            if i < len(channels) - 1:
                self.blocks.append(
                    DownsampleBlock3d(ch, channels[i+1])
                )

        self.middle_block = nn.Sequential(*[
            ResBlock3d(channels[-1], channels[-1])
            for _ in range(num_res_blocks_middle)
        ])

        self.out_layer = nn.Sequential(
            norm_layer(norm_type, channels[-1]),
            nn.SiLU(),
            nn.Conv3d(channels[-1], latent_channels*2, 3, padding=1)
        )

        if use_fp16:
            self.convert_to_fp16()

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.use_fp16 = True
        self.dtype = torch.float16
        self.blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.use_fp16 = False
        self.dtype = torch.float32
        self.blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)

    def forward(self, x: torch.Tensor, sample_posterior: bool = False, return_raw: bool = False) -> torch.Tensor:
        h = self.input_layer(x)
        h = h.type(self.dtype)

        for block in self.blocks:
            h = block(h)
        h = self.middle_block(h)

        h = h.type(x.dtype)
        h = self.out_layer(h)

        mean, logvar = h.chunk(2, dim=1)

        if sample_posterior:
            std = torch.exp(0.5 * logvar)
            z = mean + std * torch.randn_like(std)
        else:
            z = mean

        if return_raw:
            return z, mean, logvar
        return z


class SparseStructureDecoder(nn.Module):
    """
    Decoder for Sparse Structure (\mathcal{D}_S in the paper Sec. 3.3).

    Args:
        out_channels (int): Channels of the output.
        latent_channels (int): Channels of the latent representation.
        num_res_blocks (int): Number of residual blocks at each resolution.
        channels (List[int]): Channels of the decoder blocks.
        num_res_blocks_middle (int): Number of residual blocks in the middle.
        norm_type (Literal["group", "layer"]): Type of normalization layer.
        use_fp16 (bool): Whether to use FP16.
    """
    def __init__(
        self,
        out_channels: int,
        latent_channels: int,
        num_res_blocks: int,
        channels: List[int],
        num_res_blocks_middle: int = 2,
        norm_type: Literal["group", "layer"] = "layer",
        use_fp16: bool = False,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.latent_channels = latent_channels
        self.num_res_blocks = num_res_blocks
        self.channels = channels
        self.num_res_blocks_middle = num_res_blocks_middle
        self.norm_type = norm_type
        self.use_fp16 = use_fp16
        self.dtype = torch.float16 if use_fp16 else torch.float32

        self.input_layer = nn.Conv3d(latent_channels, channels[0], 3, padding=1)

        self.middle_block = nn.Sequential(*[
            ResBlock3d(channels[0], channels[0])
            for _ in range(num_res_blocks_middle)
        ])

        self.blocks = nn.ModuleList([])
        for i, ch in enumerate(channels):
            self.blocks.extend([
                ResBlock3d(ch, ch)
                for _ in range(num_res_blocks)
            ])
            if i < len(channels) - 1:
                self.blocks.append(
                    UpsampleBlock3d(ch, channels[i+1])
                )

        self.out_layer = nn.Sequential(
            norm_layer(norm_type, channels[-1]),
            nn.SiLU(),
            nn.Conv3d(channels[-1], out_channels, 3, padding=1)
        )

        if use_fp16:
            self.convert_to_fp16()

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.use_fp16 = True
        self.dtype = torch.float16
        self.blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.use_fp16 = False
        self.dtype = torch.float32
        self.blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h = self.input_layer(x)

        h = h.type(self.dtype)

        h = self.middle_block(h)
        for block in self.blocks:
            h = block(h)

        h = h.type(x.dtype)
        h = self.out_layer(h)
        return h
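Each stage of `channels` except the last ends in one `DownsampleBlock3d` (encoder) or `UpsampleBlock3d` (decoder), and the encoder's output layer emits `2 * latent_channels` channels that are split into mean and log-variance. A shape sketch with illustrative hyperparameters (not a released configuration; assumes the `trellis.modules` package is importable):

import torch

enc = SparseStructureEncoder(in_channels=1, latent_channels=8, num_res_blocks=2,
                             channels=[32, 128, 512])   # two downsamples: 64 -> 16
dec = SparseStructureDecoder(out_channels=1, latent_channels=8, num_res_blocks=2,
                             channels=[512, 128, 32])   # two upsamples: 16 -> 64

x = torch.randn(1, 1, 64, 64, 64)                        # dense occupancy-like grid
z, mean, logvar = enc(x, sample_posterior=True, return_raw=True)
print(z.shape)                                           # torch.Size([1, 8, 16, 16, 16])
print(dec(z).shape)                                      # torch.Size([1, 1, 64, 64, 64])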
trellis/models/structured_latent_flow.py
ADDED
@@ -0,0 +1,276 @@
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
from ..modules.transformer import AbsolutePositionEmbedder
from ..modules.norm import LayerNorm32
from ..modules import sparse as sp
from ..modules.sparse.transformer import ModulatedSparseTransformerCrossBlock
from .sparse_structure_flow import TimestepEmbedder
from .sparse_elastic_mixin import SparseTransformerElasticMixin


class SparseResBlock3d(nn.Module):
    def __init__(
        self,
        channels: int,
        emb_channels: int,
        out_channels: Optional[int] = None,
        downsample: bool = False,
        upsample: bool = False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.out_channels = out_channels or channels
        self.downsample = downsample
        self.upsample = upsample

        assert not (downsample and upsample), "Cannot downsample and upsample at the same time"

        self.norm1 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
        self.norm2 = LayerNorm32(self.out_channels, elementwise_affine=False, eps=1e-6)
        self.conv1 = sp.SparseConv3d(channels, self.out_channels, 3)
        self.conv2 = zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3))
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            nn.Linear(emb_channels, 2 * self.out_channels, bias=True),
        )
        self.skip_connection = sp.SparseLinear(channels, self.out_channels) if channels != self.out_channels else nn.Identity()
        self.updown = None
        if self.downsample:
            self.updown = sp.SparseDownsample(2)
        elif self.upsample:
            self.updown = sp.SparseUpsample(2)

    def _updown(self, x: sp.SparseTensor) -> sp.SparseTensor:
        if self.updown is not None:
            x = self.updown(x)
        return x

    def forward(self, x: sp.SparseTensor, emb: torch.Tensor) -> sp.SparseTensor:
        emb_out = self.emb_layers(emb).type(x.dtype)
        scale, shift = torch.chunk(emb_out, 2, dim=1)

        x = self._updown(x)
        h = x.replace(self.norm1(x.feats))
        h = h.replace(F.silu(h.feats))
        h = self.conv1(h)
        h = h.replace(self.norm2(h.feats)) * (1 + scale) + shift
        h = h.replace(F.silu(h.feats))
        h = self.conv2(h)
        h = h + self.skip_connection(x)

        return h


class SLatFlowModel(nn.Module):
    def __init__(
        self,
        resolution: int,
        in_channels: int,
        model_channels: int,
        cond_channels: int,
        out_channels: int,
        num_blocks: int,
        num_heads: Optional[int] = None,
        num_head_channels: Optional[int] = 64,
        mlp_ratio: float = 4,
        patch_size: int = 2,
        num_io_res_blocks: int = 2,
        io_block_channels: List[int] = None,
        pe_mode: Literal["ape", "rope"] = "ape",
        use_fp16: bool = False,
        use_checkpoint: bool = False,
        use_skip_connection: bool = True,
        share_mod: bool = False,
        qk_rms_norm: bool = False,
        qk_rms_norm_cross: bool = False,
    ):
        super().__init__()
        self.resolution = resolution
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.cond_channels = cond_channels
        self.out_channels = out_channels
        self.num_blocks = num_blocks
        self.num_heads = num_heads or model_channels // num_head_channels
        self.mlp_ratio = mlp_ratio
        self.patch_size = patch_size
        self.num_io_res_blocks = num_io_res_blocks
        self.io_block_channels = io_block_channels
        self.pe_mode = pe_mode
        self.use_fp16 = use_fp16
        self.use_checkpoint = use_checkpoint
        self.use_skip_connection = use_skip_connection
        self.share_mod = share_mod
        self.qk_rms_norm = qk_rms_norm
        self.qk_rms_norm_cross = qk_rms_norm_cross
        self.dtype = torch.float16 if use_fp16 else torch.float32

        if self.io_block_channels is not None:
            assert int(np.log2(patch_size)) == np.log2(patch_size), "Patch size must be a power of 2"
            assert np.log2(patch_size) == len(io_block_channels), "Number of IO ResBlocks must match the number of stages"

        self.t_embedder = TimestepEmbedder(model_channels)
        if share_mod:
            self.adaLN_modulation = nn.Sequential(
                nn.SiLU(),
                nn.Linear(model_channels, 6 * model_channels, bias=True)
            )

        if pe_mode == "ape":
            self.pos_embedder = AbsolutePositionEmbedder(model_channels)

        self.input_layer = sp.SparseLinear(in_channels, model_channels if io_block_channels is None else io_block_channels[0])

        self.input_blocks = nn.ModuleList([])
        if io_block_channels is not None:
            for chs, next_chs in zip(io_block_channels, io_block_channels[1:] + [model_channels]):
                self.input_blocks.extend([
                    SparseResBlock3d(
                        chs,
                        model_channels,
                        out_channels=chs,
                    )
                    for _ in range(num_io_res_blocks-1)
                ])
                self.input_blocks.append(
                    SparseResBlock3d(
                        chs,
                        model_channels,
                        out_channels=next_chs,
                        downsample=True,
                    )
                )

        self.blocks = nn.ModuleList([
            ModulatedSparseTransformerCrossBlock(
                model_channels,
                cond_channels,
                num_heads=self.num_heads,
                mlp_ratio=self.mlp_ratio,
                attn_mode='full',
                use_checkpoint=self.use_checkpoint,
                use_rope=(pe_mode == "rope"),
                share_mod=self.share_mod,
                qk_rms_norm=self.qk_rms_norm,
                qk_rms_norm_cross=self.qk_rms_norm_cross,
            )
            for _ in range(num_blocks)
        ])

        self.out_blocks = nn.ModuleList([])
        if io_block_channels is not None:
            for chs, prev_chs in zip(reversed(io_block_channels), [model_channels] + list(reversed(io_block_channels[1:]))):
                self.out_blocks.append(
                    SparseResBlock3d(
                        prev_chs * 2 if self.use_skip_connection else prev_chs,
                        model_channels,
                        out_channels=chs,
                        upsample=True,
                    )
                )
                self.out_blocks.extend([
                    SparseResBlock3d(
                        chs * 2 if self.use_skip_connection else chs,
                        model_channels,
                        out_channels=chs,
                    )
                    for _ in range(num_io_res_blocks-1)
                ])

        self.out_layer = sp.SparseLinear(model_channels if io_block_channels is None else io_block_channels[0], out_channels)

        self.initialize_weights()
        if use_fp16:
            self.convert_to_fp16()

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.blocks.apply(convert_module_to_f16)
        self.out_blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.blocks.apply(convert_module_to_f32)
        self.out_blocks.apply(convert_module_to_f32)

    def initialize_weights(self) -> None:
        # Initialize transformer layers:
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

        # Initialize timestep embedding MLP:
        nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
        nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)

        # Zero-out adaLN modulation layers in DiT blocks:
        if self.share_mod:
            nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
            nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
        else:
            for block in self.blocks:
                nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
                nn.init.constant_(block.adaLN_modulation[-1].bias, 0)

        # Zero-out output layers:
        nn.init.constant_(self.out_layer.weight, 0)
        nn.init.constant_(self.out_layer.bias, 0)

    def forward(self, x: sp.SparseTensor, t: torch.Tensor, cond: torch.Tensor) -> sp.SparseTensor:
        h = self.input_layer(x).type(self.dtype)
        t_emb = self.t_embedder(t)
        if self.share_mod:
            t_emb = self.adaLN_modulation(t_emb)
        t_emb = t_emb.type(self.dtype)
        cond = cond.type(self.dtype)

        skips = []
        # pack with input blocks
        for block in self.input_blocks:
            h = block(h, t_emb)
            skips.append(h.feats)

        if self.pe_mode == "ape":
            h = h + self.pos_embedder(h.coords[:, 1:]).type(self.dtype)
        for block in self.blocks:
            h = block(h, t_emb, cond)

        # unpack with output blocks
        for block, skip in zip(self.out_blocks, reversed(skips)):
            if self.use_skip_connection:
                h = block(h.replace(torch.cat([h.feats, skip], dim=1)), t_emb)
            else:
                h = block(h, t_emb)

        h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
        h = self.out_layer(h.type(x.dtype))
        return h


class ElasticSLatFlowModel(SparseTransformerElasticMixin, SLatFlowModel):
    """
    SLat Flow Model with elastic memory management.
    Used for training with low VRAM.
    """
    pass
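`patch_size` and `io_block_channels` are coupled by the asserts above: `patch_size` must be a power of two and `len(io_block_channels) == log2(patch_size)`, since each IO stage ends in exactly one sparse down- or upsample by a factor of 2. An illustrative instantiation (hypothetical hyperparameters, not a released config; constructing the sparse layers requires the backend used by `trellis.modules.sparse`):

model = SLatFlowModel(
    resolution=64,            # structured-latent grid resolution
    in_channels=8,
    model_channels=768,
    cond_channels=1024,       # e.g. width of image-conditioning tokens
    out_channels=8,
    num_blocks=12,
    patch_size=2,             # one IO stage ...
    io_block_channels=[128],  # ... so exactly one entry here
    num_io_res_blocks=2,
)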
trellis/models/structured_latent_vae/__init__.py
ADDED
@@ -0,0 +1,4 @@
from .encoder import SLatEncoder, ElasticSLatEncoder
from .decoder_gs import SLatGaussianDecoder, ElasticSLatGaussianDecoder
from .decoder_rf import SLatRadianceFieldDecoder, ElasticSLatRadianceFieldDecoder
from .decoder_mesh import SLatMeshDecoder, ElasticSLatMeshDecoder
trellis/models/structured_latent_vae/base.py
ADDED
@@ -0,0 +1,117 @@
from typing import *
import torch
import torch.nn as nn
from ...modules.utils import convert_module_to_f16, convert_module_to_f32
from ...modules import sparse as sp
from ...modules.transformer import AbsolutePositionEmbedder
from ...modules.sparse.transformer import SparseTransformerBlock


def block_attn_config(self):
    """
    Return the attention configuration of the model.
    """
    for i in range(self.num_blocks):
        if self.attn_mode == "shift_window":
            yield "serialized", self.window_size, 0, (16 * (i % 2),) * 3, sp.SerializeMode.Z_ORDER
        elif self.attn_mode == "shift_sequence":
            yield "serialized", self.window_size, self.window_size // 2 * (i % 2), (0, 0, 0), sp.SerializeMode.Z_ORDER
        elif self.attn_mode == "shift_order":
            yield "serialized", self.window_size, 0, (0, 0, 0), sp.SerializeModes[i % 4]
        elif self.attn_mode == "full":
            yield "full", None, None, None, None
        elif self.attn_mode == "swin":
            yield "windowed", self.window_size, None, self.window_size // 2 * (i % 2), None


class SparseTransformerBase(nn.Module):
    """
    Sparse Transformer without output layers.
    Serve as the base class for encoder and decoder.
    """
    def __init__(
        self,
        in_channels: int,
        model_channels: int,
        num_blocks: int,
        num_heads: Optional[int] = None,
        num_head_channels: Optional[int] = 64,
        mlp_ratio: float = 4.0,
        attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
        window_size: Optional[int] = None,
        pe_mode: Literal["ape", "rope"] = "ape",
        use_fp16: bool = False,
        use_checkpoint: bool = False,
        qk_rms_norm: bool = False,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.num_blocks = num_blocks
        self.window_size = window_size
        self.num_heads = num_heads or model_channels // num_head_channels
        self.mlp_ratio = mlp_ratio
        self.attn_mode = attn_mode
        self.pe_mode = pe_mode
        self.use_fp16 = use_fp16
        self.use_checkpoint = use_checkpoint
        self.qk_rms_norm = qk_rms_norm
        self.dtype = torch.float16 if use_fp16 else torch.float32

        if pe_mode == "ape":
            self.pos_embedder = AbsolutePositionEmbedder(model_channels)

        self.input_layer = sp.SparseLinear(in_channels, model_channels)
        self.blocks = nn.ModuleList([
            SparseTransformerBlock(
                model_channels,
                num_heads=self.num_heads,
                mlp_ratio=self.mlp_ratio,
                attn_mode=attn_mode,
                window_size=window_size,
                shift_sequence=shift_sequence,
                shift_window=shift_window,
                serialize_mode=serialize_mode,
                use_checkpoint=self.use_checkpoint,
                use_rope=(pe_mode == "rope"),
                qk_rms_norm=self.qk_rms_norm,
            )
            for attn_mode, window_size, shift_sequence, shift_window, serialize_mode in block_attn_config(self)
        ])

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.blocks.apply(convert_module_to_f32)

    def initialize_weights(self) -> None:
        # Initialize transformer layers:
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

    def forward(self, x: sp.SparseTensor) -> sp.SparseTensor:
        h = self.input_layer(x)
        if self.pe_mode == "ape":
            h = h + self.pos_embedder(x.coords[:, 1:])
        h = h.type(self.dtype)
        for block in self.blocks:
            h = block(h)
        return h
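`block_attn_config` yields one `(attn_mode, window_size, shift_sequence, shift_window, serialize_mode)` tuple per block, alternating the shift on every other block. For example, with `attn_mode="swin"` and `window_size=8` the generator produces:

# even-indexed blocks: ("windowed", 8, None, 0, None)   # un-shifted window attention
# odd-indexed blocks:  ("windowed", 8, None, 4, None)   # window shifted by window_size // 2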
trellis/models/structured_latent_vae/decoder_gs.py
ADDED
@@ -0,0 +1,131 @@
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...modules import sparse as sp
from ...utils.random_utils import hammersley_sequence
from .base import SparseTransformerBase
from ...representations import Gaussian
from ..sparse_elastic_mixin import SparseTransformerElasticMixin


class SLatGaussianDecoder(SparseTransformerBase):
    def __init__(
        self,
        resolution: int,
        model_channels: int,
        latent_channels: int,
        num_blocks: int,
        num_heads: Optional[int] = None,
        num_head_channels: Optional[int] = 64,
        mlp_ratio: float = 4,
        attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
        window_size: int = 8,
        pe_mode: Literal["ape", "rope"] = "ape",
        use_fp16: bool = False,
        use_checkpoint: bool = False,
        qk_rms_norm: bool = False,
        representation_config: dict = None,
    ):
        super().__init__(
            in_channels=latent_channels,
            model_channels=model_channels,
            num_blocks=num_blocks,
            num_heads=num_heads,
            num_head_channels=num_head_channels,
            mlp_ratio=mlp_ratio,
            attn_mode=attn_mode,
            window_size=window_size,
            pe_mode=pe_mode,
            use_fp16=use_fp16,
            use_checkpoint=use_checkpoint,
            qk_rms_norm=qk_rms_norm,
        )
        self.resolution = resolution
        self.rep_config = representation_config
        self._calc_layout()
        self.out_layer = sp.SparseLinear(model_channels, self.out_channels)
        self._build_perturbation()

        self.initialize_weights()
        if use_fp16:
            self.convert_to_fp16()

    def initialize_weights(self) -> None:
        super().initialize_weights()
        # Zero-out output layers:
        nn.init.constant_(self.out_layer.weight, 0)
        nn.init.constant_(self.out_layer.bias, 0)

    def _build_perturbation(self) -> None:
        perturbation = [hammersley_sequence(3, i, self.rep_config['num_gaussians']) for i in range(self.rep_config['num_gaussians'])]
        perturbation = torch.tensor(perturbation).float() * 2 - 1
        perturbation = perturbation / self.rep_config['voxel_size']
        perturbation = torch.atanh(perturbation).to(self.device)
        self.register_buffer('offset_perturbation', perturbation)

    def _calc_layout(self) -> None:
        self.layout = {
            '_xyz' : {'shape': (self.rep_config['num_gaussians'], 3), 'size': self.rep_config['num_gaussians'] * 3},
            '_features_dc' : {'shape': (self.rep_config['num_gaussians'], 1, 3), 'size': self.rep_config['num_gaussians'] * 3},
            '_scaling' : {'shape': (self.rep_config['num_gaussians'], 3), 'size': self.rep_config['num_gaussians'] * 3},
            '_rotation' : {'shape': (self.rep_config['num_gaussians'], 4), 'size': self.rep_config['num_gaussians'] * 4},
            '_opacity' : {'shape': (self.rep_config['num_gaussians'], 1), 'size': self.rep_config['num_gaussians']},
        }
        start = 0
        for k, v in self.layout.items():
            v['range'] = (start, start + v['size'])
            start += v['size']
        self.out_channels = start

    def to_representation(self, x: sp.SparseTensor) -> List[Gaussian]:
        """
        Convert a batch of network outputs to 3D representations.

        Args:
            x: The [N x * x C] sparse tensor output by the network.

        Returns:
            list of representations
        """
        ret = []
        for i in range(x.shape[0]):
            representation = Gaussian(
                sh_degree=0,
                aabb=[-0.5, -0.5, -0.5, 1.0, 1.0, 1.0],
                mininum_kernel_size = self.rep_config['3d_filter_kernel_size'],
                scaling_bias = self.rep_config['scaling_bias'],
                opacity_bias = self.rep_config['opacity_bias'],
                scaling_activation = self.rep_config['scaling_activation']
            )
            xyz = (x.coords[x.layout[i]][:, 1:].float() + 0.5) / self.resolution
            for k, v in self.layout.items():
                if k == '_xyz':
                    offset = x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape'])
                    offset = offset * self.rep_config['lr'][k]
                    if self.rep_config['perturb_offset']:
                        offset = offset + self.offset_perturbation
                    offset = torch.tanh(offset) / self.resolution * 0.5 * self.rep_config['voxel_size']
                    _xyz = xyz.unsqueeze(1) + offset
                    setattr(representation, k, _xyz.flatten(0, 1))
                else:
                    feats = x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape']).flatten(0, 1)
                    feats = feats * self.rep_config['lr'][k]
                    setattr(representation, k, feats)
            ret.append(representation)
        return ret

    def forward(self, x: sp.SparseTensor) -> List[Gaussian]:
        h = super().forward(x)
        h = h.type(x.dtype)
        h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
        h = self.out_layer(h)
        return self.to_representation(h)


class ElasticSLatGaussianDecoder(SparseTransformerElasticMixin, SLatGaussianDecoder):
    """
    Slat VAE Gaussian decoder with elastic memory management.
    Used for training with low VRAM.
    """
    pass
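`_calc_layout` packs every per-voxel Gaussian parameter into a single feature vector, so `out_channels = num_gaussians * (3 + 3 + 3 + 4 + 1)` and `to_representation` slices that vector back apart. Worked through for a hypothetical `representation_config` with `num_gaussians=32`:

# _xyz          32 * 3 =  96   range (0, 96)
# _features_dc  32 * 3 =  96   range (96, 192)
# _scaling      32 * 3 =  96   range (192, 288)
# _rotation     32 * 4 = 128   range (288, 416)
# _opacity      32 * 1 =  32   range (416, 448)
# out_channels = 448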
trellis/models/structured_latent_vae/decoder_mesh.py
ADDED
@@ -0,0 +1,176 @@
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ...modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
from ...modules import sparse as sp
from .base import SparseTransformerBase
from ...representations import MeshExtractResult
from ...representations.mesh import SparseFeatures2Mesh
from ..sparse_elastic_mixin import SparseTransformerElasticMixin


class SparseSubdivideBlock3d(nn.Module):
    """
    A 3D subdivide block that can subdivide the sparse tensor.

    Args:
        channels: channels in the inputs and outputs.
        resolution: the resolution of the input sparse tensor.
        out_channels: if specified, the number of output channels.
        num_groups: the number of groups for the group norm.
    """
    def __init__(
        self,
        channels: int,
        resolution: int,
        out_channels: Optional[int] = None,
        num_groups: int = 32
    ):
        super().__init__()
        self.channels = channels
        self.resolution = resolution
        self.out_resolution = resolution * 2
        self.out_channels = out_channels or channels

        self.act_layers = nn.Sequential(
            sp.SparseGroupNorm32(num_groups, channels),
            sp.SparseSiLU()
        )

        self.sub = sp.SparseSubdivide()

        self.out_layers = nn.Sequential(
            sp.SparseConv3d(channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}"),
            sp.SparseGroupNorm32(num_groups, self.out_channels),
            sp.SparseSiLU(),
            zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}")),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        else:
            self.skip_connection = sp.SparseConv3d(channels, self.out_channels, 1, indice_key=f"res_{self.out_resolution}")

    def forward(self, x: sp.SparseTensor) -> sp.SparseTensor:
        """
        Apply the block to a sparse tensor, doubling its resolution.

        Args:
            x: an [N x C x ...] Tensor of features.
        Returns:
            an [N x C x ...] Tensor of outputs.
        """
        h = self.act_layers(x)
        h = self.sub(h)
        x = self.sub(x)
        h = self.out_layers(h)
        h = h + self.skip_connection(x)
        return h


class SLatMeshDecoder(SparseTransformerBase):
    def __init__(
        self,
        resolution: int,
        model_channels: int,
        latent_channels: int,
        num_blocks: int,
        num_heads: Optional[int] = None,
        num_head_channels: Optional[int] = 64,
        mlp_ratio: float = 4,
        attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
        window_size: int = 8,
        pe_mode: Literal["ape", "rope"] = "ape",
        use_fp16: bool = False,
        use_checkpoint: bool = False,
        qk_rms_norm: bool = False,
        representation_config: dict = None,
    ):
        super().__init__(
            in_channels=latent_channels,
            model_channels=model_channels,
            num_blocks=num_blocks,
            num_heads=num_heads,
            num_head_channels=num_head_channels,
            mlp_ratio=mlp_ratio,
            attn_mode=attn_mode,
            window_size=window_size,
            pe_mode=pe_mode,
            use_fp16=use_fp16,
            use_checkpoint=use_checkpoint,
            qk_rms_norm=qk_rms_norm,
        )
        self.resolution = resolution
        self.rep_config = representation_config
        self.mesh_extractor = SparseFeatures2Mesh(res=self.resolution*4, use_color=self.rep_config.get('use_color', False))
        self.out_channels = self.mesh_extractor.feats_channels
        self.upsample = nn.ModuleList([
            SparseSubdivideBlock3d(
                channels=model_channels,
                resolution=resolution,
                out_channels=model_channels // 4
            ),
            SparseSubdivideBlock3d(
                channels=model_channels // 4,
                resolution=resolution * 2,
                out_channels=model_channels // 8
            )
        ])
        self.out_layer = sp.SparseLinear(model_channels // 8, self.out_channels)

        self.initialize_weights()
        if use_fp16:
            self.convert_to_fp16()

    def initialize_weights(self) -> None:
        super().initialize_weights()
        # Zero-out output layers:
        nn.init.constant_(self.out_layer.weight, 0)
        nn.init.constant_(self.out_layer.bias, 0)

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        super().convert_to_fp16()
        self.upsample.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        super().convert_to_fp32()
        self.upsample.apply(convert_module_to_f32)

    def to_representation(self, x: sp.SparseTensor) -> List[MeshExtractResult]:
        """
        Convert a batch of network outputs to 3D representations.

        Args:
            x: The [N x * x C] sparse tensor output by the network.

        Returns:
            list of representations
        """
        ret = []
        for i in range(x.shape[0]):
            mesh = self.mesh_extractor(x[i], training=self.training)
            ret.append(mesh)
        return ret

    def forward(self, x: sp.SparseTensor) -> List[MeshExtractResult]:
        h = super().forward(x)
        for block in self.upsample:
            h = block(h)
        h = h.type(x.dtype)
        h = self.out_layer(h)
        return self.to_representation(h)


class ElasticSLatMeshDecoder(SparseTransformerElasticMixin, SLatMeshDecoder):
    """
    Slat VAE Mesh decoder with elastic memory management.
    Used for training with low VRAM.
    """
    pass
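For orientation, the mesh decoder's two SparseSubdivideBlock3d stages each double the grid resolution while shrinking the channel width, which is why the mesh extractor above is built at res=self.resolution*4 and the final linear layer reads model_channels // 8 features. The sketch below simply prints that schedule; the values of resolution and model_channels are assumptions for illustration, not a released configuration.

# Illustrative values only (assumed, not taken from a released config).
resolution, model_channels = 64, 768

stages = [
    (resolution,     model_channels,      model_channels // 4),
    (resolution * 2, model_channels // 4, model_channels // 8),
]
for res_in, c_in, c_out in stages:
    # Each SparseSubdivideBlock3d doubles the resolution of the sparse grid.
    print(f"{res_in}^3 x {c_in}ch -> {res_in * 2}^3 x {c_out}ch")
# 64^3 x 768ch -> 128^3 x 192ch
# 128^3 x 192ch -> 256^3 x 96ch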
trellis/models/structured_latent_vae/decoder_rf.py
ADDED
@@ -0,0 +1,113 @@
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ...modules import sparse as sp
from .base import SparseTransformerBase
from ...representations import Strivec
from ..sparse_elastic_mixin import SparseTransformerElasticMixin


class SLatRadianceFieldDecoder(SparseTransformerBase):
    def __init__(
        self,
        resolution: int,
        model_channels: int,
        latent_channels: int,
        num_blocks: int,
        num_heads: Optional[int] = None,
        num_head_channels: Optional[int] = 64,
        mlp_ratio: float = 4,
        attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
        window_size: int = 8,
        pe_mode: Literal["ape", "rope"] = "ape",
        use_fp16: bool = False,
        use_checkpoint: bool = False,
        qk_rms_norm: bool = False,
        representation_config: dict = None,
    ):
        super().__init__(
            in_channels=latent_channels,
            model_channels=model_channels,
            num_blocks=num_blocks,
            num_heads=num_heads,
            num_head_channels=num_head_channels,
            mlp_ratio=mlp_ratio,
            attn_mode=attn_mode,
            window_size=window_size,
            pe_mode=pe_mode,
            use_fp16=use_fp16,
            use_checkpoint=use_checkpoint,
            qk_rms_norm=qk_rms_norm,
        )
        self.resolution = resolution
        self.rep_config = representation_config
        self._calc_layout()
        self.out_layer = sp.SparseLinear(model_channels, self.out_channels)

        self.initialize_weights()
        if use_fp16:
            self.convert_to_fp16()

    def initialize_weights(self) -> None:
        super().initialize_weights()
        # Zero-out output layers:
        nn.init.constant_(self.out_layer.weight, 0)
        nn.init.constant_(self.out_layer.bias, 0)

    def _calc_layout(self) -> None:
        self.layout = {
            'trivec': {'shape': (self.rep_config['rank'], 3, self.rep_config['dim']), 'size': self.rep_config['rank'] * 3 * self.rep_config['dim']},
            'density': {'shape': (self.rep_config['rank'],), 'size': self.rep_config['rank']},
            'features_dc': {'shape': (self.rep_config['rank'], 1, 3), 'size': self.rep_config['rank'] * 3},
        }
        start = 0
        for k, v in self.layout.items():
            v['range'] = (start, start + v['size'])
            start += v['size']
        self.out_channels = start

    def to_representation(self, x: sp.SparseTensor) -> List[Strivec]:
        """
        Convert a batch of network outputs to 3D representations.

        Args:
            x: The [N x * x C] sparse tensor output by the network.

        Returns:
            list of representations
        """
        ret = []
        for i in range(x.shape[0]):
            representation = Strivec(
                sh_degree=0,
                resolution=self.resolution,
                aabb=[-0.5, -0.5, -0.5, 1, 1, 1],
                rank=self.rep_config['rank'],
                dim=self.rep_config['dim'],
                device='cuda',
            )
            representation.density_shift = 0.0
            representation.position = (x.coords[x.layout[i]][:, 1:].float() + 0.5) / self.resolution
            representation.depth = torch.full((representation.position.shape[0], 1), int(np.log2(self.resolution)), dtype=torch.uint8, device='cuda')
            for k, v in self.layout.items():
                setattr(representation, k, x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape']))
            representation.trivec = representation.trivec + 1
            ret.append(representation)
        return ret

    def forward(self, x: sp.SparseTensor) -> List[Strivec]:
        h = super().forward(x)
        h = h.type(x.dtype)
        h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
        h = self.out_layer(h)
        return self.to_representation(h)


class ElasticSLatRadianceFieldDecoder(SparseTransformerElasticMixin, SLatRadianceFieldDecoder):
    """
    Slat VAE Radiance Field Decoder with elastic memory management.
    Used for training with low VRAM.
    """
    pass
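The channel layout that _calc_layout builds above is easy to check by hand: each voxel's output vector is the concatenation of the trivec, density, and features_dc blocks, so out_channels is the sum of their sizes. A small sketch of that arithmetic, using rank=16 and dim=8 purely as example values:

# Example values for the Strivec representation config (assumed for illustration).
rank, dim = 16, 8

layout = {
    'trivec':      rank * 3 * dim,  # shape (rank, 3, dim)
    'density':     rank,            # shape (rank,)
    'features_dc': rank * 3,        # shape (rank, 1, 3)
}
start = 0
for name, size in layout.items():
    print(f"{name}: channels [{start}, {start + size})")
    start += size
print(f"out_channels = {start}")
# trivec: channels [0, 384)
# density: channels [384, 400)
# features_dc: channels [400, 448)
# out_channels = 448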
trellis/models/structured_latent_vae/encoder.py
ADDED
@@ -0,0 +1,80 @@
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...modules import sparse as sp
from .base import SparseTransformerBase
from ..sparse_elastic_mixin import SparseTransformerElasticMixin


class SLatEncoder(SparseTransformerBase):
    def __init__(
        self,
        resolution: int,
        in_channels: int,
        model_channels: int,
        latent_channels: int,
        num_blocks: int,
        num_heads: Optional[int] = None,
        num_head_channels: Optional[int] = 64,
        mlp_ratio: float = 4,
        attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
        window_size: int = 8,
        pe_mode: Literal["ape", "rope"] = "ape",
        use_fp16: bool = False,
        use_checkpoint: bool = False,
        qk_rms_norm: bool = False,
    ):
        super().__init__(
            in_channels=in_channels,
            model_channels=model_channels,
            num_blocks=num_blocks,
            num_heads=num_heads,
            num_head_channels=num_head_channels,
            mlp_ratio=mlp_ratio,
            attn_mode=attn_mode,
            window_size=window_size,
            pe_mode=pe_mode,
            use_fp16=use_fp16,
            use_checkpoint=use_checkpoint,
            qk_rms_norm=qk_rms_norm,
        )
        self.resolution = resolution
        self.out_layer = sp.SparseLinear(model_channels, 2 * latent_channels)

        self.initialize_weights()
        if use_fp16:
            self.convert_to_fp16()

    def initialize_weights(self) -> None:
        super().initialize_weights()
        # Zero-out output layers:
        nn.init.constant_(self.out_layer.weight, 0)
        nn.init.constant_(self.out_layer.bias, 0)

    def forward(self, x: sp.SparseTensor, sample_posterior=True, return_raw=False):
        h = super().forward(x)
        h = h.type(x.dtype)
        h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
        h = self.out_layer(h)

        # Sample from the posterior distribution
        mean, logvar = h.feats.chunk(2, dim=-1)
        if sample_posterior:
            std = torch.exp(0.5 * logvar)
            z = mean + std * torch.randn_like(std)
        else:
            z = mean
        z = h.replace(z)

        if return_raw:
            return z, mean, logvar
        else:
            return z


class ElasticSLatEncoder(SparseTransformerElasticMixin, SLatEncoder):
    """
    SLat VAE encoder with elastic memory management.
    Used for training with low VRAM.
    """
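Since SLatEncoder.forward with return_raw=True hands back the posterior mean and log-variance alongside the sampled latent, a training loop can form the usual closed-form KL divergence against a unit Gaussian from those two tensors. The sketch below shows only that standard formula on stand-in tensors; the shapes are assumed for illustration and the snippet is not taken from the TRELLIS training code.

import torch

# Stand-ins for the (mean, logvar) returned by SLatEncoder(..., return_raw=True):
# one row per active voxel, with an assumed latent width of 8 channels.
mean = torch.randn(1024, 8)
logvar = torch.randn(1024, 8)

# Closed-form KL( N(mean, exp(logvar)) || N(0, I) ), summed over latent channels.
kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=-1)
print(kl.mean())  # average KL per voxel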