# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
import logging

import torch
import torch.cuda.amp as amp
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange

__all__ = [
    "Wan2_2_VAE",
]

CACHE_T = 2  # number of trailing frames kept in each causal-conv feature cache


class CausalConv3d(nn.Conv3d):
    """
    Causal 3D convolution.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._padding = (
            self.padding[2],
            self.padding[2],
            self.padding[1],
            self.padding[1],
            2 * self.padding[0],
            0,
        )
        self.padding = (0, 0, 0)

    def forward(self, x, cache_x=None):
        padding = list(self._padding)
        if cache_x is not None and self._padding[4] > 0:
            cache_x = cache_x.to(x.device)
            x = torch.cat([cache_x, x], dim=2)
            padding[4] -= cache_x.shape[2]
            cache_x = None
        x = F.pad(x, padding)
        try:
            out = super().forward(x)
            return out
        except RuntimeError as e:
            if "miopenStatus" in str(e):
                print("⚠️ MIOpen fallback: this convolution is too large for the AMD GPU path, "
                      "so it will run on the CPU instead (which is very slow). "
                      "Consider using tiled VAE decoding.")
                x_cpu = x.float().cpu()
                weight_cpu = self.weight.float().cpu()
                bias_cpu = self.bias.float().cpu() if self.bias is not None else None
                print(f"[Fallback] x shape: {x_cpu.shape}, weight shape: {weight_cpu.shape}")
                out = F.conv3d(x_cpu, weight_cpu, bias_cpu,
                               self.stride, (0, 0, 0),  # avoid double padding here
                               self.dilation, self.groups)
                out = out.to(x.device)
                if x.dtype in (torch.float16, torch.bfloat16):
                    out = out.half()
                if x.dtype != out.dtype:
                    out = out.to(x.dtype)
                return out
            raise

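
# Added commentary (not from the upstream module): CausalConv3d pads only the left
# side of the temporal axis, so output frame t never depends on input frames > t.
# For example, CausalConv3d(16, 16, 3, padding=1) rewrites self._padding to
# (1, 1, 1, 1, 2, 0): width/height padded symmetrically, time padded by 2 on the
# left and 0 on the right. During chunked processing, `cache_x` (the last CACHE_T
# frames of the previous chunk) is prepended in place of that left padding:
#
#   conv = CausalConv3d(16, 16, 3, padding=1)
#   y = conv(torch.randn(1, 16, 5, 32, 32))   # -> shape (1, 16, 5, 32, 32)
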

class RMS_norm(nn.Module):

    def __init__(self, dim, channel_first=True, images=True, bias=False):
        super().__init__()
        broadcastable_dims = (1, 1, 1) if not images else (1, 1)
        shape = (dim, *broadcastable_dims) if channel_first else (dim,)

        self.channel_first = channel_first
        self.scale = dim**0.5
        self.gamma = nn.Parameter(torch.ones(shape))
        self.bias = nn.Parameter(torch.zeros(shape)) if bias else 0.0

    def forward(self, x):
        return (F.normalize(x, dim=(1 if self.channel_first else -1)) *
                self.scale * self.gamma + self.bias)

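
# Added commentary (not from the upstream module): RMS_norm computes
#   y = x / ||x||_2 * sqrt(dim) * gamma + bias
# along the channel axis, which is the usual RMSNorm y = x / RMS(x) * gamma + bias
# since RMS(x) = ||x||_2 / sqrt(dim). With images=False the affine parameters are
# shaped (dim, 1, 1, 1) so they broadcast over the temporal axis of video tensors.
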

class Upsample(nn.Upsample):

    def forward(self, x):
        """
        Work around missing bfloat16 support in nearest-neighbor interpolation
        by upsampling in float32 and casting back to the input dtype.
        """
        return super().forward(x.float()).type_as(x)


class Resample(nn.Module):

    def __init__(self, dim, mode):
        assert mode in (
            "none",
            "upsample2d",
            "upsample3d",
            "downsample2d",
            "downsample3d",
        )
        super().__init__()
        self.dim = dim
        self.mode = mode

        # layers
        if mode == "upsample2d":
            self.resample = nn.Sequential(
                Upsample(scale_factor=(2.0, 2.0), mode="nearest-exact"),
                nn.Conv2d(dim, dim, 3, padding=1),
            )
        elif mode == "upsample3d":
            self.resample = nn.Sequential(
                Upsample(scale_factor=(2.0, 2.0), mode="nearest-exact"),
                nn.Conv2d(dim, dim, 3, padding=1),
                # nn.Conv2d(dim, dim//2, 3, padding=1)
            )
            self.time_conv = CausalConv3d(
                dim, dim * 2, (3, 1, 1), padding=(1, 0, 0))
        elif mode == "downsample2d":
            self.resample = nn.Sequential(
                nn.ZeroPad2d((0, 1, 0, 1)),
                nn.Conv2d(dim, dim, 3, stride=(2, 2)))
        elif mode == "downsample3d":
            self.resample = nn.Sequential(
                nn.ZeroPad2d((0, 1, 0, 1)),
                nn.Conv2d(dim, dim, 3, stride=(2, 2)))
            self.time_conv = CausalConv3d(
                dim, dim, (3, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0))
        else:
            self.resample = nn.Identity()

    def forward(self, x, feat_cache=None, feat_idx=[0]):
        b, c, t, h, w = x.size()
        if self.mode == "upsample3d":
            if feat_cache is not None:
                idx = feat_idx[0]
                if feat_cache[idx] is None:
                    feat_cache[idx] = "Rep"
                    feat_idx[0] += 1
                else:
                    cache_x = x[:, :, -CACHE_T:, :, :].clone()
                    if (cache_x.shape[2] < 2 and feat_cache[idx] is not None and
                            feat_cache[idx] != "Rep"):
                        # the current chunk has a single frame; prepend the last
                        # frame cached from the previous chunk
                        cache_x = torch.cat(
                            [
                                feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(
                                    cache_x.device),
                                cache_x,
                            ],
                            dim=2,
                        )
                    if (cache_x.shape[2] < 2 and feat_cache[idx] is not None and
                            feat_cache[idx] == "Rep"):
                        cache_x = torch.cat(
                            [
                                torch.zeros_like(cache_x).to(cache_x.device),
                                cache_x
                            ],
                            dim=2,
                        )
                    if feat_cache[idx] == "Rep":
                        x = self.time_conv(x)
                    else:
                        x = self.time_conv(x, feat_cache[idx])
                    feat_cache[idx] = cache_x
                    feat_idx[0] += 1
                    x = x.reshape(b, 2, c, t, h, w)
                    x = torch.stack((x[:, 0, :, :, :, :], x[:, 1, :, :, :, :]),
                                    3)
                    x = x.reshape(b, c, t * 2, h, w)
        t = x.shape[2]
        x = rearrange(x, "b c t h w -> (b t) c h w")
        x = self.resample(x)
        x = rearrange(x, "(b t) c h w -> b c t h w", t=t)

        if self.mode == "downsample3d":
            if feat_cache is not None:
                idx = feat_idx[0]
                if feat_cache[idx] is None:
                    feat_cache[idx] = x.clone()
                    feat_idx[0] += 1
                else:
                    cache_x = x[:, :, -1:, :, :].clone()
                    x = self.time_conv(
                        torch.cat([feat_cache[idx][:, :, -1:, :, :], x], 2))
                    feat_cache[idx] = cache_x
                    feat_idx[0] += 1
        return x

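    # Added commentary (not from the upstream module): in "upsample3d" mode the
    # temporal upsample works by channel duplication rather than interpolation:
    # time_conv maps C -> 2C channels, the result is viewed as (b, 2, C, T, H, W)
    # and interleaved along time into (b, C, 2*T, H, W), i.e. every input frame
    # yields two consecutive output frames. The "Rep" sentinel marks the first
    # chunk of a clip, where no cached frames exist yet; afterwards the last
    # CACHE_T frames are stored in feat_cache so the next chunk sees a causal
    # temporal history.
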
    def init_weight(self, conv):
        conv_weight = conv.weight.detach().clone()
        nn.init.zeros_(conv_weight)
        c1, c2, t, h, w = conv_weight.size()
        one_matrix = torch.eye(c1, c2)
        init_matrix = one_matrix
        nn.init.zeros_(conv_weight)
        conv_weight.data[:, :, 1, 0, 0] = init_matrix  # * 0.5
        conv.weight = nn.Parameter(conv_weight)
        nn.init.zeros_(conv.bias.data)

    def init_weight2(self, conv):
        conv_weight = conv.weight.data.detach().clone()
        nn.init.zeros_(conv_weight)
        c1, c2, t, h, w = conv_weight.size()
        init_matrix = torch.eye(c1 // 2, c2)
        conv_weight[:c1 // 2, :, -1, 0, 0] = init_matrix
        conv_weight[c1 // 2:, :, -1, 0, 0] = init_matrix
        conv.weight = nn.Parameter(conv_weight)
        nn.init.zeros_(conv.bias.data)


class ResidualBlock(nn.Module):

    def __init__(self, in_dim, out_dim, dropout=0.0):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim

        # layers
        self.residual = nn.Sequential(
            RMS_norm(in_dim, images=False),
            nn.SiLU(),
            CausalConv3d(in_dim, out_dim, 3, padding=1),
            RMS_norm(out_dim, images=False),
            nn.SiLU(),
            nn.Dropout(dropout),
            CausalConv3d(out_dim, out_dim, 3, padding=1),
        )
        self.shortcut = (
            CausalConv3d(in_dim, out_dim, 1)
            if in_dim != out_dim else nn.Identity())

    def forward(self, x, feat_cache=None, feat_idx=[0]):
        h = self.shortcut(x)
        for layer in self.residual:
            if isinstance(layer, CausalConv3d) and feat_cache is not None:
                idx = feat_idx[0]
                cache_x = x[:, :, -CACHE_T:, :, :].clone()
                if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                    # the current chunk has a single frame; prepend the last
                    # frame cached from the previous chunk
                    cache_x = torch.cat(
                        [
                            feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(
                                cache_x.device),
                            cache_x,
                        ],
                        dim=2,
                    )
                x = layer(x, feat_cache[idx])
                feat_cache[idx] = cache_x
                feat_idx[0] += 1
            else:
                x = layer(x)
        return x + h


class AttentionBlock(nn.Module):
    """
    Single-head self-attention over the spatial positions of each frame
    (every frame is attended to independently).
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

        # layers
        self.norm = RMS_norm(dim)
        self.to_qkv = nn.Conv2d(dim, dim * 3, 1)
        self.proj = nn.Conv2d(dim, dim, 1)

        # zero out the last layer params
        nn.init.zeros_(self.proj.weight)

    def forward(self, x):
        identity = x
        b, c, t, h, w = x.size()
        x = rearrange(x, "b c t h w -> (b t) c h w")
        x = self.norm(x)
        # compute query, key, value; each has shape (b*t, 1, h*w, c)
        q, k, v = (
            self.to_qkv(x).reshape(b * t, 1, c * 3,
                                   -1).permute(0, 1, 3,
                                               2).contiguous().chunk(3, dim=-1))

        # apply attention
        x = F.scaled_dot_product_attention(
            q,
            k,
            v,
        )
        x = x.squeeze(1).permute(0, 2, 1).reshape(b * t, c, h, w)

        # output
        x = self.proj(x)
        x = rearrange(x, "(b t) c h w -> b c t h w", t=t)
        return x + identity


def patchify(x, patch_size):
    if patch_size == 1:
        return x
    if x.dim() == 4:
        x = rearrange(
            x, "b c (h q) (w r) -> b (c r q) h w", q=patch_size, r=patch_size)
    elif x.dim() == 5:
        x = rearrange(
            x,
            "b c f (h q) (w r) -> b (c r q) f h w",
            q=patch_size,
            r=patch_size,
        )
    else:
        raise ValueError(f"Invalid input shape: {x.shape}")

    return x


def unpatchify(x, patch_size):
    if patch_size == 1:
        return x

    if x.dim() == 4:
        x = rearrange(
            x, "b (c r q) h w -> b c (h q) (w r)", q=patch_size, r=patch_size)
    elif x.dim() == 5:
        x = rearrange(
            x,
            "b (c r q) f h w -> b c f (h q) (w r)",
            q=patch_size,
            r=patch_size,
        )
    return x

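
# Added commentary (not from the upstream module): patchify folds each
# patch_size x patch_size spatial block into the channel axis (a space-to-depth
# move), and unpatchify is its exact inverse. With patch_size=2 a video tensor of
# shape [B, 3, F, H, W] becomes [B, 12, F, H // 2, W // 2], which is why the
# encoder's first convolution takes 12 input channels and the decoder's head
# produces 12 output channels.
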

class AvgDown3D(nn.Module):

    def __init__(
        self,
        in_channels,
        out_channels,
        factor_t,
        factor_s=1,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.factor_t = factor_t
        self.factor_s = factor_s
        self.factor = self.factor_t * self.factor_s * self.factor_s

        assert in_channels * self.factor % out_channels == 0
        self.group_size = in_channels * self.factor // out_channels

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        pad_t = (self.factor_t - x.shape[2] % self.factor_t) % self.factor_t
        pad = (0, 0, 0, 0, pad_t, 0)
        x = F.pad(x, pad)
        B, C, T, H, W = x.shape
        x = x.view(
            B,
            C,
            T // self.factor_t,
            self.factor_t,
            H // self.factor_s,
            self.factor_s,
            W // self.factor_s,
            self.factor_s,
        )
        x = x.permute(0, 1, 3, 5, 7, 2, 4, 6).contiguous()
        x = x.view(
            B,
            C * self.factor,
            T // self.factor_t,
            H // self.factor_s,
            W // self.factor_s,
        )
        x = x.view(
            B,
            self.out_channels,
            self.group_size,
            T // self.factor_t,
            H // self.factor_s,
            W // self.factor_s,
        )
        x = x.mean(dim=2)
        return x

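
# Added commentary (not from the upstream module): AvgDown3D is a parameter-free
# downsample. It left-pads the temporal axis up to a multiple of factor_t, folds
# every factor_t x factor_s x factor_s block into the channel axis, then averages
# groups of group_size channels to reach out_channels. For a hypothetical
# AvgDown3D(96, 192, factor_t=2, factor_s=2): factor = 8, group_size = 96 * 8 / 192
# = 4, and an input of shape [B, 96, T, H, W] maps to [B, 192, T/2, H/2, W/2].
# It provides the non-learned shortcut path inside Down_ResidualBlock.
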

class DupUp3D(nn.Module):

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        factor_t,
        factor_s=1,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        self.factor_t = factor_t
        self.factor_s = factor_s
        self.factor = self.factor_t * self.factor_s * self.factor_s

        assert out_channels * self.factor % in_channels == 0
        self.repeats = out_channels * self.factor // in_channels

    def forward(self, x: torch.Tensor, first_chunk=False) -> torch.Tensor:
        x = x.repeat_interleave(self.repeats, dim=1)
        x = x.view(
            x.size(0),
            self.out_channels,
            self.factor_t,
            self.factor_s,
            self.factor_s,
            x.size(2),
            x.size(3),
            x.size(4),
        )
        x = x.permute(0, 1, 5, 2, 6, 3, 7, 4).contiguous()
        x = x.view(
            x.size(0),
            self.out_channels,
            x.size(2) * self.factor_t,
            x.size(4) * self.factor_s,
            x.size(6) * self.factor_s,
        )
        if first_chunk:
            x = x[:, :, self.factor_t - 1:, :, :]
        return x

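
# Added commentary (not from the upstream module): DupUp3D mirrors AvgDown3D.
# Each input channel is repeated `repeats` times, the extra channels are unfolded
# back into factor_t x factor_s x factor_s spatio-temporal blocks, and the result
# has out_channels with factor_t x the frames and factor_s x the height/width.
# When first_chunk=True the leading factor_t - 1 duplicated frames are dropped,
# so the very first latent frame of a clip expands to a single output frame
# instead of factor_t frames (matching the causal, left-padded encoder).
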

class Down_ResidualBlock(nn.Module):

    def __init__(self,
                 in_dim,
                 out_dim,
                 dropout,
                 mult,
                 temperal_downsample=False,
                 down_flag=False):
        super().__init__()

        # Shortcut path with downsample
        self.avg_shortcut = AvgDown3D(
            in_dim,
            out_dim,
            factor_t=2 if temperal_downsample else 1,
            factor_s=2 if down_flag else 1,
        )

        # Main path with residual blocks and downsample
        downsamples = []
        for _ in range(mult):
            downsamples.append(ResidualBlock(in_dim, out_dim, dropout))
            in_dim = out_dim

        # Add the final downsample block
        if down_flag:
            mode = "downsample3d" if temperal_downsample else "downsample2d"
            downsamples.append(Resample(out_dim, mode=mode))

        self.downsamples = nn.Sequential(*downsamples)

    def forward(self, x, feat_cache=None, feat_idx=[0]):
        x_copy = x.clone()
        for module in self.downsamples:
            x = module(x, feat_cache, feat_idx)

        return x + self.avg_shortcut(x_copy)

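
# Added commentary (not from the upstream module): Down_ResidualBlock (and its
# mirror Up_ResidualBlock below) runs two parallel paths and sums them: a
# parameter-free AvgDown3D / DupUp3D shortcut and a learned stack of
# ResidualBlocks terminated by a strided Resample. The learned path therefore
# only has to model the residual on top of plain average pooling / duplication.
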

class Up_ResidualBlock(nn.Module):

    def __init__(self,
                 in_dim,
                 out_dim,
                 dropout,
                 mult,
                 temperal_upsample=False,
                 up_flag=False):
        super().__init__()
        # Shortcut path with upsample
        if up_flag:
            self.avg_shortcut = DupUp3D(
                in_dim,
                out_dim,
                factor_t=2 if temperal_upsample else 1,
                factor_s=2 if up_flag else 1,
            )
        else:
            self.avg_shortcut = None

        # Main path with residual blocks and upsample
        upsamples = []
        for _ in range(mult):
            upsamples.append(ResidualBlock(in_dim, out_dim, dropout))
            in_dim = out_dim

        # Add the final upsample block
        if up_flag:
            mode = "upsample3d" if temperal_upsample else "upsample2d"
            upsamples.append(Resample(out_dim, mode=mode))

        self.upsamples = nn.Sequential(*upsamples)

    def forward(self, x, feat_cache=None, feat_idx=[0], first_chunk=False):
        x_main = x.clone()
        for module in self.upsamples:
            x_main = module(x_main, feat_cache, feat_idx)
        if self.avg_shortcut is not None:
            x_shortcut = self.avg_shortcut(x, first_chunk)
            return x_main + x_shortcut
        else:
            return x_main


class Encoder3d(nn.Module):

    def __init__(
        self,
        dim=128,
        z_dim=4,
        dim_mult=[1, 2, 4, 4],
        num_res_blocks=2,
        attn_scales=[],
        temperal_downsample=[True, True, False],
        dropout=0.0,
    ):
        super().__init__()
        self.dim = dim
        self.z_dim = z_dim
        self.dim_mult = dim_mult
        self.num_res_blocks = num_res_blocks
        self.attn_scales = attn_scales
        self.temperal_downsample = temperal_downsample

        # dimensions
        dims = [dim * u for u in [1] + dim_mult]
        scale = 1.0

        # init block
        self.conv1 = CausalConv3d(12, dims[0], 3, padding=1)

        # downsample blocks
        downsamples = []
        for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
            t_down_flag = (
                temperal_downsample[i]
                if i < len(temperal_downsample) else False)
            downsamples.append(
                Down_ResidualBlock(
                    in_dim=in_dim,
                    out_dim=out_dim,
                    dropout=dropout,
                    mult=num_res_blocks,
                    temperal_downsample=t_down_flag,
                    down_flag=i != len(dim_mult) - 1,
                ))
            scale /= 2.0
        self.downsamples = nn.Sequential(*downsamples)

        # middle blocks
        self.middle = nn.Sequential(
            ResidualBlock(out_dim, out_dim, dropout),
            AttentionBlock(out_dim),
            ResidualBlock(out_dim, out_dim, dropout),
        )

        # output blocks
        self.head = nn.Sequential(
            RMS_norm(out_dim, images=False),
            nn.SiLU(),
            CausalConv3d(out_dim, z_dim, 3, padding=1),
        )

    def forward(self, x, feat_cache=None, feat_idx=[0]):
        if feat_cache is not None:
            idx = feat_idx[0]
            cache_x = x[:, :, -CACHE_T:, :, :].clone()
            if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                cache_x = torch.cat(
                    [
                        feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(
                            cache_x.device),
                        cache_x,
                    ],
                    dim=2,
                )
            x = self.conv1(x, feat_cache[idx])
            feat_cache[idx] = cache_x
            feat_idx[0] += 1
        else:
            x = self.conv1(x)

        ## downsamples
        for layer in self.downsamples:
            if feat_cache is not None:
                x = layer(x, feat_cache, feat_idx)
            else:
                x = layer(x)

        ## middle
        for layer in self.middle:
            if isinstance(layer, ResidualBlock) and feat_cache is not None:
                x = layer(x, feat_cache, feat_idx)
            else:
                x = layer(x)

        ## head
        for layer in self.head:
            if isinstance(layer, CausalConv3d) and feat_cache is not None:
                idx = feat_idx[0]
                cache_x = x[:, :, -CACHE_T:, :, :].clone()
                if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                    cache_x = torch.cat(
                        [
                            feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(
                                cache_x.device),
                            cache_x,
                        ],
                        dim=2,
                    )
                x = layer(x, feat_cache[idx])
                feat_cache[idx] = cache_x
                feat_idx[0] += 1
            else:
                x = layer(x)

        return x

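
# Added commentary (not from the upstream module): with the default Wan2.2
# configuration (dim_mult=[1, 2, 4, 4], two of the three downsampling stages also
# striding in time), Encoder3d reduces its input by 8x spatially and 4x
# temporally; together with the 2x2 patchify applied in WanVAE_.encode, a
# [3, 1 + 4k, H, W] clip ends up as a [z_dim, 1 + k, H / 16, W / 16] latent.
# Decoder3d below is the mirror image and ends in a 12-channel head that
# unpatchify turns back into RGB at full resolution.
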

class Decoder3d(nn.Module):

    def __init__(
        self,
        dim=128,
        z_dim=4,
        dim_mult=[1, 2, 4, 4],
        num_res_blocks=2,
        attn_scales=[],
        temperal_upsample=[False, True, True],
        dropout=0.0,
    ):
        super().__init__()
        self.dim = dim
        self.z_dim = z_dim
        self.dim_mult = dim_mult
        self.num_res_blocks = num_res_blocks
        self.attn_scales = attn_scales
        self.temperal_upsample = temperal_upsample

        # dimensions
        dims = [dim * u for u in [dim_mult[-1]] + dim_mult[::-1]]
        scale = 1.0 / 2**(len(dim_mult) - 2)
        # init block
        self.conv1 = CausalConv3d(z_dim, dims[0], 3, padding=1)

        # middle blocks
        self.middle = nn.Sequential(
            ResidualBlock(dims[0], dims[0], dropout),
            AttentionBlock(dims[0]),
            ResidualBlock(dims[0], dims[0], dropout),
        )

        # upsample blocks
        upsamples = []
        for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
            t_up_flag = temperal_upsample[i] if i < len(
                temperal_upsample) else False
            upsamples.append(
                Up_ResidualBlock(
                    in_dim=in_dim,
                    out_dim=out_dim,
                    dropout=dropout,
                    mult=num_res_blocks + 1,
                    temperal_upsample=t_up_flag,
                    up_flag=i != len(dim_mult) - 1,
                ))
        self.upsamples = nn.Sequential(*upsamples)

        # output blocks
        self.head = nn.Sequential(
            RMS_norm(out_dim, images=False),
            nn.SiLU(),
            CausalConv3d(out_dim, 12, 3, padding=1),
        )

    def forward(self, x, feat_cache=None, feat_idx=[0], first_chunk=False):
        if feat_cache is not None:
            idx = feat_idx[0]
            cache_x = x[:, :, -CACHE_T:, :, :].clone()
            if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                cache_x = torch.cat(
                    [
                        feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(
                            cache_x.device),
                        cache_x,
                    ],
                    dim=2,
                )
            x = self.conv1(x, feat_cache[idx])
            feat_cache[idx] = cache_x
            feat_idx[0] += 1
        else:
            x = self.conv1(x)

        ## middle
        for layer in self.middle:
            if isinstance(layer, ResidualBlock) and feat_cache is not None:
                x = layer(x, feat_cache, feat_idx)
            else:
                x = layer(x)

        ## upsamples
        for layer in self.upsamples:
            if feat_cache is not None:
                x = layer(x, feat_cache, feat_idx, first_chunk)
            else:
                x = layer(x)

        ## head
        for layer in self.head:
            if isinstance(layer, CausalConv3d) and feat_cache is not None:
                idx = feat_idx[0]
                cache_x = x[:, :, -CACHE_T:, :, :].clone()
                if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                    cache_x = torch.cat(
                        [
                            feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(
                                cache_x.device),
                            cache_x,
                        ],
                        dim=2,
                    )
                x = layer(x, feat_cache[idx])
                feat_cache[idx] = cache_x
                feat_idx[0] += 1
            else:
                x = layer(x)
        return x


def count_conv3d(model):
    count = 0
    for m in model.modules():
        if isinstance(m, CausalConv3d):
            count += 1
    return count


class WanVAE_(nn.Module):

    def __init__(
        self,
        dim=160,
        dec_dim=256,
        z_dim=16,
        dim_mult=[1, 2, 4, 4],
        num_res_blocks=2,
        attn_scales=[],
        temperal_downsample=[True, True, False],
        dropout=0.0,
    ):
        super().__init__()
        self.dim = dim
        self.z_dim = z_dim
        self.dim_mult = dim_mult
        self.num_res_blocks = num_res_blocks
        self.attn_scales = attn_scales
        self.temperal_downsample = temperal_downsample
        self.temperal_upsample = temperal_downsample[::-1]

        # modules
        self.encoder = Encoder3d(
            dim,
            z_dim * 2,
            dim_mult,
            num_res_blocks,
            attn_scales,
            self.temperal_downsample,
            dropout,
        )
        self.conv1 = CausalConv3d(z_dim * 2, z_dim * 2, 1)
        self.conv2 = CausalConv3d(z_dim, z_dim, 1)
        self.decoder = Decoder3d(
            dec_dim,
            z_dim,
            dim_mult,
            num_res_blocks,
            attn_scales,
            self.temperal_upsample,
            dropout,
        )

    def forward(self, x, scale=[0, 1]):
        mu = self.encode(x, scale)
        x_recon = self.decode(mu, scale)
        return x_recon, mu

    def encode(self, x, scale, any_end_frame=False):
        # NOTE: `any_end_frame` is accepted but currently unused in this method.
        self.clear_cache()
        x = patchify(x, patch_size=2)
        t = x.shape[2]
        iter_ = 1 + (t - 1) // 4
        for i in range(iter_):
            self._enc_conv_idx = [0]
            if i == 0:
                out = self.encoder(
                    x[:, :, :1, :, :],
                    feat_cache=self._enc_feat_map,
                    feat_idx=self._enc_conv_idx,
                )
            else:
                out_ = self.encoder(
                    x[:, :, 1 + 4 * (i - 1):1 + 4 * i, :, :],
                    feat_cache=self._enc_feat_map,
                    feat_idx=self._enc_conv_idx,
                )
                out = torch.cat([out, out_], 2)
        mu, log_var = self.conv1(out).chunk(2, dim=1)
        if isinstance(scale[0], torch.Tensor):
            mu = (mu - scale[0].view(1, self.z_dim, 1, 1, 1)) * scale[1].view(
                1, self.z_dim, 1, 1, 1)
        else:
            mu = (mu - scale[0]) * scale[1]
        self.clear_cache()
        return mu

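    # Added commentary (not from the upstream module): encode() processes the
    # clip causally in chunks: the first frame alone, then groups of four frames,
    # with self._enc_feat_map carrying the per-convolution temporal caches across
    # chunks. Inputs are therefore expected to have 1 + 4k frames, producing a
    # latent with 1 + k frames (e.g. 21 input frames -> 6 latent frames). The
    # latent mean is then normalized as (mu - scale[0]) * scale[1].
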
    def decode(self, z, scale, any_end_frame=False):
        self.clear_cache()
        if isinstance(scale[0], torch.Tensor):
            z = z / scale[1].view(1, self.z_dim, 1, 1, 1) + scale[0].view(
                1, self.z_dim, 1, 1, 1)
        else:
            z = z / scale[1] + scale[0]
        iter_ = z.shape[2]
        x = self.conv2(z)
        for i in range(iter_):
            self._conv_idx = [0]
            if i == 0:
                out = self.decoder(
                    x[:, :, i:i + 1, :, :],
                    feat_cache=self._feat_map,
                    feat_idx=self._conv_idx,
                    first_chunk=True,
                )
            else:
                out_ = self.decoder(
                    x[:, :, i:i + 1, :, :],
                    feat_cache=self._feat_map,
                    feat_idx=self._conv_idx,
                )
                out = torch.cat([out, out_], 2)
        out = unpatchify(out, patch_size=2)
        self.clear_cache()
        return out

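    # Added commentary (not from the upstream module): decode() inverts the scale
    # normalization and then decodes the latent one frame at a time, again relying
    # on the cached causal state. The first latent frame expands to a single
    # output frame and every later latent frame expands to four, so 1 + k latent
    # frames reconstruct the original 1 + 4k frames before unpatchify restores
    # the full spatial resolution.
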
    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[-2], b.shape[-2], blend_extent)
        for y in range(blend_extent):
            b[:, :, :, y, :] = (
                a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) +
                b[:, :, :, y, :] * (y / blend_extent))
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[-1], b.shape[-1], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, :, x] = (
                a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) +
                b[:, :, :, :, x] * (x / blend_extent))
        return b

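    # Added commentary (not from the upstream module): blend_v / blend_h do a
    # linear cross-fade over the overlapping strip of two neighbouring tiles.
    # For offset y in [0, blend_extent) the blended row is
    #   a[..., -blend_extent + y, :] * (1 - y / blend_extent) + b[..., y, :] * (y / blend_extent)
    # so the seam fades smoothly from the previous tile (a) into the current one (b).
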
    def spatial_tiled_decode(self, z, scale, tile_size, any_end_frame=False):
        tile_sample_min_size = tile_size
        tile_latent_min_size = int(tile_sample_min_size / 16)
        tile_overlap_factor = 0.25

        # z: [b, c, t, h, w]

        # NOTE: self.decode() below applies the same de-normalization again; if
        # this tiled path is re-enabled, the double application may need revisiting.
        if isinstance(scale[0], torch.Tensor):
            z = z / scale[1].view(1, self.z_dim, 1, 1, 1) + scale[0].view(
                1, self.z_dim, 1, 1, 1)
        else:
            z = z / scale[1] + scale[0]

        # stride between tile origins (latent cells) and width of the blended
        # seam region (output pixels)
        overlap_size = int(tile_latent_min_size * (1 - tile_overlap_factor))
        blend_extent = int(tile_sample_min_size * tile_overlap_factor)
        row_limit = tile_sample_min_size - blend_extent

        # Split z into overlapping tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[-2], overlap_size):
            row = []
            for j in range(0, z.shape[-1], overlap_size):
                tile = z[:, :, :, i: i + tile_latent_min_size, j: j + tile_latent_min_size]
                decoded = self.decode(tile, scale, any_end_frame=any_end_frame)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=-1))

        return torch.cat(result_rows, dim=-2)

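    # Added commentary (not from the upstream module): worked numbers, assuming
    # tile_size=256. Each latent tile spans 16 x 16 cells (256 / 16) and decodes
    # to 256 x 256 pixels; tile origins advance by overlap_size = 12 latent cells
    # (= 192 pixels), the 64-pixel overlap (blend_extent) is cross-faded with
    # blend_v / blend_h, and every tile is then cropped to row_limit = 192 pixels
    # before the rows are concatenated back together.
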
    def spatial_tiled_encode(self, x, scale, tile_size, any_end_frame=False):
        tile_sample_min_size = tile_size
        tile_latent_min_size = int(tile_sample_min_size / 16)
        tile_overlap_factor = 0.25

        overlap_size = int(tile_sample_min_size * (1 - tile_overlap_factor))
        blend_extent = int(tile_latent_min_size * tile_overlap_factor)
        row_limit = tile_latent_min_size - blend_extent

        # Split video into tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[-2], overlap_size):
            row = []
            for j in range(0, x.shape[-1], overlap_size):
                tile = x[:, :, :, i: i + tile_sample_min_size, j: j + tile_sample_min_size]
                tile = self.encode(tile, scale, any_end_frame=any_end_frame)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=-1))

        mu = torch.cat(result_rows, dim=-2)

        # NOTE: self.encode() above already applies the (mu - scale[0]) * scale[1]
        # normalization; if this tiled path is re-enabled, applying it again here
        # may need revisiting.
        if isinstance(scale[0], torch.Tensor):
            mu = (mu - scale[0].view(1, self.z_dim, 1, 1, 1)) * scale[1].view(
                1, self.z_dim, 1, 1, 1)
        else:
            mu = (mu - scale[0]) * scale[1]

        return mu

    def reparameterize(self, mu, log_var):
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        return eps * std + mu

    def sample(self, imgs, deterministic=False):
        # NOTE: encode() above returns only the normalized mean (and requires a
        # `scale` argument), so this legacy sampling helper does not match its
        # current signature.
        mu, log_var = self.encode(imgs)
        if deterministic:
            return mu
        std = torch.exp(0.5 * log_var.clamp(-30.0, 20.0))
        return mu + std * torch.randn_like(std)

    def clear_cache(self):
        self._conv_num = count_conv3d(self.decoder)
        self._conv_idx = [0]
        self._feat_map = [None] * self._conv_num
        # cache encode
        self._enc_conv_num = count_conv3d(self.encoder)
        self._enc_conv_idx = [0]
        self._enc_feat_map = [None] * self._enc_conv_num


def _video_vae(pretrained_path=None, z_dim=16, dim=160, device="cpu", **kwargs):
    # params
    cfg = dict(
        dim=dim,
        z_dim=z_dim,
        dim_mult=[1, 2, 4, 4],
        num_res_blocks=2,
        attn_scales=[],
        temperal_downsample=[True, True, True],
        dropout=0.0,
    )
    cfg.update(**kwargs)

    # init model
    with torch.device("meta"):
        model = WanVAE_(**cfg)

    from mmgp import offload
    # load checkpoint
    logging.info(f"loading {pretrained_path}")
    # model.load_state_dict(
    #     torch.load(pretrained_path, map_location=device), assign=True)
    # offload.save_model(model, "Wan_vae_2_2.safetensors")
    # model.to(torch.bfloat16)
    # offload.save_model(model, "Wan_vae_2_2_bf16.safetensors")
    offload.load_model_data(model, pretrained_path.replace(".pth", ".safetensors"), writable_tensors=False)

    return model


class Wan2_2_VAE:

    def __init__(
        self,
        z_dim=48,
        c_dim=160,
        vae_pth=None,
        dim_mult=[1, 2, 4, 4],
        temperal_downsample=[False, True, True],
        dtype=torch.float,
        device="cuda",
    ):

        self.dtype = dtype
        self.device = device

        # per-channel statistics of the 48-dim latent; self.scale stores
        # [mean, 1 / std] and is used to normalize/denormalize latents
        mean = torch.tensor(
            [
                -0.2289, -0.0052, -0.1323, -0.2339, -0.2799, 0.0174,
                0.1838, 0.1557, -0.1382, 0.0542, 0.2813, 0.0891,
                0.1570, -0.0098, 0.0375, -0.1825, -0.2246, -0.1207,
                -0.0698, 0.5109, 0.2665, -0.2108, -0.2158, 0.2502,
                -0.2055, -0.0322, 0.1109, 0.1567, -0.0729, 0.0899,
                -0.2799, -0.1230, -0.0313, -0.1649, 0.0117, 0.0723,
                -0.2839, -0.2083, -0.0520, 0.3748, 0.0152, 0.1957,
                0.1433, -0.2944, 0.3573, -0.0548, -0.1681, -0.0667,
            ],
            dtype=dtype,
            device=device,
        )
        std = torch.tensor(
            [
                0.4765, 1.0364, 0.4514, 1.1677, 0.5313, 0.4990,
                0.4818, 0.5013, 0.8158, 1.0344, 0.5894, 1.0901,
                0.6885, 0.6165, 0.8454, 0.4978, 0.5759, 0.3523,
                0.7135, 0.6804, 0.5833, 1.4146, 0.8986, 0.5659,
                0.7069, 0.5338, 0.4889, 0.4917, 0.4069, 0.4999,
                0.6866, 0.4093, 0.5709, 0.6065, 0.6415, 0.4944,
                0.5726, 1.2042, 0.5458, 1.6887, 0.3971, 1.0600,
                0.3943, 0.5537, 0.5444, 0.4089, 0.7468, 0.7744,
            ],
            dtype=dtype,
            device=device,
        )
        self.scale = [mean, 1.0 / std]

        # init model
        self.model = (
            _video_vae(
                pretrained_path=vae_pth,
                z_dim=z_dim,
                dim=c_dim,
                dim_mult=dim_mult,
                temperal_downsample=temperal_downsample,
            ).eval().requires_grad_(False).to(device))

        self.model._model_dtype = dtype

    @staticmethod
    def get_VAE_tile_size(vae_config, device_mem_capacity, mixed_precision):
        # VAE tiling heuristic; device_mem_capacity is expected in MB
        if vae_config == 0:
            if mixed_precision:
                device_mem_capacity = device_mem_capacity / 2
            if device_mem_capacity >= 24000:
                use_vae_config = 1
            elif device_mem_capacity >= 8000:
                use_vae_config = 2
            else:
                use_vae_config = 3
        else:
            use_vae_config = vae_config

        if use_vae_config == 1:
            VAE_tile_size = 0
        elif use_vae_config == 2:
            VAE_tile_size = 256
        else:
            VAE_tile_size = 128

        return VAE_tile_size

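    # Usage sketch (added commentary; the MB unit is an assumption based on the
    # thresholds above):
    #   Wan2_2_VAE.get_VAE_tile_size(0, 26000, mixed_precision=False)  # -> 0 (no tiling)
    #   Wan2_2_VAE.get_VAE_tile_size(0, 16000, mixed_precision=False)  # -> 256
    #   Wan2_2_VAE.get_VAE_tile_size(0, 12000, mixed_precision=True)   # -> 128 (capacity halved)
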
    def encode(self, videos, tile_size=256, any_end_frame=False):
        """
        videos: A list of videos each with shape [C, T, H, W].
        """
        original_dtype = videos[0].dtype

        # NOTE: the spatial tiled path is currently disabled by the `and False`
        # guard below, so every video goes through the plain encoder.
        if tile_size > 0 and False:
            return [ self.model.spatial_tiled_encode(u.to(self.dtype).unsqueeze(0), self.scale, tile_size, any_end_frame=any_end_frame).float().squeeze(0) for u in videos ]
        else:
            return [ self.model.encode(u.to(self.dtype).unsqueeze(0), self.scale, any_end_frame=any_end_frame).float().squeeze(0) for u in videos ]

    def decode(self, zs, tile_size, any_end_frame=False):
        # NOTE: as in encode(), the tiled branch is currently disabled by `and False`.
        if tile_size > 0 and False:
            return [ self.model.spatial_tiled_decode(u.to(self.dtype).unsqueeze(0), self.scale, tile_size, any_end_frame=any_end_frame).clamp_(-1, 1).float().squeeze(0) for u in zs ]
        else:
            return [ self.model.decode(u.to(self.dtype).unsqueeze(0), self.scale, any_end_frame=any_end_frame).clamp_(-1, 1).float().squeeze(0) for u in zs ]

    # def encode(self, videos, VAE_tile_size = 0, any_end_frame = False ):
    #     with amp.autocast(dtype=self.dtype):
    #         return [
    #             self.model.encode(u.unsqueeze(0),
    #                                 self.scale).float().squeeze(0)
    #             for u in videos
    #         ]

    # def decode(self, zs, VAE_tile_size = 0, any_end_frame = False):
    #     with amp.autocast(dtype=self.dtype):
    #         return [
    #             self.model.decode(u.unsqueeze(0),
    #                                 self.scale).float().clamp_(-1,
    #                                                             1).squeeze(0)
    #             for u in zs
    #         ]
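

# Usage sketch (added commentary, not part of the upstream file): shapes assume the
# default 48-channel latent; the checkpoint path is a placeholder.
#
#   vae = Wan2_2_VAE(vae_pth="Wan2.2_VAE.pth", dtype=torch.bfloat16, device="cuda")
#   video = torch.rand(3, 21, 480, 832, device="cuda") * 2 - 1  # [C, T, H, W] in [-1, 1]
#   latents = vae.encode([video])              # -> [tensor of shape (48, 6, 30, 52)]
#   recon = vae.decode(latents, tile_size=0)   # -> [tensor of shape (3, 21, 480, 832)]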