# -*- coding: utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
import math
import types

import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
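
# This module adapts timm Vision Transformer backbones for dense prediction in
# the DPT/MiDaS style: forward hooks capture the token sequences produced by
# intermediate transformer blocks, and small post-processing heads reshape
# those tokens into a pyramid of 2D feature maps for a downstream decoder.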


class Slice(nn.Module):
    """Drops the readout token(s), keeping only the patch tokens."""

    def __init__(self, start_index=1):
        super(Slice, self).__init__()
        self.start_index = start_index

    def forward(self, x):
        return x[:, self.start_index:]


class AddReadout(nn.Module):
    """Adds the readout token (or the mean of two) to every patch token."""

    def __init__(self, start_index=1):
        super(AddReadout, self).__init__()
        self.start_index = start_index

    def forward(self, x):
        if self.start_index == 2:
            readout = (x[:, 0] + x[:, 1]) / 2
        else:
            readout = x[:, 0]
        return x[:, self.start_index:] + readout.unsqueeze(1)


class ProjectReadout(nn.Module):
    """Concatenates the readout token to each patch token and projects the
    result back to the original feature width."""

    def __init__(self, in_features, start_index=1):
        super(ProjectReadout, self).__init__()
        self.start_index = start_index

        self.project = nn.Sequential(nn.Linear(2 * in_features, in_features),
                                     nn.GELU())

    def forward(self, x):
        readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index:])
        features = torch.cat((x[:, self.start_index:], readout), -1)

        return self.project(features)
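
# Shape sketch (illustrative, not part of the original file): for a token
# sequence x of shape (B, 1 + N, C), where token 0 is the readout token, each
# readout module maps x to (B, N, C):
#   Slice(1)(x)               # drop the readout token
#   AddReadout(1)(x)          # add it to every patch token
#   ProjectReadout(C, 1)(x)   # concat it to each token, then Linear + GELU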


class Transpose(nn.Module):
    """nn.Module wrapper around Tensor.transpose so it can sit inside an
    nn.Sequential."""

    def __init__(self, dim0, dim1):
        super(Transpose, self).__init__()
        self.dim0 = dim0
        self.dim1 = dim1

    def forward(self, x):
        x = x.transpose(self.dim0, self.dim1)
        return x


def forward_vit(pretrained, x):
    b, c, h, w = x.shape

    # Running the model fires the registered hooks, which deposit the hooked
    # blocks' token sequences into pretrained.activations.
    _ = pretrained.model.forward_flex(x)

    layer_1 = pretrained.activations['1']
    layer_2 = pretrained.activations['2']
    layer_3 = pretrained.activations['3']
    layer_4 = pretrained.activations['4']

    # Each ViT-token head is a Sequential of
    # [readout, Transpose, Unflatten, conv(s)]; apply readout and transpose
    # (indices 0-1) first. (For the hybrid R50 backbone the first two heads
    # are Identity stacks and their activations are already 4D.)
    layer_1 = pretrained.act_postprocess1[0:2](layer_1)
    layer_2 = pretrained.act_postprocess2[0:2](layer_2)
    layer_3 = pretrained.act_postprocess3[0:2](layer_3)
    layer_4 = pretrained.act_postprocess4[0:2](layer_4)

    # Skip the statically sized Unflatten at index 2 and rebuild it from the
    # actual input resolution, so inputs need not match the size the heads
    # were constructed for.
    unflatten = nn.Sequential(
        nn.Unflatten(
            2,
            torch.Size([
                h // pretrained.model.patch_size[1],
                w // pretrained.model.patch_size[0],
            ]),
        ))

    if layer_1.ndim == 3:
        layer_1 = unflatten(layer_1)
    if layer_2.ndim == 3:
        layer_2 = unflatten(layer_2)
    if layer_3.ndim == 3:
        layer_3 = unflatten(layer_3)
    if layer_4.ndim == 3:
        layer_4 = unflatten(layer_4)

    # Finish with the remaining convolutions (index 3 onward).
    layer_1 = pretrained.act_postprocess1[3:len(pretrained.act_postprocess1)](
        layer_1)
    layer_2 = pretrained.act_postprocess2[3:len(pretrained.act_postprocess2)](
        layer_2)
    layer_3 = pretrained.act_postprocess3[3:len(pretrained.act_postprocess3)](
        layer_3)
    layer_4 = pretrained.act_postprocess4[3:len(pretrained.act_postprocess4)](
        layer_4)

    return layer_1, layer_2, layer_3, layer_4
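
# Usage sketch (illustrative, not part of the original file): with the
# ViT-B/16 backbone built below and a 384x384 input,
#   l1, l2, l3, l4 = forward_vit(_make_pretrained_vitb16_384(False),
#                                torch.randn(1, 3, 384, 384))
# yields l1 (1, 96, 96, 96), l2 (1, 192, 48, 48), l3 (1, 384, 24, 24) and
# l4 (1, 768, 12, 12).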


def _resize_pos_embed(self, posemb, gs_h, gs_w):
    # Split the special-token embeddings from the patch-grid embeddings.
    posemb_tok, posemb_grid = (
        posemb[:, :self.start_index],
        posemb[0, self.start_index:],
    )

    gs_old = int(math.sqrt(len(posemb_grid)))

    # Reshape the flat grid to 2D, bilinearly resize it to the new token grid,
    # then flatten it back.
    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old,
                                      -1).permute(0, 3, 1, 2)
    posemb_grid = F.interpolate(posemb_grid,
                                size=(gs_h, gs_w),
                                mode='bilinear')
    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)

    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)

    return posemb
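
# Worked example (illustrative): a vit_*_384 checkpoint stores a 24x24 grid of
# patch position embeddings. For a 512x384 input with 16x16 patches,
# gs_h = 512 // 16 = 32 and gs_w = 384 // 16 = 24, so the (1, C, 24, 24) grid
# is resized to (1, C, 32, 24) and flattened back to (1, 768, C) before the
# special-token embeddings are re-attached.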


def forward_flex(self, x):
    b, c, h, w = x.shape

    pos_embed = self._resize_pos_embed(self.pos_embed, h // self.patch_size[1],
                                       w // self.patch_size[0])

    B = x.shape[0]

    if hasattr(self.patch_embed, 'backbone'):
        x = self.patch_embed.backbone(x)
        if isinstance(x, (list, tuple)):
            # last feature if backbone outputs list/tuple of features
            x = x[-1]

    x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)

    if getattr(self, 'dist_token', None) is not None:
        cls_tokens = self.cls_token.expand(
            B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        dist_token = self.dist_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, dist_token, x), dim=1)
    else:
        cls_tokens = self.cls_token.expand(
            B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)

    x = x + pos_embed
    x = self.pos_drop(x)

    for blk in self.blocks:
        x = blk(x)

    x = self.norm(x)

    return x
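
# forward_flex replaces VisionTransformer.forward so that the backbone accepts
# any input whose height and width are multiples of the 16-pixel patch size;
# the stock forward in the timm versions this code targets assumes the fixed
# token count of the training resolution.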


activations = {}


def get_activation(name):
    def hook(model, input, output):
        activations[name] = output

    return hook
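
# Note: `activations` is a single module-level dict shared by every backbone
# built in this file, so interleaving forward passes of two such backbones in
# one process would overwrite each other's captured features.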


def get_readout_oper(vit_features, features, use_readout, start_index=1):
    if use_readout == 'ignore':
        readout_oper = [Slice(start_index)] * len(features)
    elif use_readout == 'add':
        readout_oper = [AddReadout(start_index)] * len(features)
    elif use_readout == 'project':
        readout_oper = [
            ProjectReadout(vit_features, start_index) for _ in features
        ]
    else:
        raise ValueError(
            "wrong operation for readout token, use_readout can be 'ignore', "
            "'add', or 'project'")

    return readout_oper
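
# 'ignore' and 'add' reuse one parameter-free module instance across all four
# levels (list multiplication copies references), while 'project' builds an
# independent ProjectReadout, with its own Linear weights, for each level.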


def _make_vit_b16_backbone(
    model,
    features=[96, 192, 384, 768],
    size=[384, 384],
    hooks=[2, 5, 8, 11],
    vit_features=768,
    use_readout='ignore',
    start_index=1,
):
    pretrained = nn.Module()

    pretrained.model = model
    pretrained.model.blocks[hooks[0]].register_forward_hook(
        get_activation('1'))
    pretrained.model.blocks[hooks[1]].register_forward_hook(
        get_activation('2'))
    pretrained.model.blocks[hooks[2]].register_forward_hook(
        get_activation('3'))
    pretrained.model.blocks[hooks[3]].register_forward_hook(
        get_activation('4'))

    pretrained.activations = activations

    readout_oper = get_readout_oper(vit_features, features, use_readout,
                                    start_index)

    # Level 1: 1x1 conv to features[0] channels, then 4x transposed-conv
    # upsampling of the 1/16-resolution token grid.
    pretrained.act_postprocess1 = nn.Sequential(
        readout_oper[0],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[0],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.ConvTranspose2d(
            in_channels=features[0],
            out_channels=features[0],
            kernel_size=4,
            stride=4,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )

    # Level 2: same pattern with 2x upsampling.
    pretrained.act_postprocess2 = nn.Sequential(
        readout_oper[1],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[1],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.ConvTranspose2d(
            in_channels=features[1],
            out_channels=features[1],
            kernel_size=2,
            stride=2,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )

    # Level 3: keep the 1/16 resolution.
    pretrained.act_postprocess3 = nn.Sequential(
        readout_oper[2],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[2],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )

    # Level 4: a strided 3x3 conv downsamples to 1/32 resolution.
    pretrained.act_postprocess4 = nn.Sequential(
        readout_oper[3],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[3],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.Conv2d(
            in_channels=features[3],
            out_channels=features[3],
            kernel_size=3,
            stride=2,
            padding=1,
        ),
    )

    pretrained.model.start_index = start_index
    pretrained.model.patch_size = [16, 16]

    # We inject these functions into the VisionTransformer instance so that we
    # can use it with interpolated position embeddings without modifying the
    # library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex,
                                                     pretrained.model)
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model)

    return pretrained
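
# The default hooks pick roughly evenly spaced transformer blocks: [2, 5, 8,
# 11] spans the 12 blocks of ViT-Base, while the ViT-Large builder below uses
# [5, 11, 17, 23] for its 24 blocks.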


def _make_pretrained_vitl16_384(pretrained, use_readout='ignore', hooks=None):
    model = timm.create_model('vit_large_patch16_384', pretrained=pretrained)

    hooks = [5, 11, 17, 23] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model,
        features=[256, 512, 1024, 1024],
        hooks=hooks,
        vit_features=1024,
        use_readout=use_readout,
    )


def _make_pretrained_vitb16_384(pretrained, use_readout='ignore', hooks=None):
    model = timm.create_model('vit_base_patch16_384', pretrained=pretrained)

    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(model,
                                  features=[96, 192, 384, 768],
                                  hooks=hooks,
                                  use_readout=use_readout)


def _make_pretrained_deitb16_384(pretrained, use_readout='ignore', hooks=None):
    model = timm.create_model('vit_deit_base_patch16_384',
                              pretrained=pretrained)

    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(model,
                                  features=[96, 192, 384, 768],
                                  hooks=hooks,
                                  use_readout=use_readout)


def _make_pretrained_deitb16_distil_384(pretrained,
                                        use_readout='ignore',
                                        hooks=None):
    model = timm.create_model('vit_deit_base_distilled_patch16_384',
                              pretrained=pretrained)

    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model,
        features=[96, 192, 384, 768],
        hooks=hooks,
        use_readout=use_readout,
        start_index=2,
    )
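
# start_index=2 because distilled DeiT prepends two special tokens (class and
# distillation): AddReadout then averages the two, Slice drops both, and
# ProjectReadout concatenates the class token to each patch token before
# projecting.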


def _make_vit_b_rn50_backbone(
    model,
    features=[256, 512, 768, 768],
    size=[384, 384],
    hooks=[0, 1, 8, 11],
    vit_features=768,
    use_vit_only=False,
    use_readout='ignore',
    start_index=1,
):
    pretrained = nn.Module()

    pretrained.model = model

    if use_vit_only:
        pretrained.model.blocks[hooks[0]].register_forward_hook(
            get_activation('1'))
        pretrained.model.blocks[hooks[1]].register_forward_hook(
            get_activation('2'))
    else:
        # Take the first two pyramid levels from the ResNet-50 stem instead of
        # transformer blocks.
        pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
            get_activation('1'))
        pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
            get_activation('2'))

    pretrained.model.blocks[hooks[2]].register_forward_hook(
        get_activation('3'))
    pretrained.model.blocks[hooks[3]].register_forward_hook(
        get_activation('4'))

    pretrained.activations = activations

    readout_oper = get_readout_oper(vit_features, features, use_readout,
                                    start_index)

    if use_vit_only:
        pretrained.act_postprocess1 = nn.Sequential(
            readout_oper[0],
            Transpose(1, 2),
            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
            nn.Conv2d(
                in_channels=vit_features,
                out_channels=features[0],
                kernel_size=1,
                stride=1,
                padding=0,
            ),
            nn.ConvTranspose2d(
                in_channels=features[0],
                out_channels=features[0],
                kernel_size=4,
                stride=4,
                padding=0,
                bias=True,
                dilation=1,
                groups=1,
            ),
        )

        pretrained.act_postprocess2 = nn.Sequential(
            readout_oper[1],
            Transpose(1, 2),
            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
            nn.Conv2d(
                in_channels=vit_features,
                out_channels=features[1],
                kernel_size=1,
                stride=1,
                padding=0,
            ),
            nn.ConvTranspose2d(
                in_channels=features[1],
                out_channels=features[1],
                kernel_size=2,
                stride=2,
                padding=0,
                bias=True,
                dilation=1,
                groups=1,
            ),
        )
    else:
        # The ResNet stage outputs are already 2D feature maps at the right
        # resolutions, so levels 1 and 2 pass through unchanged.
        pretrained.act_postprocess1 = nn.Sequential(nn.Identity(),
                                                    nn.Identity(),
                                                    nn.Identity())
        pretrained.act_postprocess2 = nn.Sequential(nn.Identity(),
                                                    nn.Identity(),
                                                    nn.Identity())

    pretrained.act_postprocess3 = nn.Sequential(
        readout_oper[2],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[2],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )

    pretrained.act_postprocess4 = nn.Sequential(
        readout_oper[3],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[3],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.Conv2d(
            in_channels=features[3],
            out_channels=features[3],
            kernel_size=3,
            stride=2,
            padding=1,
        ),
    )

    pretrained.model.start_index = start_index
    pretrained.model.patch_size = [16, 16]

    # We inject these functions into the VisionTransformer instance so that we
    # can use it with interpolated position embeddings without modifying the
    # library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex,
                                                     pretrained.model)
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model)

    return pretrained


def _make_pretrained_vitb_rn50_384(pretrained,
                                   use_readout='ignore',
                                   hooks=None,
                                   use_vit_only=False):
    model = timm.create_model('vit_base_resnet50_384', pretrained=pretrained)
    # model = timm.create_model('vit_base_r50_s16_384.orig_in21k_ft_in1k', pretrained=pretrained)

    hooks = [0, 1, 8, 11] if hooks is None else hooks
    return _make_vit_b_rn50_backbone(
        model,
        features=[256, 512, 768, 768],
        size=[384, 384],
        hooks=hooks,
        use_vit_only=use_vit_only,
        use_readout=use_readout,
    )
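

if __name__ == '__main__':
    # Smoke test (illustrative addition, not part of the original file): build
    # the plain ViT-B/16 backbone with random weights and print the four
    # pyramid shapes. Assumes a timm version that registers
    # 'vit_base_patch16_384'; expected output at 384x384 input:
    #   layer_1 (1, 96, 96, 96), layer_2 (1, 192, 48, 48),
    #   layer_3 (1, 384, 24, 24), layer_4 (1, 768, 12, 12).
    backbone = _make_pretrained_vitb16_384(pretrained=False)
    dummy = torch.randn(1, 3, 384, 384)
    with torch.no_grad():
        for i, feat in enumerate(forward_vit(backbone, dummy), start=1):
            print(f'layer_{i}:', tuple(feat.shape))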