diff --git a/gradio_server.py b/gradio_server.py
index d2dde1b..668bbcf 100644
--- a/gradio_server.py
+++ b/gradio_server.py
@@ -97,6 +97,8 @@ def process_prompt_and_add_tasks(
image_to_end,
video_to_continue,
max_frames,
+ temporal_upsampling,
+ spatial_upsampling,
RIFLEx_setting,
slg_switch,
slg_layers,
@@ -230,6 +232,8 @@ def process_prompt_and_add_tasks(
"image_to_end" : image_end,
"video_to_continue" : video_to_continue ,
"max_frames" : max_frames,
+ "temporal_upsampling" : temporal_upsampling,
+ "spatial_upsampling" : spatial_upsampling,
"RIFLEx_setting" : RIFLEx_setting,
"slg_switch" : slg_switch,
"slg_layers" : slg_layers,
@@ -852,48 +856,63 @@ model_filename = ""
# compile = "transformer"
def preprocess_loras(sd):
+ if wan_model is None:
+ return sd
+ model_filename = wan_model._model_file_name
+
first = next(iter(sd), None)
if first == None:
return sd
- if not first.startswith("lora_unet_"):
- return sd
- new_sd = {}
- print("Converting Lora Safetensors format to Lora Diffusers format")
- alphas = {}
- repl_list = ["cross_attn", "self_attn", "ffn"]
- src_list = ["_" + k + "_" for k in repl_list]
- tgt_list = ["." + k + "." for k in repl_list]
+
+ if first.startswith("lora_unet_"):
+ new_sd = {}
+ print("Converting Lora Safetensors format to Lora Diffusers format")
+ alphas = {}
+ repl_list = ["cross_attn", "self_attn", "ffn"]
+ src_list = ["_" + k + "_" for k in repl_list]
+ tgt_list = ["." + k + "." for k in repl_list]
- for k,v in sd.items():
- k = k.replace("lora_unet_blocks_","diffusion_model.blocks.")
+ for k,v in sd.items():
+ k = k.replace("lora_unet_blocks_","diffusion_model.blocks.")
- for s,t in zip(src_list, tgt_list):
- k = k.replace(s,t)
+ for s,t in zip(src_list, tgt_list):
+ k = k.replace(s,t)
- k = k.replace("lora_up","lora_B")
- k = k.replace("lora_down","lora_A")
+ k = k.replace("lora_up","lora_B")
+ k = k.replace("lora_down","lora_A")
- if "alpha" in k:
- alphas[k] = v
- else:
+ if "alpha" in k:
+ alphas[k] = v
+ else:
+ new_sd[k] = v
+
+ new_alphas = {}
+ for k,v in new_sd.items():
+ if "lora_B" in k:
+ dim = v.shape[1]
+ elif "lora_A" in k:
+ dim = v.shape[0]
+ else:
+ continue
+ # "lora_A.weight" and "lora_B.weight" have the same length, so this strips either suffix
+ alpha_key = k[:-len("lora_X.weight")] + "alpha"
+ if alpha_key in alphas:
+ scale = alphas[alpha_key] / dim
+ new_alphas[alpha_key] = scale
+ else:
+ print(f"Lora alpha '{alpha_key}' is missing")
+ new_sd.update(new_alphas)
+ sd = new_sd
+
+ if "text2video" in model_filename:
+ new_sd = {}
+ # strip the i2v-only image cross-attention keys so an i2v lora can also load on the t2v model
+ for k,v in sd.items():
+ if any(layer in k for layer in ["cross_attn.k_img", "cross_attn.v_img"]):
+ continue
new_sd[k] = v
+ sd = new_sd
- new_alphas = {}
- for k,v in new_sd.items():
- if "lora_B" in k:
- dim = v.shape[1]
- elif "lora_A" in k:
- dim = v.shape[0]
- else:
- continue
- alpha_key = k[:-len("lora_X.weight")] +"alpha"
- if alpha_key in alphas:
- scale = alphas[alpha_key] / dim
- new_alphas[alpha_key] = scale
- else:
- print(f"Lora alpha'{alpha_key}' is missing")
- new_sd.update(new_alphas)
- return new_sd
+ return sd
def download_models(transformer_filename, text_encoder_filename):
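Note on the conversion above: a standalone trace, using a hypothetical key name, of the replacements `preprocess_loras` applies when it detects the `lora_unet_` Safetensors layout.

```python
# Hypothetical key, tracing the Safetensors -> Diffusers renaming above.
src = "lora_unet_blocks_0_cross_attn_q.lora_up.weight"

k = src.replace("lora_unet_blocks_", "diffusion_model.blocks.")
for s, t in zip(["_cross_attn_", "_self_attn_", "_ffn_"],
                [".cross_attn.", ".self_attn.", ".ffn."]):
    k = k.replace(s, t)
k = k.replace("lora_up", "lora_B").replace("lora_down", "lora_A")

print(k)  # diffusion_model.blocks.0.cross_attn.q.lora_B.weight
# The matching alpha key is the same name with "lora_B.weight" replaced by
# "alpha"; its value is rescaled to alpha / rank before being stored back.
```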
@@ -905,7 +924,7 @@ def download_models(transformer_filename, text_encoder_filename):
from huggingface_hub import hf_hub_download, snapshot_download
repoId = "DeepBeepMeep/Wan2.1"
sourceFolderList = ["xlm-roberta-large", "", ]
- fileList = [ [], ["Wan2.1_VAE_bf16.safetensors", "models_clip_open-clip-xlm-roberta-large-vit-huge-14-bf16.safetensors" ] + computeList(text_encoder_filename) + computeList(transformer_filename) ]
+ fileList = [ [], ["Wan2.1_VAE_bf16.safetensors", "models_clip_open-clip-xlm-roberta-large-vit-huge-14-bf16.safetensors", "flownet.pkl" ] + computeList(text_encoder_filename) + computeList(transformer_filename) ]
targetRoot = "ckpts/"
for sourceFolder, files in zip(sourceFolderList,fileList ):
if len(files)==0:
@@ -1094,6 +1113,7 @@ def load_models(i2v):
wan_model, pipe = load_i2v_model(model_filename, "720P" if res720P else "480P")
else:
wan_model, pipe = load_t2v_model(model_filename, "")
+ wan_model._model_file_name = model_filename
kwargs = { "extraModelsToQuantize": None}
if profile == 2 or profile == 4:
kwargs["budgets"] = { "transformer" : 100 if preload == 0 else preload, "text_encoder" : 100, "*" : 1000 }
@@ -1441,6 +1461,8 @@ def generate_video(
image_to_end,
video_to_continue,
max_frames,
+ temporal_upsampling,
+ spatial_upsampling,
RIFLEx_setting,
slg_switch,
slg_layers,
@@ -1693,6 +1715,7 @@ def generate_video(
cfg_star_switch = cfg_star_switch,
cfg_zero_step = cfg_zero_step,
)
+ # samples = torch.empty( (1,2)) #for testing
except Exception as e:
if temp_filename!= None and os.path.isfile(temp_filename):
os.remove(temp_filename)
@@ -1717,8 +1740,6 @@ def generate_video(
VRAM_crash = True
break
- _ , exc_value, exc_traceback = sys.exc_info()
-
state["prompt"] = ""
if VRAM_crash:
new_error = "The generation of the video has encountered an error: it is likely that you have unsufficient VRAM and you should therefore reduce the video resolution or its number of frames."
@@ -1759,17 +1780,61 @@ def generate_video(
file_name = f"{time_flag}_seed{seed}_{sanitize_file_name(prompt[:50]).strip()}.mp4"
else:
file_name = f"{time_flag}_seed{seed}_{sanitize_file_name(prompt[:100]).strip()}.mp4"
- video_path = os.path.join(save_path, file_name)
+ video_path = os.path.join(save_path, file_name)
+ # if False: # for testing
+ # torch.save(sample, "output.pt")
+ # else:
+ # sample = torch.load("output.pt")
+ exp = 0
+ fps = 16
+
+ if len(temporal_upsampling) > 0 or len(spatial_upsampling) > 0:
+ progress_args = [0, status + " - Upsampling"]
+ progress(*progress_args )
+ gen["progress_args"] = progress_args
+
+ if temporal_upsampling == "rife2":
+ exp = 1
+ elif temporal_upsampling == "rife4":
+ exp = 2
+
+ if exp > 0:
+ from rife.inference import temporal_interpolation
+ sample = temporal_interpolation(os.path.join("ckpts", "flownet.pkl"), sample, exp, device="cuda")
+ fps = fps * 2**exp
+
+ if len(spatial_upsampling) > 0:
+ from wan.utils.utils import resize_lanczos
+ if spatial_upsampling == "lanczos1.5":
+ scale = 1.5
+ else:
+ scale = 2
+ sample = (sample + 1) / 2
+ h, w = sample.shape[-2:]
+ # PIL's resize expects integer dimensions, so truncate after scaling
+ h = int(h * scale)
+ w = int(w * scale)
+ new_frames = []
+ for i in range(sample.shape[1]):
+ frame = sample[:, i]
+ frame = resize_lanczos(frame, h, w)
+ frame = frame.unsqueeze(1)
+ new_frames.append(frame)
+ sample = torch.cat(new_frames, dim=1)
+ new_frames = None
+ sample = sample * 2 - 1
+
+
cache_video(
tensor=sample[None],
save_file=video_path,
- fps=16,
+ fps=fps,
nrow=1,
normalize=True,
value_range=(-1, 1))
+
configs = get_settings_dict(state, image2video, prompt, 0 if image_to_end == None else 1 , video_length, resolution, num_inference_steps, seed, repeat_generation, multi_images_gen_type, guidance_scale, flow_shift, negative_prompt, loras_choices,
- loras_mult_choices, tea_cache , tea_cache_start_step_perc, RIFLEx_setting, slg_switch, slg_layers, slg_start, slg_end, cfg_star_switch, cfg_zero_step)
+ loras_mult_choices, tea_cache , tea_cache_start_step_perc, temporal_upsampling, spatial_upsampling, RIFLEx_setting, slg_switch, slg_layers, slg_start, slg_end, cfg_star_switch, cfg_zero_step)
metadata_choice = server_config.get("metadata_choice","metadata")
if metadata_choice == "json":
@@ -2231,7 +2296,7 @@ def switch_advanced(state, new_advanced, lset_name):
def get_settings_dict(state, i2v, prompt, image_prompt_type, video_length, resolution, num_inference_steps, seed, repeat_generation, multi_images_gen_type, guidance_scale, flow_shift, negative_prompt, loras_choices,
- loras_mult_choices, tea_cache_setting, tea_cache_start_step_perc, RIFLEx_setting, slg_switch, slg_layers, slg_start_perc, slg_end_perc, cfg_star_switch, cfg_zero_step):
+ loras_mult_choices, tea_cache_setting, tea_cache_start_step_perc, temporal_upsampling, spatial_upsampling, RIFLEx_setting, slg_switch, slg_layers, slg_start_perc, slg_end_perc, cfg_star_switch, cfg_zero_step):
loras = state["loras"]
activated_loras = [Path( loras[int(no)]).parts[-1] for no in loras_choices ]
@@ -2251,6 +2316,8 @@ def get_settings_dict(state, i2v, prompt, image_prompt_type, video_length, resol
"loras_multipliers": loras_mult_choices,
"tea_cache": tea_cache_setting,
"tea_cache_start_step_perc": tea_cache_start_step_perc,
+ "temporal_upsampling" : temporal_upsampling,
+ "spatial_upsampling" : spatial_upsampling,
"RIFLEx_setting": RIFLEx_setting,
"slg_switch": slg_switch,
"slg_layers": slg_layers,
@@ -2269,14 +2336,14 @@ def get_settings_dict(state, i2v, prompt, image_prompt_type, video_length, resol
return ui_settings
def save_settings(state, prompt, image_prompt_type, video_length, resolution, num_inference_steps, seed, repeat_generation, multi_images_gen_type, guidance_scale, flow_shift, negative_prompt, loras_choices,
- loras_mult_choices, tea_cache_setting, tea_cache_start_step_perc, RIFLEx_setting, slg_switch, slg_layers, slg_start_perc, slg_end_perc, cfg_star_switch, cfg_zero_step):
+ loras_mult_choices, tea_cache_setting, tea_cache_start_step_perc, temporal_upsampling, spatial_upsampling, RIFLEx_setting, slg_switch, slg_layers, slg_start_perc, slg_end_perc, cfg_star_switch, cfg_zero_step):
if state.get("validate_success",0) != 1:
return
image2video = state["image2video"]
ui_defaults = get_settings_dict(state, image2video, prompt, image_prompt_type, video_length, resolution, num_inference_steps, seed, repeat_generation, multi_images_gen_type, guidance_scale, flow_shift, negative_prompt, loras_choices,
- loras_mult_choices, tea_cache_setting, tea_cache_start_step_perc, RIFLEx_setting, slg_switch, slg_layers, slg_start_perc, slg_end_perc, cfg_star_switch, cfg_zero_step)
+ loras_mult_choices, tea_cache_setting, tea_cache_start_step_perc, temporal_upsampling, spatial_upsampling, RIFLEx_setting, slg_switch, slg_layers, slg_start_perc, slg_end_perc, cfg_star_switch, cfg_zero_step)
defaults_filename = get_settings_file_name(image2video)
@@ -2538,6 +2605,32 @@ def generate_video_tab(image2video=False):
)
tea_cache_start_step_perc = gr.Slider(0, 100, value=ui_defaults["tea_cache_start_step_perc"], step=1, label="Tea Cache starting moment in % of generation")
+ with gr.Row():
+ gr.Markdown("Upsampling")
+ with gr.Row():
+ temporal_upsampling_choice = gr.Dropdown(
+ choices=[
+ ("Disabled", ""),
+ ("Rife x2 (32 frames/s)", "rife2"),
+ ("Rife x4 (64 frames/s)", "rife4"),
+ ],
+ value=ui_defaults.get("temporal_upsampling", ""),
+ visible=True,
+ scale = 1,
+ label="Temporal Upsampling"
+ )
+ spatial_upsampling_choice = gr.Dropdown(
+ choices=[
+ ("Disabled", ""),
+ ("Lanczos x1.5", "lanczos1.5"),
+ ("Lanczos x2.0", "lanczos2"),
+ ],
+ value=ui_defaults.get("spatial_upsampling", ""),
+ visible=True,
+ scale = 1,
+ label="Spatial Upsampling"
+ )
+
gr.Markdown("With Riflex you can generate videos longer than 5s which is the default duration of videos used to train the model")
RIFLEx_setting = gr.Dropdown(
choices=[
@@ -2699,7 +2792,7 @@ def generate_video_tab(image2video=False):
)
save_settings_btn.click( fn=validate_wizard_prompt, inputs =[state, wizard_prompt_activated_var, wizard_variables_var, prompt, wizard_prompt, *prompt_vars] , outputs= [prompt]).then(
save_settings, inputs = [state, prompt, image_prompt_type_radio, video_length, resolution, num_inference_steps, seed, repeat_generation, multi_images_gen_type, guidance_scale, flow_shift, negative_prompt,
- loras_choices, loras_mult_choices, tea_cache_setting, tea_cache_start_step_perc, RIFLEx_setting, slg_switch, slg_layers,
+ loras_choices, loras_mult_choices, tea_cache_setting, tea_cache_start_step_perc, temporal_upsampling_choice, spatial_upsampling_choice, RIFLEx_setting, slg_switch, slg_layers,
slg_start_perc, slg_end_perc, cfg_star_switch, cfg_zero_step ], outputs = [])
save_lset_btn.click(validate_save_lset, inputs=[lset_name], outputs=[apply_lset_btn, refresh_lora_btn, delete_lset_btn, save_lset_btn,confirm_save_lset_btn, cancel_lset_btn, save_lset_prompt_drop])
confirm_save_lset_btn.click(fn=validate_wizard_prompt, inputs =[state, wizard_prompt_activated_var, wizard_variables_var, prompt, wizard_prompt, *prompt_vars] , outputs= [prompt]).then(
@@ -2758,6 +2851,8 @@ def generate_video_tab(image2video=False):
image_to_end,
video_to_continue,
max_frames,
+ temporal_upsampling_choice,
+ spatial_upsampling_choice,
RIFLEx_setting,
slg_switch,
slg_layers,
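For readers skimming the UI wiring: a minimal sketch of the new post-processing path added to `generate_video`, assuming `sample` is the decoded clip as a `(C, T, H, W)` tensor in `[-1, 1]`; the function names match the diff, but the standalone wrapper here is illustrative.

```python
import os
import torch

def postprocess(sample, temporal_upsampling="", spatial_upsampling="", fps=16):
    # "rife2" doubles the frame count once (2x fps), "rife4" doubles it twice (4x fps)
    exp = {"rife2": 1, "rife4": 2}.get(temporal_upsampling, 0)
    if exp > 0:
        from rife.inference import temporal_interpolation
        sample = temporal_interpolation(os.path.join("ckpts", "flownet.pkl"),
                                        sample, exp, device="cuda")
        fps *= 2 ** exp

    if spatial_upsampling:
        from wan.utils.utils import resize_lanczos
        scale = 1.5 if spatial_upsampling == "lanczos1.5" else 2.0
        sample = (sample + 1) / 2                  # PIL path expects [0, 1]
        h, w = sample.shape[-2:]
        h, w = int(h * scale), int(w * scale)
        frames = [resize_lanczos(sample[:, i], h, w).unsqueeze(1)
                  for i in range(sample.shape[1])]
        sample = torch.cat(frames, dim=1) * 2 - 1  # back to [-1, 1]
    return sample, fps
```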
diff --git a/rife/IFNet_HDv3.py b/rife/IFNet_HDv3.py
new file mode 100644
index 0000000..53e512b
--- /dev/null
+++ b/rife/IFNet_HDv3.py
@@ -0,0 +1,133 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+# from ..model.warplayer import warp
+
+# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+backwarp_tenGrid = {}
+
+def warp(tenInput, tenFlow, device):
+ k = (str(tenFlow.device), str(tenFlow.size()))
+ if k not in backwarp_tenGrid:
+ tenHorizontal = torch.linspace(-1.0, 1.0, tenFlow.shape[3], device=device).view(
+ 1, 1, 1, tenFlow.shape[3]).expand(tenFlow.shape[0], -1, tenFlow.shape[2], -1)
+ tenVertical = torch.linspace(-1.0, 1.0, tenFlow.shape[2], device=device).view(
+ 1, 1, tenFlow.shape[2], 1).expand(tenFlow.shape[0], -1, -1, tenFlow.shape[3])
+ backwarp_tenGrid[k] = torch.cat(
+ [tenHorizontal, tenVertical], 1).to(device)
+
+ tenFlow = torch.cat([tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0),
+ tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0)], 1)
+
+ g = (backwarp_tenGrid[k] + tenFlow).permute(0, 2, 3, 1)
+ return torch.nn.functional.grid_sample(input=tenInput, grid=g, mode='bilinear', padding_mode='border', align_corners=True)
+
+def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
+ return nn.Sequential(
+ nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
+ padding=padding, dilation=dilation, bias=True),
+ nn.PReLU(out_planes)
+ )
+
+def conv_bn(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
+ return nn.Sequential(
+ nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
+ padding=padding, dilation=dilation, bias=False),
+ nn.BatchNorm2d(out_planes),
+ nn.PReLU(out_planes)
+ )
+
+class IFBlock(nn.Module):
+ def __init__(self, in_planes, c=64):
+ super(IFBlock, self).__init__()
+ self.conv0 = nn.Sequential(
+ conv(in_planes, c//2, 3, 2, 1),
+ conv(c//2, c, 3, 2, 1),
+ )
+ self.convblock0 = nn.Sequential(
+ conv(c, c),
+ conv(c, c)
+ )
+ self.convblock1 = nn.Sequential(
+ conv(c, c),
+ conv(c, c)
+ )
+ self.convblock2 = nn.Sequential(
+ conv(c, c),
+ conv(c, c)
+ )
+ self.convblock3 = nn.Sequential(
+ conv(c, c),
+ conv(c, c)
+ )
+ self.conv1 = nn.Sequential(
+ nn.ConvTranspose2d(c, c//2, 4, 2, 1),
+ nn.PReLU(c//2),
+ nn.ConvTranspose2d(c//2, 4, 4, 2, 1),
+ )
+ self.conv2 = nn.Sequential(
+ nn.ConvTranspose2d(c, c//2, 4, 2, 1),
+ nn.PReLU(c//2),
+ nn.ConvTranspose2d(c//2, 1, 4, 2, 1),
+ )
+
+ def forward(self, x, flow, scale=1):
+ x = F.interpolate(x, scale_factor= 1. / scale, mode="bilinear", align_corners=False, recompute_scale_factor=False)
+ flow = F.interpolate(flow, scale_factor= 1. / scale, mode="bilinear", align_corners=False, recompute_scale_factor=False) * 1. / scale
+ feat = self.conv0(torch.cat((x, flow), 1))
+ feat = self.convblock0(feat) + feat
+ feat = self.convblock1(feat) + feat
+ feat = self.convblock2(feat) + feat
+ feat = self.convblock3(feat) + feat
+ flow = self.conv1(feat)
+ mask = self.conv2(feat)
+ flow = F.interpolate(flow, scale_factor=scale, mode="bilinear", align_corners=False, recompute_scale_factor=False) * scale
+ mask = F.interpolate(mask, scale_factor=scale, mode="bilinear", align_corners=False, recompute_scale_factor=False)
+ return flow, mask
+
+class IFNet(nn.Module):
+ def __init__(self):
+ super(IFNet, self).__init__()
+ self.block0 = IFBlock(7+4, c=90)
+ self.block1 = IFBlock(7+4, c=90)
+ self.block2 = IFBlock(7+4, c=90)
+ self.block_tea = IFBlock(10+4, c=90)
+ # self.contextnet = Contextnet()
+ # self.unet = Unet()
+
+ def forward(self, x, scale_list=[4, 2, 1], training=False):
+ if training == False:
+ channel = x.shape[1] // 2
+ img0 = x[:, :channel]
+ img1 = x[:, channel:]
+ flow_list = []
+ merged = []
+ mask_list = []
+ warped_img0 = img0
+ warped_img1 = img1
+ flow = (x[:, :4]).detach() * 0
+ mask = (x[:, :1]).detach() * 0
+ loss_cons = 0
+ block = [self.block0, self.block1, self.block2]
+ for i in range(3):
+ f0, m0 = block[i](torch.cat((warped_img0[:, :3], warped_img1[:, :3], mask), 1), flow, scale=scale_list[i])
+ f1, m1 = block[i](torch.cat((warped_img1[:, :3], warped_img0[:, :3], -mask), 1), torch.cat((flow[:, 2:4], flow[:, :2]), 1), scale=scale_list[i])
+ flow = flow + (f0 + torch.cat((f1[:, 2:4], f1[:, :2]), 1)) / 2
+ mask = mask + (m0 + (-m1)) / 2
+ mask_list.append(mask)
+ flow_list.append(flow)
+ warped_img0 = warp(img0, flow[:, :2], device= flow.device)
+ warped_img1 = warp(img1, flow[:, 2:4], device= flow.device)
+ merged.append((warped_img0, warped_img1))
+ '''
+ c0 = self.contextnet(img0, flow[:, :2])
+ c1 = self.contextnet(img1, flow[:, 2:4])
+ tmp = self.unet(img0, img1, warped_img0, warped_img1, mask, flow, c0, c1)
+ res = tmp[:, 1:4] * 2 - 1
+ '''
+ for i in range(3):
+ mask_list[i] = torch.sigmoid(mask_list[i])
+ merged[i] = merged[i][0] * mask_list[i] + merged[i][1] * (1 - mask_list[i])
+ # merged[i] = torch.clamp(merged[i] + res, 0, 1)
+ return flow_list, mask_list[2], merged
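A quick sanity check for the `warp` helper above (illustrative, not part of the diff): backward warping samples the input at `position + flow`, so a constant flow of +1 pixel in x shifts image content one column to the left, with border padding repeating the last column.

```python
import torch
from rife.IFNet_HDv3 import warp

img = torch.arange(16.).reshape(1, 1, 4, 4)   # first row is [0, 1, 2, 3]
flow = torch.zeros(1, 2, 4, 4)
flow[:, 0] = 1.0                              # x-component of the flow, in pixels

out = warp(img, flow, device="cpu")
print(img[0, 0, 0])  # tensor([0., 1., 2., 3.])
print(out[0, 0, 0])  # tensor([1., 2., 3., 3.])
```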
diff --git a/rife/RIFE_HDv3.py b/rife/RIFE_HDv3.py
new file mode 100644
index 0000000..75c672d
--- /dev/null
+++ b/rife/RIFE_HDv3.py
@@ -0,0 +1,84 @@
+import torch
+import torch.nn as nn
+import numpy as np
+from torch.optim import AdamW
+import torch.optim as optim
+import itertools
+from torch.nn.parallel import DistributedDataParallel as DDP
+from .IFNet_HDv3 import *
+import torch.nn.functional as F
+# from ..model.loss import *
+
+# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+class Model:
+ def __init__(self, local_rank=-1):
+ self.flownet = IFNet()
+ # self.device()
+ # self.optimG = AdamW(self.flownet.parameters(), lr=1e-6, weight_decay=1e-4)
+ # self.epe = EPE()
+ # self.vgg = VGGPerceptualLoss().to(device)
+ # self.sobel = SOBEL()
+ if local_rank != -1:
+ self.flownet = DDP(self.flownet, device_ids=[local_rank], output_device=local_rank)
+
+ def train(self):
+ self.flownet.train()
+
+ def eval(self):
+ self.flownet.eval()
+
+ def to(self, device):
+ self.flownet.to(device)
+
+ def load_model(self, path, rank=0, device = "cuda"):
+ self.device = device
+ def convert(param):
+ if rank == -1:
+ return {
+ k.replace("module.", ""): v
+ for k, v in param.items()
+ if "module." in k
+ }
+ else:
+ return param
+ self.flownet.load_state_dict(convert(torch.load(path, map_location=device)))
+
+ def save_model(self, path, rank=0):
+ if rank == 0:
+ torch.save(self.flownet.state_dict(),'{}/flownet.pkl'.format(path))
+
+ def inference(self, img0, img1, scale=1.0):
+ imgs = torch.cat((img0, img1), 1)
+ scale_list = [4/scale, 2/scale, 1/scale]
+ flow, mask, merged = self.flownet(imgs, scale_list)
+ return merged[2]
+
+ # Training-only code kept from upstream RIFE; it references the optimizer and
+ # losses commented out in __init__ (optimG, sobel, loss_cons) and is not used
+ # by the inference path in this repository.
+ def update(self, imgs, gt, learning_rate=0, mul=1, training=True, flow_gt=None):
+ for param_group in self.optimG.param_groups:
+ param_group['lr'] = learning_rate
+ img0 = imgs[:, :3]
+ img1 = imgs[:, 3:]
+ if training:
+ self.train()
+ else:
+ self.eval()
+ scale = [4, 2, 1]
+ flow, mask, merged = self.flownet(torch.cat((imgs, gt), 1), scale=scale, training=training)
+ loss_l1 = (merged[2] - gt).abs().mean()
+ loss_smooth = self.sobel(flow[2], flow[2]*0).mean()
+ # loss_vgg = self.vgg(merged[2], gt)
+ if training:
+ self.optimG.zero_grad()
+ loss_G = loss_cons + loss_smooth * 0.1
+ loss_G.backward()
+ self.optimG.step()
+ else:
+ flow_teacher = flow[2]
+ return merged[2], {
+ 'mask': mask,
+ 'flow': flow[2][:, :2],
+ 'loss_l1': loss_l1,
+ 'loss_cons': loss_cons,
+ 'loss_smooth': loss_smooth,
+ }
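Hedged usage sketch for the wrapper above: estimating the midpoint frame between two images. It assumes `ckpts/flownet.pkl` has already been downloaded and that the inputs are `(1, 3, H, W)` tensors in `[0, 1]` whose sides are multiples of 32 (the caller in `rife/inference.py` pads to that granularity).

```python
import torch
from rife.RIFE_HDv3 import Model

model = Model()
model.load_model("ckpts/flownet.pkl", -1, device="cuda")  # rank=-1 strips "module." prefixes
model.eval()
model.to("cuda")

img0 = torch.rand(1, 3, 256, 448, device="cuda")
img1 = torch.rand(1, 3, 256, 448, device="cuda")
with torch.no_grad():
    mid = model.inference(img0, img1)  # frame at t=0.5, same shape as the inputs
```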
diff --git a/rife/inference.py b/rife/inference.py
new file mode 100644
index 0000000..24a2bdd
--- /dev/null
+++ b/rife/inference.py
@@ -0,0 +1,119 @@
+import os
+import torch
+from torch.nn import functional as F
+# from .model.pytorch_msssim import ssim_matlab
+from .ssim import ssim_matlab
+
+from .RIFE_HDv3 import Model
+
+def get_frame(frames, frame_no):
+ if frame_no >= frames.shape[1]:
+ return None
+ frame = (frames[:, frame_no] + 1) / 2
+ frame = frame.clip(0., 1.)
+ return frame
+
+def add_frame(frames, frame, h, w):
+ frame = (frame * 2) - 1
+ frame = frame.clip(-1., 1.)
+ frame = frame.squeeze(0)
+ frame = frame[:, :h, :w]
+ frame = frame.unsqueeze(1)
+ frames.append(frame.cpu())
+
+def process_frames(model, device, frames, exp):
+ pos = 0
+ output_frames = []
+
+ lastframe = get_frame(frames, 0)
+ _, h, w = lastframe.shape
+ scale = 1
+ fp16 = False
+
+ def make_inference(I0, I1, n):
+ middle = model.inference(I0, I1, scale)
+ if n == 1:
+ return [middle]
+ first_half = make_inference(I0, middle, n=n//2)
+ second_half = make_inference(middle, I1, n=n//2)
+ if n%2:
+ return [*first_half, middle, *second_half]
+ else:
+ return [*first_half, *second_half]
+
+ tmp = max(32, int(32 / scale))
+ ph = ((h - 1) // tmp + 1) * tmp
+ pw = ((w - 1) // tmp + 1) * tmp
+ padding = (0, pw - w, 0, ph - h)
+
+ def pad_image(img):
+ if(fp16):
+ return F.pad(img, padding).half()
+ else:
+ return F.pad(img, padding)
+
+ I1 = lastframe.to(device, non_blocking=True).unsqueeze(0)
+ I1 = pad_image(I1)
+ temp = None # holds a read-ahead frame when a near-static frame is skipped
+
+ while True:
+ if temp is not None:
+ frame = temp
+ temp = None
+ else:
+ pos += 1
+ frame = get_frame(frames, pos)
+ if frame is None:
+ break
+ I0 = I1
+ I1 = frame.to(device, non_blocking=True).unsqueeze(0)
+ I1 = pad_image(I1)
+ I0_small = F.interpolate(I0, (32, 32), mode='bilinear', align_corners=False)
+ I1_small = F.interpolate(I1, (32, 32), mode='bilinear', align_corners=False)
+ ssim = ssim_matlab(I0_small[:, :3], I1_small[:, :3])
+
+ break_flag = False
+ if ssim > 0.996:
+ pos += 1
+ frame = get_frame(frames, pos)
+ if frame is None:
+ break_flag = True
+ frame = lastframe
+ else:
+ temp = frame
+ I1 = frame.to(device, non_blocking=True).unsqueeze(0)
+ I1 = pad_image(I1)
+ I1 = model.inference(I0, I1, scale)
+ I1_small = F.interpolate(I1, (32, 32), mode='bilinear', align_corners=False)
+ ssim = ssim_matlab(I0_small[:, :3], I1_small[:, :3])
+ frame = I1[0]
+
+ if ssim < 0.2:
+ output = []
+ for _ in range((2 ** exp) - 1):
+ output.append(I0)
+ else:
+ output = make_inference(I0, I1, 2**exp-1) if exp else []
+
+ add_frame(output_frames, lastframe, h, w)
+ for mid in output:
+ add_frame(output_frames, mid, h, w)
+ lastframe = frame
+ if break_flag:
+ break
+
+ add_frame(output_frames, lastframe, h, w)
+ return torch.cat( output_frames, dim=1)
+
+def temporal_interpolation(model_path, frames, exp, device="cuda"):
+ model = Model()
+ model.load_model(model_path, -1, device=device)
+
+ model.eval()
+ model.to(device=device)
+
+ with torch.no_grad():
+ output = process_frames(model, device, frames, exp)
+
+ return output
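Usage sketch, assuming a CUDA device and the downloaded `flownet.pkl`: `frames` is `(C, T, H, W)` in `[-1, 1]`, and with `exp=1` the output normally grows to `(T - 1) * 2 + 1` frames (the SSIM gate above can merge near-duplicate frames, so the exact count may vary). Frames are moved to the GPU one pair at a time and the result is assembled on the CPU.

```python
import torch
from rife.inference import temporal_interpolation

frames = torch.rand(3, 17, 256, 448) * 2 - 1   # toy clip in [-1, 1]
out = temporal_interpolation("ckpts/flownet.pkl", frames, exp=1, device="cuda")
print(out.shape)  # typically torch.Size([3, 33, 256, 448])
```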
diff --git a/rife/ssim.py b/rife/ssim.py
new file mode 100644
index 0000000..a4d3032
--- /dev/null
+++ b/rife/ssim.py
@@ -0,0 +1,200 @@
+import torch
+import torch.nn.functional as F
+from math import exp
+import numpy as np
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+def gaussian(window_size, sigma):
+ gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
+ return gauss/gauss.sum()
+
+
+def create_window(window_size, channel=1):
+ _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
+ _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0).to(device)
+ window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
+ return window
+
+def create_window_3d(window_size, channel=1):
+ _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
+ _2D_window = _1D_window.mm(_1D_window.t())
+ _3D_window = _2D_window.unsqueeze(2) @ (_1D_window.t())
+ window = _3D_window.expand(1, channel, window_size, window_size, window_size).contiguous().to(device)
+ return window
+
+
+def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
+ # Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
+ if val_range is None:
+ if torch.max(img1) > 128:
+ max_val = 255
+ else:
+ max_val = 1
+
+ if torch.min(img1) < -0.5:
+ min_val = -1
+ else:
+ min_val = 0
+ L = max_val - min_val
+ else:
+ L = val_range
+
+ padd = 0
+ (_, channel, height, width) = img1.size()
+ if window is None:
+ real_size = min(window_size, height, width)
+ window = create_window(real_size, channel=channel).to(img1.device)
+
+ # mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
+ # mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
+ mu1 = F.conv2d(F.pad(img1, (5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=channel)
+ mu2 = F.conv2d(F.pad(img2, (5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=channel)
+
+ mu1_sq = mu1.pow(2)
+ mu2_sq = mu2.pow(2)
+ mu1_mu2 = mu1 * mu2
+
+ sigma1_sq = F.conv2d(F.pad(img1 * img1, (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu1_sq
+ sigma2_sq = F.conv2d(F.pad(img2 * img2, (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu2_sq
+ sigma12 = F.conv2d(F.pad(img1 * img2, (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu1_mu2
+
+ C1 = (0.01 * L) ** 2
+ C2 = (0.03 * L) ** 2
+
+ v1 = 2.0 * sigma12 + C2
+ v2 = sigma1_sq + sigma2_sq + C2
+ cs = torch.mean(v1 / v2) # contrast sensitivity
+
+ ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
+
+ if size_average:
+ ret = ssim_map.mean()
+ else:
+ ret = ssim_map.mean(1).mean(1).mean(1)
+
+ if full:
+ return ret, cs
+ return ret
+
+
+def ssim_matlab(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
+ # Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
+ if val_range is None:
+ if torch.max(img1) > 128:
+ max_val = 255
+ else:
+ max_val = 1
+
+ if torch.min(img1) < -0.5:
+ min_val = -1
+ else:
+ min_val = 0
+ L = max_val - min_val
+ else:
+ L = val_range
+
+ padd = 0
+ (_, _, height, width) = img1.size()
+ if window is None:
+ real_size = min(window_size, height, width)
+ window = create_window_3d(real_size, channel=1).to(img1.device)
+ # Channel is set to 1 since we consider color images as volumetric images
+
+ img1 = img1.unsqueeze(1)
+ img2 = img2.unsqueeze(1)
+
+ mu1 = F.conv3d(F.pad(img1, (5, 5, 5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=1)
+ mu2 = F.conv3d(F.pad(img2, (5, 5, 5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=1)
+
+ mu1_sq = mu1.pow(2)
+ mu2_sq = mu2.pow(2)
+ mu1_mu2 = mu1 * mu2
+
+ sigma1_sq = F.conv3d(F.pad(img1 * img1, (5, 5, 5, 5, 5, 5), 'replicate'), window, padding=padd, groups=1) - mu1_sq
+ sigma2_sq = F.conv3d(F.pad(img2 * img2, (5, 5, 5, 5, 5, 5), 'replicate'), window, padding=padd, groups=1) - mu2_sq
+ sigma12 = F.conv3d(F.pad(img1 * img2, (5, 5, 5, 5, 5, 5), 'replicate'), window, padding=padd, groups=1) - mu1_mu2
+
+ C1 = (0.01 * L) ** 2
+ C2 = (0.03 * L) ** 2
+
+ v1 = 2.0 * sigma12 + C2
+ v2 = sigma1_sq + sigma2_sq + C2
+ cs = torch.mean(v1 / v2) # contrast sensitivity
+
+ ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
+
+ if size_average:
+ ret = ssim_map.mean()
+ else:
+ ret = ssim_map.mean(1).mean(1).mean(1)
+
+ if full:
+ return ret, cs
+ return ret
+
+
+def msssim(img1, img2, window_size=11, size_average=True, val_range=None, normalize=False):
+ device = img1.device
+ weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
+ levels = weights.size()[0]
+ mssim = []
+ mcs = []
+ for _ in range(levels):
+ sim, cs = ssim(img1, img2, window_size=window_size, size_average=size_average, full=True, val_range=val_range)
+ mssim.append(sim)
+ mcs.append(cs)
+
+ img1 = F.avg_pool2d(img1, (2, 2))
+ img2 = F.avg_pool2d(img2, (2, 2))
+
+ mssim = torch.stack(mssim)
+ mcs = torch.stack(mcs)
+
+ # Normalize (to avoid NaNs during training unstable models, not compliant with original definition)
+ if normalize:
+ mssim = (mssim + 1) / 2
+ mcs = (mcs + 1) / 2
+
+ pow1 = mcs ** weights
+ pow2 = mssim ** weights
+ # From Matlab implementation https://ece.uwaterloo.ca/~z70wang/research/iwssim/
+ output = torch.prod(pow1[:-1] * pow2[-1])
+ return output
+
+
+# Classes to re-use window
+class SSIM(torch.nn.Module):
+ def __init__(self, window_size=11, size_average=True, val_range=None):
+ super(SSIM, self).__init__()
+ self.window_size = window_size
+ self.size_average = size_average
+ self.val_range = val_range
+
+ # Assume 3 channel for SSIM
+ self.channel = 3
+ self.window = create_window(window_size, channel=self.channel)
+
+ def forward(self, img1, img2):
+ (_, channel, _, _) = img1.size()
+
+ if channel == self.channel and self.window.dtype == img1.dtype:
+ window = self.window
+ else:
+ window = create_window(self.window_size, channel).to(img1.device).type(img1.dtype)
+ self.window = window
+ self.channel = channel
+
+ _ssim = ssim(img1, img2, window=window, window_size=self.window_size, size_average=self.size_average)
+ dssim = (1 - _ssim) / 2
+ return dssim
+
+class MSSSIM(torch.nn.Module):
+ def __init__(self, window_size=11, size_average=True, channel=3):
+ super(MSSSIM, self).__init__()
+ self.window_size = window_size
+ self.size_average = size_average
+ self.channel = channel
+
+ def forward(self, img1, img2):
+ return msssim(img1, img2, window_size=self.window_size, size_average=self.size_average)
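For context, the only consumer of this file so far is the gate in `rife/inference.py`, which scores 32x32 thumbnails of consecutive frames; an illustrative call:

```python
import torch
from rife.ssim import ssim_matlab

a = torch.rand(1, 3, 32, 32)
b = (a + 0.01 * torch.randn_like(a)).clamp(0., 1.)

print(float(ssim_matlab(a, a)))  # 1.0 for identical frames
print(float(ssim_matlab(a, b)))  # slightly below 1.0; > 0.996 flags a near-duplicate
```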
diff --git a/wan/image2video.py b/wan/image2video.py
index a71e9d4..ed08d44 100644
--- a/wan/image2video.py
+++ b/wan/image2video.py
@@ -25,8 +25,7 @@ from .utils.fm_solvers import (FlowDPMSolverMultistepScheduler,
get_sampling_sigmas, retrieve_timesteps)
from .utils.fm_solvers_unipc import FlowUniPCMultistepScheduler
from wan.modules.posemb_layers import get_rotary_pos_embed
-
-from PIL import Image
+from wan.utils.utils import resize_lanczos
def optimized_scale(positive_flat, negative_flat):
@@ -41,10 +40,6 @@ def optimized_scale(positive_flat, negative_flat):
return st_star
-def resize_lanczos(img, h, w):
- img = Image.fromarray(np.clip(255. * img.movedim(0, -1).cpu().numpy(), 0, 255).astype(np.uint8))
- img = img.resize((w,h), resample=Image.Resampling.LANCZOS)
- return torch.from_numpy(np.array(img).astype(np.float32) / 255.0).movedim(-1, 0)
class WanI2V:
@@ -285,21 +280,6 @@ class WanI2V:
self.clip.model.cpu()
from mmgp import offload
-
-
- # img_interpolated.save('aaa.png')
-
- # img_interpolated = torch.from_numpy(np.array(img_interpolated).astype(np.float32) / 255.0).movedim(-1, 0)
-
- # img_interpolated = torch.nn.functional.interpolate(img[None].cpu(), size=(h, w), mode='lanczos')
- # img_interpolated = img_interpolated.squeeze(0).transpose(0,2).transpose(1,0)
- # img_interpolated = img_interpolated.clamp(-1, 1)
- # img_interpolated = (img_interpolated + 1)/2
- # img_interpolated = (img_interpolated*255).type(torch.uint8)
- # img_interpolated = img_interpolated.cpu().numpy()
- # xxx = Image.fromarray(img_interpolated, 'RGB')
- # xxx.save('my.png')
-
offload.last_offload_obj.unload_all()
if any_end_frame:
mean2 = 0
diff --git a/wan/utils/utils.py b/wan/utils/utils.py
index d725999..e19c298 100644
--- a/wan/utils/utils.py
+++ b/wan/utils/utils.py
@@ -7,9 +7,16 @@ import os.path as osp
import imageio
import torch
import torchvision
+from PIL import Image
+import numpy as np
__all__ = ['cache_video', 'cache_image', 'str2bool']
+def resize_lanczos(img, h, w):
+ img = Image.fromarray(np.clip(255. * img.movedim(0, -1).cpu().numpy(), 0, 255).astype(np.uint8))
+ img = img.resize((w,h), resample=Image.Resampling.LANCZOS)
+ return torch.from_numpy(np.array(img).astype(np.float32) / 255.0).movedim(-1, 0)
+
def rand_name(length=8, suffix=''):
name = binascii.b2a_hex(os.urandom(length)).decode('utf-8')
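The relocated helper keeps its original contract: a `(C, H, W)` tensor in `[0, 1]`, resized on the CPU through PIL's Lanczos filter. A quick illustrative check:

```python
import torch
from wan.utils.utils import resize_lanczos

frame = torch.rand(3, 256, 448)        # values outside [0, 1] are clipped
big = resize_lanczos(frame, 384, 672)  # note the (h, w) argument order
print(big.shape)                       # torch.Size([3, 384, 672])
```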