From 6490af145a0f9cb206809b3df6f6c1a84a812a8c Mon Sep 17 00:00:00 2001
From: deepbeepmeep
Date: Sat, 30 Aug 2025 16:10:19 +0200
Subject: [PATCH] fixed standin with boost off

---
 models/wan/modules/model.py |  4 ++--
 wgp.py                      | 10 +++++-----
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/models/wan/modules/model.py b/models/wan/modules/model.py
index 95faa4d..b9c98bf 100644
--- a/models/wan/modules/model.py
+++ b/models/wan/modules/model.py
@@ -1306,7 +1306,7 @@ class WanModel(ModelMixin, ConfigMixin):
         if standin_ref is not None:
             standin_cache_enabled = False
             kwargs["standin_phase"] = 2
-            if (current_step == 0 or not standin_cache_enabled) and x_id == 0:
+            if current_step == 0 or not standin_cache_enabled :
                 standin_x = self.patch_embedding(standin_ref).to(modulation_dtype).flatten(2).transpose(1, 2)
                 standin_e = self.time_embedding( sinusoidal_embedding_1d(self.freq_dim, torch.zeros_like(t)).to(modulation_dtype) )
                 standin_e0 = self.time_projection(standin_e).unflatten(1, (6, self.dim)).to(e.dtype)
@@ -1453,7 +1453,7 @@ class WanModel(ModelMixin, ConfigMixin):
             return [None] * len(x_list)
 
         if standin_x is not None:
-            if not standin_cache_enabled and x_id ==0 : get_cache("standin").clear()
+            if not standin_cache_enabled: get_cache("standin").clear()
             standin_x = block(standin_x, context = None, grid_sizes = None, e= standin_e0, freqs = standin_freqs, standin_phase = 1)
 
         if slg_layers is not None and block_idx in slg_layers:
diff --git a/wgp.py b/wgp.py
index 4066595..a151252 100644
--- a/wgp.py
+++ b/wgp.py
@@ -1,4 +1,8 @@
 import os
+# # os.environ.pop("TORCH_LOGS", None) # make sure no env var is suppressing/overriding
+# os.environ["TORCH_LOGS"]= "recompiles"
+import torch._logging as tlog
+# tlog.set_logs(recompiles=True, guards=True, graph_breaks=True)
 import time
 import sys
 import threading
@@ -55,7 +59,7 @@
 AUTOSAVE_FILENAME = "queue.zip"
 PROMPT_VARS_MAX = 10
 target_mmgp_version = "3.5.10"
-WanGP_version = "8.2"
+WanGP_version = "8.21"
 settings_version = 2.27
 max_source_video_frames = 3000
 prompt_enhancer_image_caption_model, prompt_enhancer_image_caption_processor, prompt_enhancer_llm_model, prompt_enhancer_llm_tokenizer = None, None, None, None
@@ -4308,10 +4312,6 @@ def generate_video(
     model_filename,
     mode,
 ):
-    # import os
-    # os.environ.pop("TORCH_LOGS", None) # make sure no env var is suppressing/overriding
-    # import torch._logging as tlog
-    # tlog.set_logs(recompiles=True, guards=True, graph_breaks=True)
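
Note (not part of the commit itself): below is a minimal, standalone sketch of the stand-in cache gating this patch corrects. It assumes a plain dict in place of get_cache("standin") and a hypothetical compute_ref_embedding() standing in for the patch_embedding/time_embedding calls in models/wan/modules/model.py; it illustrates the fixed control flow, not WanGP's actual implementation.

    # sketch.py - hedged illustration of the gating fixed by this patch
    def compute_ref_embedding(ref):
        # Hypothetical stand-in for self.patch_embedding(...) / time_embedding(...).
        return [v * 2.0 for v in ref]

    cache = {}

    def run_step(current_step, standin_cache_enabled, ref):
        # Pre-patch, this branch was additionally gated on "x_id == 0", so with
        # caching disabled (boost off) only the first stream in a batch ever
        # refreshed the embedding; the patch drops that extra condition.
        if current_step == 0 or not standin_cache_enabled:
            if not standin_cache_enabled:
                cache.clear()  # mirrors get_cache("standin").clear()
            cache["emb"] = compute_ref_embedding(ref)
        return cache["emb"]

    if __name__ == "__main__":
        ref = [1.0, 2.0, 3.0]
        for step in range(3):  # caching off: recompute (and clear) every step
            print("boost off", step, run_step(step, False, ref))
        for step in range(3):  # caching on: compute once at step 0, then reuse
            print("boost on ", step, run_step(step, True, ref))

On the wgp.py side, the hunk leaves commented-out torch._logging scaffolding at the top of the module; torch._logging.set_logs(recompiles=True, guards=True, graph_breaks=True) is the PyTorch 2.x call that, when uncommented, prints torch.compile recompile, guard, and graph-break diagnostics.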