fixed pytorch compilation

This commit is contained in:
DeepBeepMeep 2025-09-04 01:10:42 +02:00
parent a60eea2371
commit e5abb1b9bc
3 changed files with 8 additions and 7 deletions

View File

@ -454,7 +454,8 @@ class WanAny2V:
timesteps.append(0.) timesteps.append(0.)
timesteps = [torch.tensor([t], device=self.device) for t in timesteps] timesteps = [torch.tensor([t], device=self.device) for t in timesteps]
if self.use_timestep_transform: if self.use_timestep_transform:
timesteps = [timestep_transform(t, shift=shift, num_timesteps=self.num_timesteps) for t in timesteps][:-1] timesteps = [timestep_transform(t, shift=shift, num_timesteps=self.num_timesteps) for t in timesteps][:-1]
timesteps = torch.tensor(timesteps)
sample_scheduler = None sample_scheduler = None
elif sample_solver == 'causvid': elif sample_solver == 'causvid':
sample_scheduler = FlowMatchScheduler(num_inference_steps=sampling_steps, shift=shift, sigma_min=0, extra_one_step=True) sample_scheduler = FlowMatchScheduler(num_inference_steps=sampling_steps, shift=shift, sigma_min=0, extra_one_step=True)
@ -1016,8 +1017,8 @@ class WanAny2V:
if sample_solver == "euler": if sample_solver == "euler":
dt = timesteps[i] if i == len(timesteps)-1 else (timesteps[i] - timesteps[i + 1]) dt = timesteps[i] if i == len(timesteps)-1 else (timesteps[i] - timesteps[i + 1])
dt = dt / self.num_timesteps dt = dt.item() / self.num_timesteps
latents = latents - noise_pred * dt[:, None, None, None, None] latents = latents - noise_pred * dt
else: else:
latents = sample_scheduler.step( latents = sample_scheduler.step(
noise_pred[:, :, :target_shape[1]], noise_pred[:, :, :target_shape[1]],

View File

@ -44,7 +44,7 @@ pydantic==2.10.6
# Math & modeling # Math & modeling
torchdiffeq>=0.2.5 torchdiffeq>=0.2.5
tensordict>=0.6.1 tensordict>=0.6.1
mmgp==3.5.12 mmgp==3.6.0
peft==0.15.0 peft==0.15.0
matplotlib matplotlib

6
wgp.py
View File

@ -59,8 +59,8 @@ global_queue_ref = []
AUTOSAVE_FILENAME = "queue.zip" AUTOSAVE_FILENAME = "queue.zip"
PROMPT_VARS_MAX = 10 PROMPT_VARS_MAX = 10
target_mmgp_version = "3.5.12" target_mmgp_version = "3.6.0"
WanGP_version = "8.33" WanGP_version = "8.34"
settings_version = 2.29 settings_version = 2.29
max_source_video_frames = 3000 max_source_video_frames = 3000
prompt_enhancer_image_caption_model, prompt_enhancer_image_caption_processor, prompt_enhancer_llm_model, prompt_enhancer_llm_tokenizer = None, None, None, None prompt_enhancer_image_caption_model, prompt_enhancer_image_caption_processor, prompt_enhancer_llm_model, prompt_enhancer_llm_tokenizer = None, None, None, None
@ -8509,7 +8509,7 @@ def generate_configuration_tab(state, blocks, header, model_family, model_choice
("Off", "" ), ("Off", "" ),
], ],
value= compile, value= compile,
label="Compile Transformer (up to 50% faster and 30% more frames but requires Linux / WSL and Flash or Sage attention)", label="Compile Transformer : up to 10-20% faster, useful only if multiple gens at same number of frames / resolution",
interactive= not lock_ui_compile interactive= not lock_ui_compile
) )