qwen lightning fix

deepbeepmeep 2025-08-13 00:16:07 +02:00
parent cc744cc5c3
commit d480d48d5c
5 changed files with 16 additions and 6 deletions


@@ -21,7 +21,7 @@ WanGP supports the Wan (and derived models), Hunyuan Video and LTX Video models
## 🔥 Latest Updates :
-### August 11 2025: WanGP v7.777 - Lucky Day
+### August 12 2025: WanGP v7.7777 - Lucky Day(s)
This is your lucky day! Thanks to new configuration options that let you store generated videos and images in lossless compressed formats, you will find they look twice as good without you doing anything!
@@ -35,7 +35,11 @@ Generation Settings are stored in each of the above regardless of the format (th
Also, you can now choose different output directories for images and videos.
+Unexpected luck: fixed Lightning 8 steps for Qwen and Lightning 4 steps for Wan 2.2; now you just need a 1x multiplier, no weird numbers.
-* update 7.777 : oops got a crash a with FastWan ? Luck comes and goes, try a new update, maybe you will have a better chance this time *
+*update 7.777: oops, got a crash with FastWan? Luck comes and goes; try a new update, maybe you will have a better chance this time.*
+*update 7.7777: Sometimes good luck seems to last forever. For instance, what if Qwen Lightning 4 steps could also work with WanGP?*
+- https://huggingface.co/lightx2v/Qwen-Image-Lightning/resolve/main/Qwen-Image-Lightning-4steps-V1.0-bf16.safetensors (Qwen Lightning 4 steps)
+- https://huggingface.co/lightx2v/Qwen-Image-Lightning/resolve/main/Qwen-Image-Lightning-8steps-V1.1-bf16.safetensors (new, improved version of Qwen Lightning 8 steps)
### August 10 2025: WanGP v7.76 - Faster than the VAE ...
We have a funny one here today: FastWan 2.2 5B, the fastest video generator, only 20s to generate 121 frames at 720p. The snag is that the VAE is twice as slow...
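For context on the Qwen Lightning links added above, here is a minimal sketch of applying the 4-step LoRA with a plain 1.0 multiplier, assuming a diffusers-style Qwen-Image pipeline rather than WanGP's own loader (the prompt, dtype, and device are illustrative):

```python
# Minimal sketch, not WanGP's loader: apply the Qwen Lightning 4-step LoRA
# with a plain 1x multiplier in a diffusers-style pipeline.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=torch.bfloat16)
pipe.to("cuda")
pipe.load_lora_weights(
    "lightx2v/Qwen-Image-Lightning",
    weight_name="Qwen-Image-Lightning-4steps-V1.0-bf16.safetensors",
)
# With the fixed LoRA a plain 1x multiplier suffices: 4 steps, CFG effectively off.
image = pipe(
    "a red lantern in falling snow",  # illustrative prompt
    num_inference_steps=4,
    true_cfg_scale=1.0,
).images[0]
image.save("qwen_lightning_4steps.png")
```

The 8-step V1.1 file linked above should work the same way with `num_inference_steps=8`.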


@@ -71,7 +71,7 @@ class model_factory():
        input_ref_images = None,
        width= 832,
        height=480,
-       embedded_guidance_scale: float = 4,
+       guide_scale: float = 4,
        fit_into_canvas = None,
        callback = None,
        loras_slists = None,
@@ -148,7 +148,7 @@ class model_factory():
            height=height,
            num_inference_steps=sampling_steps,
            num_images_per_prompt = batch_size,
-           true_cfg_scale=embedded_guidance_scale,
+           true_cfg_scale=guide_scale,
            callback = callback,
            pipeline=self,
            loras_slists=loras_slists,
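The rename is purely at the wrapper boundary: `guide_scale` is forwarded to the underlying pipeline as `true_cfg_scale`. A self-contained sketch of that forwarding pattern (the stub and function names are illustrative; only the two parameter names come from the diff):

```python
# Illustrative stub standing in for the real Qwen pipeline call.
def pipeline_stub(*, true_cfg_scale: float, num_inference_steps: int) -> str:
    return f"cfg={true_cfg_scale}, steps={num_inference_steps}"

# Wrapper in the style of model_factory: callers now say guide_scale, and the
# wrapper hands it on as true_cfg_scale (was: embedded_guidance_scale).
def generate(guide_scale: float = 4, sampling_steps: int = 8) -> str:
    return pipeline_stub(true_cfg_scale=guide_scale, num_inference_steps=sampling_steps)

print(generate(guide_scale=4, sampling_steps=8))  # cfg=4, steps=8
```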


@@ -473,6 +473,13 @@ class QwenImageTransformer2DModel(nn.Module):
        first = next(iter(sd), None)
        if first == None:
            return sd
+       new_sd = {}
+       for k,v in sd.items():
+           k = k.replace(".lora.", ".lora_")
+           new_sd[k] = v
+       sd = new_sd
        if first.startswith("transformer_blocks"):
            new_sd = {}
            for k,v in sd.items():
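The added block normalizes LoRA checkpoint key names before the existing `transformer_blocks` rewriting kicks in; this is what lets the Lightning checkpoints linked above load cleanly. The same transform as a standalone, runnable sketch (the dictionary contents are illustrative):

```python
# Standalone version of the key normalization added above: LoRA tensors named
# "....lora.down.weight" / "....lora.up.weight" are rewritten to the
# "....lora_down.weight" / "....lora_up.weight" convention.
def normalize_lora_keys(sd: dict) -> dict:
    return {k.replace(".lora.", ".lora_"): v for k, v in sd.items()}

sd = {
    "transformer_blocks.0.attn.to_q.lora.down.weight": "tensor A",  # illustrative
    "transformer_blocks.0.attn.to_q.lora.up.weight": "tensor B",
}
print(normalize_lora_keys(sd))
# {'transformer_blocks.0.attn.to_q.lora_down.weight': 'tensor A',
#  'transformer_blocks.0.attn.to_q.lora_up.weight': 'tensor B'}
```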


@@ -7,7 +7,6 @@ import subprocess
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch.nn as nn
-from shared.utils.utils import cache_image, cache_video, str2bool
# from shared.utils.multitalk_utils import save_video_ffmpeg
# from .kokoro import KPipeline
from transformers import Wav2Vec2FeatureExtractor

wgp.py

@@ -55,7 +55,7 @@ AUTOSAVE_FILENAME = "queue.zip"
PROMPT_VARS_MAX = 10
target_mmgp_version = "3.5.10"
WanGP_version = "7.777"
WanGP_version = "7.7777"
settings_version = 2.23
max_source_video_frames = 3000
prompt_enhancer_image_caption_model, prompt_enhancer_image_caption_processor, prompt_enhancer_llm_model, prompt_enhancer_llm_tokenizer = None, None, None, None