mirror of https://github.com/Wan-Video/Wan2.1.git
fix messed up

parent 332ca7af0e
commit 58c1549962
@@ -20,10 +20,12 @@ WanGP supports the Wan (and derived models), Hunyuan Video and LTX Video models
 **Follow DeepBeepMeep on Twitter/X to get the Latest News**: https://x.com/deepbeepmeep

 ## 🔥 Latest Updates :

-### August 10 2025: WanGP v7.75 - Faster than the VAE ...
+### August 10 2025: WanGP v7.76 - Faster than the VAE ...
 We have a funny one here today: FastWan 2.2 5B, the Fastest Video Generator, only 20s to generate 121 frames at 720p. The snag is that the VAE decode takes twice as long as the generation itself...
 Thanks to Kijai for extracting the Lora that is used to build the corresponding finetune.

+*WanGP 7.76: fixed the mess I made with the i2v models (the loras path was wrong for Wan 2.2 and Clip was broken)*
+
 ### August 9 2025: WanGP v7.74 - Qwen Rebirth part 2
 Added support for the Qwen Lightning lora for 8-step generation (https://huggingface.co/lightx2v/Qwen-Image-Lightning/blob/main/Qwen-Image-Lightning-8steps-V1.0.safetensors). The lora is not normalized, so use a multiplier of around 0.1.
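As a rough illustration of that recipe (8 steps, lora strength around 0.1) outside of WanGP, here is a minimal diffusers-style sketch. It assumes a recent diffusers release that can load Qwen/Qwen-Image and that the pipeline exposes the usual `load_lora_weights` / `set_adapters` lora API; none of this is WanGP's own loading code, where you would instead drop the .safetensors file into the lora folder and set the multiplier in the UI.

```python
# Hedged sketch, not WanGP code: apply the 8-step Lightning lora at low strength.
# Assumes a diffusers build with Qwen-Image support and lora loading on this pipeline.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=torch.bfloat16).to("cuda")
pipe.load_lora_weights(
    "lightx2v/Qwen-Image-Lightning",
    weight_name="Qwen-Image-Lightning-8steps-V1.0.safetensors",
    adapter_name="lightning",
)
pipe.set_adapters(["lightning"], adapter_weights=[0.1])  # lora is not normalized, keep the multiplier low

image = pipe("a cozy reading nook, warm evening light", num_inference_steps=8).images[0]
image.save("qwen_lightning_8steps.png")
```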
@@ -97,7 +97,7 @@ class WanAny2V:
                 device=self.device,
                 checkpoint_path=os.path.join(checkpoint_dir ,
                                             config.clip_checkpoint),
-                tokenizer_path=os.path.join(checkpoint_dir ,  "clip_vit_large_patch14"))
+                tokenizer_path=os.path.join(checkpoint_dir , "xlm-roberta-large"))

         if base_model_type in ["ti2v_2_2"]:
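This is the "Clip broken" part of the changelog: the corrected path points `tokenizer_path` at the `xlm-roberta-large` tokenizer folder that this XLM-RoBERTa-based CLIP text tower expects, rather than at the CLIP ViT-L/14 one. Whether the old path failed to load outright or just produced mismatched token ids, the two tokenizer folders are not interchangeable, as the sketch below shows; it pulls both tokenizers from the Hugging Face Hub purely for a self-contained example, whereas WanGP loads them from its local checkpoint directory.

```python
# Sketch only: why a CLIP ViT-L/14 tokenizer cannot stand in for an XLM-RoBERTa one.
# Both tokenizers are fetched from the Hub here for illustration, not from WanGP's
# local checkpoint directory.
from transformers import AutoTokenizer

xlmr = AutoTokenizer.from_pretrained("xlm-roberta-large")              # SentencePiece, ~250k vocab
clip = AutoTokenizer.from_pretrained("openai/clip-vit-large-patch14")  # CLIP byte-pair vocab, ~49k

text = "a corgi surfing a wave at sunset"
print(len(xlmr), len(clip))        # very different vocabulary sizes
print(xlmr(text).input_ids)        # ids valid for the XLM-RoBERTa embedding table
print(clip(text).input_ids)        # different ids and special tokens -> mismatched conditioning
```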

wgp.py
@@ -53,7 +53,7 @@ AUTOSAVE_FILENAME = "queue.zip"
 PROMPT_VARS_MAX = 10

 target_mmgp_version = "3.5.8"
-WanGP_version = "7.75"
+WanGP_version = "7.76"
 settings_version = 2.23
 max_source_video_frames = 3000
 prompt_enhancer_image_caption_model, prompt_enhancer_image_caption_processor, prompt_enhancer_llm_model, prompt_enhancer_llm_tokenizer = None, None, None, None
@@ -1620,7 +1620,7 @@ def _parse_args():
 def get_lora_dir(model_type):
     model_family = get_model_family(model_type)
     base_model_type = get_base_model_type(model_type)
-    i2v = test_class_i2v(model_type) or  base_model_type == "i2v_2_2"
+    i2v = test_class_i2v(model_type) and not  base_model_type in ["i2v_2_2", "i2v_2_2_multitalk"]
     if model_family == "wan":
         lora_dir =args.lora_dir
         if i2v and len(lora_dir)==0:
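This is the "loras path was wrong for Wan 2.2" fix: with the old `or` clause, the Wan 2.2 i2v variants were flagged as classic i2v models and so resolved to the Wan 2.1 i2v lora directory; the new condition excludes `i2v_2_2` and `i2v_2_2_multitalk`, so they presumably fall through to their own default instead. The snippet below is a self-contained toy version of just that predicate; the folder names are hypothetical placeholders, not WanGP's real directory layout.

```python
# Toy sketch of the corrected predicate only; "loras" / "loras/i2v" are hypothetical
# placeholder folders, not WanGP's actual lora directory layout.
import os

def pick_lora_dir(base_model_type: str, is_i2v_class: bool, root: str = "loras") -> str:
    # After the fix, Wan 2.2 i2v variants are excluded from the generic i2v branch,
    # so they no longer resolve to the Wan 2.1 i2v lora folder by mistake.
    i2v = is_i2v_class and base_model_type not in ("i2v_2_2", "i2v_2_2_multitalk")
    return os.path.join(root, "i2v") if i2v else root

print(pick_lora_dir("i2v", True))        # loras/i2v  (Wan 2.1 i2v, unchanged)
print(pick_lora_dir("i2v_2_2", True))    # loras      (Wan 2.2 i2v, now excluded from the i2v branch)
```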