mirror of
https://github.com/Wan-Video/Wan2.1.git
synced 2025-11-04 22:26:36 +00:00
Improved Loras support
This commit is contained in:
parent
71e2e68c8e
commit
65605a778c
@ -19,6 +19,7 @@ In this repository, we present **Wan2.1**, a comprehensive and open suite of vid
|
||||
|
||||
|
||||
## 🔥 Latest News!!
|
||||
* Mar 13, 2025: 👋 Wan2.1GP v1.6: Better Loras support, accelerated loading Loras. You will need to refresh the requirements *pip install -r requirements.txt*
|
||||
* Mar 10, 2025: 👋 Wan2.1GP v1.5: Official TeaCache support + Smart TeaCache (finds automatically the best parameters for a requested speed multiplier), 10% speed boost with no quality loss, improved lora presets (they can now include prompts and comments to guide the user)
|
||||
* Mar 07, 2025: 👋 Wan2.1GP v1.4: Fix Pytorch compilation, now it is really 20% faster when activated
|
||||
* Mar 04, 2025: 👋 Wan2.1GP v1.3: Support for Image to Video with multiple images for different images / prompts combinations (requires *--multiple-images* switch), and added command line *--preload x* to preload in VRAM x MB of the main diffusion model if you find there is too much unused VRAM and you want to (slightly) accelerate the generation process.
|
||||
@ -156,7 +157,7 @@ python gradio_server.py --attention sdpa
|
||||
|
||||
Every lora stored in the subfolder 'loras' for t2v and 'loras_i2v' will be automatically loaded. You will then be able to activate / deactivate any of them when running the application by selecting them in the area below "Activated Loras".
|
||||
|
||||
For each activated Lora, you may specify a *multiplier* that is one float number that corresponds to its weight (default is 1.0) .The multipliers for each Lora shoud be separated by a space character or a carriage return. For instance:\
|
||||
For each activated Lora, you may specify a *multiplier* that is one float number that corresponds to its weight (default is 1.0). The multipliers for each Lora should be separated by a space character or a carriage return. For instance:\
|
||||
*1.2 0.8* means that the first lora will have a 1.2 multiplier and the second one will have 0.8.
|
||||
|
||||
Alternatively for each Lora's multiplier you may specify a list of float numbers multipliers separated by a "," (no space) that gives the evolution of this Lora's multiplier over the steps. For instance let's assume there are 30 denoising steps and the multiplier is *0.9,0.8,0.7* then for the steps ranges 0-9, 10-19 and 20-29 the Lora multiplier will be respectively 0.9, 0.8 and 0.7.
|
||||
|
||||
@ -291,8 +291,8 @@ offload.default_verboseLevel = verbose_level
|
||||
|
||||
download_models(transformer_filename_i2v if use_image2video else transformer_filename_t2v, text_encoder_filename)
|
||||
|
||||
def sanitize_file_name(file_name, rep=""):
    """Remove (or substitute) characters that are invalid in file names.

    Strips the characters / \\ : | ? < > " which are rejected by common
    file systems (notably Windows/NTFS).

    Args:
        file_name: the candidate file name to sanitize.
        rep: replacement string for each forbidden character; defaults to
            "" (plain removal), which preserves the original behavior.

    Returns:
        The sanitized file name.
    """
    # One pass per forbidden character; the set is small so chained
    # replaces are fine performance-wise.
    for forbidden in '/\\:|?<>"':
        file_name = file_name.replace(forbidden, rep)
    return file_name
|
||||
def sanitize_file_name(file_name, rep =""):
    """Return *file_name* with file-system-unsafe characters replaced by *rep*.

    The characters / \\ : | ? < > " are not allowed in file names on
    common file systems; each occurrence is substituted with *rep*
    (the empty string by default, i.e. they are simply dropped).
    """
    forbidden_chars = ('/', '\\', ':', '|', '?', '<', '>', '"')
    sanitized = file_name
    for ch in forbidden_chars:
        sanitized = sanitized.replace(ch, rep)
    return sanitized
|
||||
|
||||
def extract_preset(lset_name, loras):
|
||||
lset_name = sanitize_file_name(lset_name)
|
||||
@ -338,34 +338,35 @@ def setup_loras(pipe, lora_dir, lora_preselected_preset, split_linear_modules_m
|
||||
default_loras_choices = []
|
||||
default_loras_multis_str = ""
|
||||
loras_presets = []
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
if lora_dir != None :
|
||||
if not os.path.isdir(lora_dir):
|
||||
raise Exception("--lora-dir should be a path to a directory that contains Loras")
|
||||
|
||||
default_lora_preset = ""
|
||||
default_prompt = ""
|
||||
if lora_dir != None:
|
||||
import glob
|
||||
dir_loras = glob.glob( os.path.join(lora_dir , "*.sft") ) + glob.glob( os.path.join(lora_dir , "*.safetensors") )
|
||||
dir_loras.sort()
|
||||
loras += [element for element in dir_loras if element not in loras ]
|
||||
|
||||
dir_presets = glob.glob( os.path.join(lora_dir , "*.lset") )
|
||||
dir_presets.sort()
|
||||
loras_presets = [ Path(Path(file_path).parts[-1]).stem for file_path in dir_presets]
|
||||
if use_image2video or not "1.3B" in transformer_filename_t2v:
|
||||
from pathlib import Path
|
||||
|
||||
if len(loras) > 0:
|
||||
loras_names = [ Path(lora).stem for lora in loras ]
|
||||
offload.load_loras_into_model(pipe["transformer"], loras, activate_all_loras=False, split_linear_modules_map = split_linear_modules_map) #lora_multiplier,
|
||||
if lora_dir != None :
|
||||
if not os.path.isdir(lora_dir):
|
||||
raise Exception("--lora-dir should be a path to a directory that contains Loras")
|
||||
|
||||
if len(lora_preselected_preset) > 0:
|
||||
if not os.path.isfile(os.path.join(lora_dir, lora_preselected_preset + ".lset")):
|
||||
raise Exception(f"Unknown preset '{lora_preselected_preset}'")
|
||||
default_lora_preset = lora_preselected_preset
|
||||
default_loras_choices, default_loras_multis_str, default_prompt, _ = extract_preset(default_lora_preset, loras)
|
||||
if lora_dir != None:
|
||||
import glob
|
||||
dir_loras = glob.glob( os.path.join(lora_dir , "*.sft") ) + glob.glob( os.path.join(lora_dir , "*.safetensors") )
|
||||
dir_loras.sort()
|
||||
loras += [element for element in dir_loras if element not in loras ]
|
||||
|
||||
dir_presets = glob.glob( os.path.join(lora_dir , "*.lset") )
|
||||
dir_presets.sort()
|
||||
loras_presets = [ Path(Path(file_path).parts[-1]).stem for file_path in dir_presets]
|
||||
|
||||
if len(loras) > 0:
|
||||
loras_names = [ Path(lora).stem for lora in loras ]
|
||||
offload.load_loras_into_model(pipe["transformer"], loras, activate_all_loras=False, split_linear_modules_map = split_linear_modules_map) #lora_multiplier,
|
||||
|
||||
if len(lora_preselected_preset) > 0:
|
||||
if not os.path.isfile(os.path.join(lora_dir, lora_preselected_preset + ".lset")):
|
||||
raise Exception(f"Unknown preset '{lora_preselected_preset}'")
|
||||
default_lora_preset = lora_preselected_preset
|
||||
default_loras_choices, default_loras_multis_str, default_prompt, _ = extract_preset(default_lora_preset, loras)
|
||||
if len(default_prompt) == 0:
|
||||
default_prompt = get_default_prompt(use_image2video)
|
||||
return loras, loras_names, default_loras_choices, default_loras_multis_str, default_prompt, default_lora_preset, loras_presets
|
||||
@ -966,9 +967,9 @@ def generate_video(
|
||||
|
||||
time_flag = datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d-%Hh%Mm%Ss")
|
||||
if os.name == 'nt':
|
||||
file_name = f"{time_flag}_seed{seed}_{prompt[:50].replace('/','').strip()}.mp4".replace(':',' ').replace('\\',' ')
|
||||
file_name = f"{time_flag}_seed{seed}_{sanitize_file_name(prompt[:50])}.mp4"
|
||||
else:
|
||||
file_name = f"{time_flag}_seed{seed}_{prompt[:100].replace('/','').strip()}.mp4".replace(':',' ').replace('\\',' ')
|
||||
file_name = f"{time_flag}_seed{seed}_{sanitize_file_name(prompt[:100])}.mp4"
|
||||
video_path = os.path.join(os.getcwd(), "gradio_outputs", file_name)
|
||||
cache_video(
|
||||
tensor=sample[None],
|
||||
|
||||
@ -16,5 +16,5 @@ gradio>=5.0.0
|
||||
numpy>=1.23.5,<2
|
||||
einops
|
||||
moviepy==1.0.3
|
||||
mmgp==3.2.3
|
||||
mmgp==3.2.5
|
||||
peft==0.14.0
|
||||
Loading…
Reference in New Issue
Block a user