"
if "image" in model_filename:
model_name = "Wan2.1 image2video"
@@ -508,7 +582,8 @@ def generate_header(model_filename, compile, attention_mode):
if compile:
header += ", pytorch compilation ON"
- header += ") -----------------
"
+ header += ")
"
+
return header
@@ -591,19 +666,19 @@ def apply_changes( state,
# return "
New Config file created. Please restart the Gradio Server
"
-def update_defaults(state, num_inference_steps,flow_shift):
+def update_defaults(state, num_inference_steps,flow_shift, lset_name , loras_choices):
if "config_changes" not in state:
return get_default_flow("")
changes = state["config_changes"]
server_config = state["config_new"]
old_server_config = state["config_old"]
-
+ t2v_changed = False
if not use_image2video:
old_is_14B = "14B" in server_config["transformer_filename"]
new_is_14B = "14B" in old_server_config["transformer_filename"]
trans_file = server_config["transformer_filename"]
- # if old_is_14B != new_is_14B:
+ t2v_changed = old_is_14B != new_is_14B
# num_inference_steps, flow_shift = get_default_flow(trans_file)
else:
old_is_720P = "720P" in server_config["transformer_filename_i2v"]
@@ -615,9 +690,11 @@ def update_defaults(state, num_inference_steps,flow_shift):
header = generate_header(trans_file, server_config["compile"], server_config["attention_mode"] )
new_loras_choices = [ (loras_name, str(i)) for i,loras_name in enumerate(loras_names)]
lset_choices = [ (preset, preset) for preset in loras_presets]
- lset_choices.append( (new_preset_msg, ""))
-
- return num_inference_steps, flow_shift, header, gr.Dropdown(choices=lset_choices, value= ""), gr.Dropdown(choices=new_loras_choices, value= [])
+ lset_choices.append( (get_new_preset_msg(advanced), ""))
+ if t2v_changed:
+ return num_inference_steps, flow_shift, header, gr.Dropdown(choices=lset_choices, value= ""), gr.Dropdown(choices=new_loras_choices, value= [])
+ else:
+ return num_inference_steps, flow_shift, header, lset_name , loras_choices
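The branch above leans on a Gradio convention worth spelling out: returning a fresh `gr.Dropdown(...)` from an event handler rebuilds that component (choices and value), while returning the incoming value unchanged leaves the user's current selection alone. A minimal sketch of the pattern, with made-up choices:

```python
import gradio as gr

def on_change(choice):
    # Returning a new component replaces choices/value;
    # returning the incoming value leaves the widget as the user left it.
    if choice == "reset":
        return gr.Dropdown(choices=["a", "b"], value="a")
    return choice

with gr.Blocks() as demo:
    dd = gr.Dropdown(choices=["a", "b", "reset"], value="a")
    dd.change(on_change, inputs=dd, outputs=dd)
```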
from moviepy.editor import ImageSequenceClip
@@ -661,9 +738,20 @@ def abort_generation(state):
else:
return gr.Button(interactive= True)
-def refresh_gallery(state):
+def refresh_gallery(state, txt):
file_list = state.get("file_list", None)
- return file_list
+ prompt = state.get("prompt", "")
+ if len(prompt) == 0:
+ return file_list, gr.Text(visible= False, value="")
+ else:
+ prompts_max = state.get("prompts_max",0)
+ prompt_no = state.get("prompt_no",0)
+ if prompts_max > 1:
+ label = f"Current Prompt ({prompt_no+1}/{prompts_max})"
+ else:
+ label = "Current Prompt"
+ return file_list, gr.Text(visible= True, value=prompt, label=label)
+
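For reference, the label logic above produces a 1-based counter only when several prompts are queued; a self-contained check (the state values are illustrative):

```python
state = {"prompt": "a cat", "prompts_max": 3, "prompt_no": 1}
if state["prompts_max"] > 1:
    label = f"Current Prompt ({state['prompt_no'] + 1}/{state['prompts_max']})"
else:
    label = "Current Prompt"
assert label == "Current Prompt (2/3)"
```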
def finalize_gallery(state):
choice = 0
@@ -675,7 +763,7 @@ def finalize_gallery(state):
time.sleep(0.2)
global gen_in_progress
gen_in_progress = False
- return gr.Gallery(selected_index=choice), gr.Button(interactive= True), gr.Button(visible= True), gr.Checkbox(visible= False)
+ return gr.Gallery(selected_index=choice), gr.Button(interactive= True), gr.Button(visible= True), gr.Checkbox(visible= False), gr.Text(visible= False, value="")
def select_video(state , event_data: gr.EventData):
data= event_data._data
@@ -697,7 +785,9 @@ def one_more_video(state):
extra_orders = state.get("extra_orders", 0)
extra_orders += 1
state["extra_orders"] = extra_orders
- prompts_max = state["prompts_max"]
+ prompts_max = state.get("prompts_max",0)
+ if prompts_max == 0:
+ return state
prompt_no = state["prompt_no"]
video_no = state["video_no"]
total_video = state["total_video"]
@@ -730,6 +820,7 @@ def generate_video(
flow_shift,
embedded_guidance_scale,
repeat_generation,
+ multi_images_gen_type,
tea_cache,
tea_cache_start_step_perc,
loras_choices,
@@ -759,8 +850,11 @@ def generate_video(
elif attention_mode in attention_modes_supported:
attn = attention_mode
else:
- raise gr.Error(f"You have selected attention mode '{attention_mode}'. However it is not installed on your system. You should either install it or switch to the default 'sdpa' attention.")
+ gr.Info(f"You have selected attention mode '{attention_mode}'. However it is not installed on your system. You should either install it or switch to the default 'sdpa' attention.")
+ return
+ if state.get("validate_success",0) != 1:
+ return
width, height = resolution.split("x")
width, height = int(width), int(height)
@@ -768,17 +862,18 @@ def generate_video(
slg_layers = None
if use_image2video:
if "480p" in transformer_filename_i2v and width * height > 848*480:
- raise gr.Error("You must use the 720P image to video model to generate videos with a resolution equivalent to 720P")
+ gr.Info("You must use the 720P image to video model to generate videos with a resolution equivalent to 720P")
+ return
resolution = str(width) + "*" + str(height)
if resolution not in ['720*1280', '1280*720', '480*832', '832*480']:
- raise gr.Error(f"Resolution {resolution} not supported by image 2 video")
-
+ gr.Info(f"Resolution {resolution} not supported by image 2 video")
+ return
else:
if "1.3B" in transformer_filename_t2v and width * height > 848*480:
- raise gr.Error("You must use the 14B text to video model to generate videos with a resolution equivalent to 720P")
-
+ gr.Info("You must use the 14B text to video model to generate videos with a resolution equivalent to 720P")
+ return
offload.shared_state["_attention"] = attn
@@ -808,6 +903,9 @@ def generate_video(
temp_filename = None
if len(prompt) ==0:
return
+ prompt, errors = prompt_parser.process_template(prompt)
+ if len(errors) > 0:
+ gr.Info(f"Error processing prompt template: " + errors)
prompts = prompt.replace("\r", "").split("\n")
prompts = [prompt.strip() for prompt in prompts if len(prompt.strip())>0 and not prompt.startswith("#")]
if len(prompts) ==0:
@@ -818,22 +916,31 @@ def generate_video(
image_to_continue = [ tup[0] for tup in image_to_continue ]
else:
image_to_continue = [image_to_continue]
- if len(prompts) >= len(image_to_continue):
- if len(prompts) % len(image_to_continue) !=0:
- raise gr.Error("If there are more text prompts than input images the number of text prompts should be dividable by the number of images")
- rep = len(prompts) // len(image_to_continue)
- new_image_to_continue = []
- for i, _ in enumerate(prompts):
- new_image_to_continue.append(image_to_continue[i//rep] )
- image_to_continue = new_image_to_continue
- else:
- if len(image_to_continue) % len(prompts) !=0:
- raise gr.Error("If there are more input images than text prompts the number of images should be dividable by the number of text prompts")
- rep = len(image_to_continue) // len(prompts)
+ if multi_images_gen_type == 0:
new_prompts = []
- for i, _ in enumerate(image_to_continue):
- new_prompts.append( prompts[ i//rep] )
+ new_image_to_continue = []
+ for i in range(len(prompts) * len(image_to_continue) ):
+ new_prompts.append( prompts[ i % len(prompts)] )
+ new_image_to_continue.append(image_to_continue[i // len(prompts)] )
prompts = new_prompts
+ image_to_continue = new_image_to_continue
+ else:
+ if len(prompts) >= len(image_to_continue):
+ if len(prompts) % len(image_to_continue) !=0:
+ raise gr.Error("If there are more text prompts than input images the number of text prompts should be dividable by the number of images")
+ rep = len(prompts) // len(image_to_continue)
+ new_image_to_continue = []
+ for i, _ in enumerate(prompts):
+ new_image_to_continue.append(image_to_continue[i//rep] )
+ image_to_continue = new_image_to_continue
+ else:
+ if len(image_to_continue) % len(prompts) !=0:
+ raise gr.Error("If there are more input images than text prompts the number of images should be dividable by the number of text prompts")
+ rep = len(image_to_continue) // len(prompts)
+ new_prompts = []
+ for i, _ in enumerate(image_to_continue):
+ new_prompts.append( prompts[ i//rep] )
+ prompts = new_prompts
elif video_to_continue != None and len(video_to_continue) >0 :
input_image_or_video_path = video_to_continue
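To make the two pairing strategies concrete: with `multi_images_gen_type == 0` every prompt is run against every image (prompts cycle fastest), while the legacy branch stretches the shorter list by repetition and therefore needs the longer list's length to be a multiple of the shorter's. A standalone sketch with dummy values:

```python
prompts = ["p1", "p2"]
images = ["img_a", "img_b", "img_c"]

# multi_images_gen_type == 0: full cross product, 2 x 3 = 6 generations
pairs = [(prompts[i % len(prompts)], images[i // len(prompts)])
         for i in range(len(prompts) * len(images))]
assert pairs[:3] == [("p1", "img_a"), ("p2", "img_a"), ("p1", "img_b")]

# legacy mode: 4 prompts over 2 images -> each image is repeated twice
rep = 4 // 2
assert [i // rep for i in range(4)] == [0, 0, 1, 1]
```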
@@ -900,6 +1007,10 @@ def generate_video(
# TeaCache
trans.enable_teacache = tea_cache > 0
if trans.enable_teacache:
+ trans.teacache_multiplier = tea_cache
+ trans.rel_l1_thresh = 0
+ trans.teacache_start_step = int(tea_cache_start_step_perc*num_inference_steps/100)
+
if use_image2video:
if '480p' in transformer_filename_i2v:
# teacache_thresholds = [0.13, .19, 0.26]
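The start-step arithmetic moved above delays TeaCache skipping until a fixed share of the denoising schedule has run in full. For example:

```python
num_inference_steps = 30
tea_cache_start_step_perc = 20
teacache_start_step = int(tea_cache_start_step_perc * num_inference_steps / 100)
assert teacache_start_step == 6  # steps 0-5 always computed; skipping may begin at step 6
```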
@@ -935,9 +1046,11 @@ def generate_video(
start_time = time.time()
state["prompts_max"] = len(prompts)
for no, prompt in enumerate(prompts):
+ state["prompt"] = prompt
repeat_no = 0
state["prompt_no"] = no
extra_generation = 0
+ yield f"Prompt No{no}"
while True:
extra_orders = state.get("extra_orders",0)
state["extra_orders"] = 0
@@ -950,8 +1063,6 @@ def generate_video(
if trans.enable_teacache:
trans.teacache_counter = 0
- trans.teacache_multiplier = tea_cache
- trans.teacache_start_step = int(tea_cache_start_step_perc*num_inference_steps/100)
trans.num_steps = num_inference_steps
trans.teacache_skipped_steps = 0
trans.previous_residual_uncond = None
@@ -1035,6 +1146,7 @@ def generate_video(
if any( keyword in frame.name for keyword in keyword_list):
VRAM_crash = True
break
+ state["prompt"] = ""
if VRAM_crash:
raise gr.Error("The generation of the video has encountered an error: it is likely that you have unsufficient VRAM and you should therefore reduce the video resolution or its number of frames.")
else:
@@ -1054,6 +1166,7 @@ def generate_video(
if samples == None:
end_time = time.time()
abort = True
+ state["prompt"] = ""
yield f"Video generation was aborted. Total Generation Time: {end_time-start_time:.1f}s"
else:
sample = samples.cpu()
@@ -1076,9 +1189,10 @@ def generate_video(
print(f"New video saved to Path: "+video_path)
file_list.append(video_path)
if video_no < total_video:
- yield status
+ yield status
else:
end_time = time.time()
+ state["prompt"] = ""
yield f"Total Generation Time: {end_time-start_time:.1f}s"
seed += 1
repeat_no += 1
@@ -1089,18 +1203,22 @@ def generate_video(
offload.unload_loras_from_model(trans)
-new_preset_msg = "Enter a Name for a Lora Preset or Choose One Above"
+def get_new_preset_msg(advanced = True):
+ if advanced:
+ return "Enter here a Name for a Lora Preset or Choose one in the List"
+ else:
+ return "Choose a Lora Preset in this List to Apply a Special Effect"
def validate_delete_lset(lset_name):
- if len(lset_name) == 0 or lset_name == new_preset_msg:
+ if len(lset_name) == 0 or lset_name == get_new_preset_msg(True) or lset_name == get_new_preset_msg(False):
gr.Info(f"Choose a Preset to delete")
return gr.Button(visible= True), gr.Checkbox(visible= True), gr.Button(visible= True), gr.Button(visible= True), gr.Button(visible= False), gr.Button(visible= False)
else:
return gr.Button(visible= False), gr.Checkbox(visible= False), gr.Button(visible= False), gr.Button(visible= False), gr.Button(visible= True), gr.Button(visible= True)
def validate_save_lset(lset_name):
- if len(lset_name) == 0 or lset_name == new_preset_msg:
+ if len(lset_name) == 0 or lset_name == get_new_preset_msg(True) or lset_name == get_new_preset_msg(False):
gr.Info("Please enter a name for the preset")
return gr.Button(visible= True), gr.Checkbox(visible= True), gr.Button(visible= True), gr.Button(visible= True), gr.Button(visible= False), gr.Button(visible= False),gr.Checkbox(visible= False)
else:
@@ -1109,10 +1227,12 @@ def validate_save_lset(lset_name):
def cancel_lset():
return gr.Button(visible= True), gr.Button(visible= True), gr.Button(visible= True), gr.Button(visible= True), gr.Button(visible= False), gr.Button(visible= False), gr.Button(visible= False), gr.Checkbox(visible= False)
-def save_lset(lset_name, loras_choices, loras_mult_choices, prompt, save_lset_prompt_cbox):
+def save_lset(state, lset_name, loras_choices, loras_mult_choices, prompt, save_lset_prompt_cbox):
global loras_presets
- if len(lset_name) == 0 or lset_name== new_preset_msg:
+ if state.get("validate_success",0) == 0:
+ pass
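+ # note: this guard is currently a no-op; saving proceeds even if validation has not run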
+ if len(lset_name) == 0 or lset_name == get_new_preset_msg(True) or lset_name == get_new_preset_msg(False):
gr.Info("Please enter a name for the preset")
lset_choices =[("Please enter a name for a Lora Preset","")]
else:
@@ -1142,14 +1262,14 @@ def save_lset(lset_name, loras_choices, loras_mult_choices, prompt, save_lset_pr
gr.Info(f"Lora Preset '{lset_name}' has been created")
loras_presets.append(Path(Path(lset_name_filename).parts[-1]).stem )
lset_choices = [ ( preset, preset) for preset in loras_presets ]
- lset_choices.append( (new_preset_msg, ""))
+ lset_choices.append( (get_new_preset_msg(), ""))
return gr.Dropdown(choices=lset_choices, value= lset_name), gr.Button(visible= True), gr.Button(visible= True), gr.Button(visible= True), gr.Button(visible= True), gr.Button(visible= False), gr.Button(visible= False), gr.Checkbox(visible= False)
def delete_lset(lset_name):
global loras_presets
lset_name_filename = os.path.join(lora_dir, sanitize_file_name(lset_name) + ".lset" )
- if len(lset_name) > 0 and lset_name != new_preset_msg:
+ if len(lset_name) > 0 and lset_name != get_new_preset_msg(True) and lset_name != get_new_preset_msg(False):
if not os.path.isfile(lset_name_filename):
raise gr.Error(f"Preset '{lset_name}' not found ")
os.remove(lset_name_filename)
@@ -1161,7 +1281,7 @@ def delete_lset(lset_name):
gr.Info(f"Choose a Preset to delete")
lset_choices = [ (preset, preset) for preset in loras_presets]
- lset_choices.append((new_preset_msg, ""))
+ lset_choices.append((get_new_preset_msg(), ""))
return gr.Dropdown(choices=lset_choices, value= lset_choices[pos][1]), gr.Button(visible= True), gr.Button(visible= True), gr.Button(visible= True), gr.Button(visible= True), gr.Button(visible= False), gr.Checkbox(visible= False)
def refresh_lora_list(lset_name, loras_choices):
@@ -1179,15 +1299,15 @@ def refresh_lora_list(lset_name, loras_choices):
lora_names_selected.append(lora_id)
lset_choices = [ (preset, preset) for preset in loras_presets]
- lset_choices.append((new_preset_msg, ""))
+ lset_choices.append((get_new_preset_msg(advanced), ""))
if lset_name in loras_presets:
pos = loras_presets.index(lset_name)
else:
pos = len(loras_presets)
lset_name =""
- errors = wan_model.model._loras_errors
- if len(errors) > 0:
+ errors = getattr(wan_model.model, "_loras_errors", "")
+ if errors is not None and len(errors) > 0:
error_files = [path for path, _ in errors]
gr.Info("Error while refreshing Lora List, invalid Lora files: " + ", ".join(error_files))
else:
@@ -1196,9 +1316,11 @@ def refresh_lora_list(lset_name, loras_choices):
return gr.Dropdown(choices=lset_choices, value= lset_choices[pos][1]), gr.Dropdown(choices=new_loras_choices, value= lora_names_selected)
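The `getattr` guard above is the defensive idiom for attributes that may not exist on every model build: with a default supplied, a missing `_loras_errors` yields an empty string instead of raising. A tiny check:

```python
class Model:  # stand-in for wan_model.model without the attribute
    pass

errors = getattr(Model(), "_loras_errors", "")
assert errors == ""  # no AttributeError; execution falls through the error branch
```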
-def apply_lset(lset_name, loras_choices, loras_mult_choices, prompt):
+def apply_lset(state, lset_name, loras_choices, loras_mult_choices, prompt):
- if len(lset_name) == 0 or lset_name== new_preset_msg:
+ state["apply_success"] = 0
+
+ if len(lset_name) == 0 or lset_name== get_new_preset_msg(True) or lset_name== get_new_preset_msg(False):
gr.Info("Please choose a preset in the list or create one")
else:
loras_choices, loras_mult_choices, preset_prompt, full_prompt, error = extract_preset(lset_name, loras)
@@ -1213,34 +1335,221 @@ def apply_lset(lset_name, loras_choices, loras_mult_choices, prompt):
prompt = "\n".join(prompts)
prompt = preset_prompt + '\n' + prompt
gr.Info(f"Lora Preset '{lset_name}' has been applied")
+ state["apply_success"] = 1
+ state["wizard_prompt"] = 0
return loras_choices, loras_mult_choices, prompt
-def create_demo():
-
- default_inference_steps = 30
+def extract_prompt_from_wizard(state, prompt, wizard_prompt, allow_null_values, *args):
+
+ prompts = wizard_prompt.replace("\r" ,"").split("\n")
+
+ new_prompts = []
+ macro_already_written = False
+ for prompt in prompts:
+ if not macro_already_written and not prompt.startswith("#") and "{" in prompt and "}" in prompt:
+ variables = state["variables"]
+ values = args[:len(variables)]
+ macro = "! "
+ for i, (variable, value) in enumerate(zip(variables, values)):
+ if len(value) == 0 and not allow_null_values:
+ return prompt, "You need to provide a value for '" + variable + "'"
+ sub_values= [ "\"" + sub_value + "\"" for sub_value in value.split("\n") ]
+ value = ",".join(sub_values)
+ if i>0:
+ macro += " : "
+ macro += "{" + variable + "}"+ f"={value}"
+ if len(variables) > 0:
+ macro_already_written = True
+ new_prompts.append(macro)
+ new_prompts.append(prompt)
+ else:
+ new_prompts.append(prompt)
+
+ prompt = "\n".join(new_prompts)
+ return prompt, ""
+
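Reconstructing the macro syntax from the loop above: the wizard serializes variable bindings into a single line starting with `! `, bindings joined by ` : `, and multi-line values split into comma-separated quoted strings. A standalone sketch (variable names are examples):

```python
variables = ["subject", "style"]
values = ["a cat\na dog", "oil painting"]

macro = "! "
for i, (variable, value) in enumerate(zip(variables, values)):
    quoted = ",".join('"' + v + '"' for v in value.split("\n"))
    if i > 0:
        macro += " : "
    macro += "{" + variable + "}=" + quoted

assert macro == '! {subject}="a cat","a dog" : {style}="oil painting"'
```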
+def validate_wizard_prompt(state, prompt, wizard_prompt, *args):
+ state["validate_success"] = 0
+
+ if state.get("wizard_prompt",0) != 1:
+ state["validate_success"] = 1
+ return prompt
+
+ prompt, errors = extract_prompt_from_wizard(state, prompt, wizard_prompt, False, *args)
+ if len(errors) > 0:
+ gr.Info(errors)
+ return prompt
+
+ state["validate_success"] = 1
+
+ return prompt
+
+def fill_prompt_from_wizard(state, prompt, wizard_prompt, *args):
+
+ if state.get("wizard_prompt",0) == 1:
+ prompt, errors = extract_prompt_from_wizard(state, prompt, wizard_prompt, True, *args)
+ if len(errors) > 0:
+ gr.Info(errors)
+
+ state["wizard_prompt"] = 0
+
+ return gr.Textbox(visible= True, value =prompt) , gr.Textbox(visible= False), gr.Column(visible = True), *[gr.Column(visible = False)] * 2, *[gr.Textbox(visible= False)] * PROMPT_VARS_MAX
+
+def extract_wizard_prompt(prompt):
+ variables = []
+ values = {}
+ prompts = prompt.replace("\r" ,"").split("\n")
+ if sum(prompt.startswith("!") for prompt in prompts) > 1:
+ return "", variables, values, "Prompt is too complex for basic Prompt editor, switching to Advanced Prompt"
+
+ new_prompts = []
+ errors = ""
+ for prompt in prompts:
+ if prompt.startswith("!"):
+ variables, errors = prompt_parser.extract_variable_names(prompt)
+ if len(errors) > 0:
+ return "", variables, values, "Error parsing Prompt templace: " + errors
+ if len(variables) > PROMPT_VARS_MAX:
+ return "", variables, values, "Prompt is too complex for basic Prompt editor, switching to Advanced Prompt"
+ values, errors = prompt_parser.extract_variable_values(prompt)
+ if len(errors) > 0:
+ return "", variables, values, "Error parsing Prompt templace: " + errors
+ else:
+ variables_extra, errors = prompt_parser.extract_variable_names(prompt)
+ if len(errors) > 0:
+ return "", variables, values, "Error parsing Prompt templace: " + errors
+ variables += variables_extra
+ variables = [var for pos, var in enumerate(variables) if var not in variables[:pos]]
+ if len(variables) > PROMPT_VARS_MAX:
+ return "", variables, values, "Prompt is too complex for basic Prompt editor, switching to Advanced Prompt"
+
+ new_prompts.append(prompt)
+ wizard_prompt = "\n".join(new_prompts)
+ return wizard_prompt, variables, values, errors
+
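Putting the parser's constraints together, a template the basic editor can round-trip looks like the sketch below (the syntax is inferred from `extract_wizard_prompt` above, which tolerates at most one `!` macro line and no more than `PROMPT_VARS_MAX` distinct variables):

```python
template = (
    '! {subject}="a cat" : {place}="on a rooftop at dusk"\n'
    "A cinematic shot of {subject} {place}\n"
)
# Anything more complex falls back to the Advanced Prompt editor.
```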
+def fill_wizard_prompt(state, prompt, wizard_prompt):
+ def get_hidden_textboxes(num = PROMPT_VARS_MAX ):
+ return [gr.Textbox(value="", visible=False)] * num
+
+ hidden_column = gr.Column(visible = False)
+ visible_column = gr.Column(visible = True)
+
+ if advanced or state.get("apply_success") != 1:
+ return prompt, wizard_prompt, gr.Column(), gr.Column(), hidden_column, *get_hidden_textboxes()
+ prompt_parts= []
+ state["wizard_prompt"] = 0
+
+ wizard_prompt, variables, values, errors = extract_wizard_prompt(prompt)
+ if len(errors) > 0:
+ gr.Info( errors )
+ return gr.Textbox(prompt, visible=True), gr.Textbox(wizard_prompt, visible=False), visible_column, *[hidden_column] * 2, *get_hidden_textboxes()
+
+ for variable in variables:
+ value = values.get(variable, "")
+ prompt_parts.append(gr.Textbox( placeholder=variable, info= variable, visible= True, value= "\n".join(value) ))
+ any_macro = len(variables) > 0
+
+ prompt_parts += get_hidden_textboxes(PROMPT_VARS_MAX-len(prompt_parts))
+
+ state["variables"] = variables
+ state["wizard_prompt"] = 1
+
+ return gr.Textbox(prompt, visible = False), gr.Textbox(wizard_prompt, visible = True), hidden_column, visible_column, visible_column if any_macro else hidden_column, *prompt_parts
+
+def switch_prompt_type(state, prompt, wizard_prompt, *prompt_vars):
+ if advanced:
+ return fill_prompt_from_wizard(state, prompt, wizard_prompt, *prompt_vars)
+ else:
+ state["apply_success"] = 1
+ return fill_wizard_prompt(state, prompt, wizard_prompt)
+
+
+visible= False
+def switch_advanced(new_advanced, lset_name):
+ global advanced
+ advanced= new_advanced
+ lset_choices = [ (preset, preset) for preset in loras_presets]
+ lset_choices.append((get_new_preset_msg(advanced), ""))
+ if lset_name== get_new_preset_msg(True) or lset_name== get_new_preset_msg(False) or lset_name=="":
+ lset_name = get_new_preset_msg(advanced)
+
+ if only_allow_edit_in_advanced:
+ return gr.Row(visible=new_advanced), gr.Row(visible=new_advanced), gr.Button(visible=new_advanced), gr.Row(visible= not new_advanced), gr.Dropdown(choices=lset_choices, value= lset_name)
+ else:
+ return gr.Row(visible=new_advanced), gr.Row(visible=True), gr.Button(visible=True), gr.Row(visible= False), gr.Dropdown(choices=lset_choices, value= lset_name)
+
+def download_loras():
+ from huggingface_hub import snapshot_download
+
+
+ yield "
Please wait while the Loras are being downloaded", *[gr.Column(visible=False)] * 2
+ log_path = os.path.join(lora_dir, "log.txt")
+ if not os.path.isfile(log_path):
+ import shutil
+ tmp_path = os.path.join(lora_dir, "tmp_lora_download")
+
+ import glob
+ snapshot_download(repo_id="DeepBeepMeep/Wan2.1", allow_patterns="loras_i2v/*", local_dir= tmp_path)
+ for f in glob.glob(os.path.join(tmp_path, "loras_i2v", "*.*")):
+     if "README.txt" not in f:
+         shutil.move(f, lora_dir)
+
+
+ yield "
Loras have been completely downloaded", *[gr.Column(visible=True)] * 2
+
+ from datetime import datetime
+ dt = datetime.today().strftime('%Y-%m-%d')
+ with open( log_path, "w", encoding="utf-8") as writer:
+ writer.write(f"Loras downloaded on the {dt} at {time.time()} on the {time.time()}")
+
+ return
+def create_demo():
+ css= """
+ .title-with-lines {
+ display: flex;
+ align-items: center;
+ margin: 30px 0;
+ }
+ .line {
+ flex-grow: 1;
+ height: 1px;
+ background-color: #333;
+ }
+ h2 {
+ margin: 0 20px;
+ white-space: nowrap;
+ }
+"""
default_flow_shift = get_default_flow(transformer_filename_i2v if use_image2video else transformer_filename_t2v)
- with gr.Blocks() as demo:
- state = gr.State({})
+ with gr.Blocks(css=css, theme=gr.themes.Soft(primary_hue="emerald", neutral_hue="slate", text_size= "md")) as demo:
+ state_dict = {}
if use_image2video:
- gr.Markdown("
")
+ gr.Markdown("
")
else:
- gr.Markdown("
")
+ gr.Markdown("
")
- gr.Markdown("
Welcome to Wan 2.1GP by DeepBeepMeep, a super fast and low VRAM Video Generator !")
+ gr.Markdown("
Welcome to Wan 2.1GP a super fast and low VRAM AI Video Generator !")
- if use_image2video and False:
- pass
- else:
- gr.Markdown("The VRAM requirements will depend greatly of the resolution and the duration of the video, for instance :")
- gr.Markdown("- 848 x 480 with a 14B model: 80 frames (5s) : 8 GB of VRAM")
- gr.Markdown("- 848 x 480 with the 1.3B model: 80 frames (5s) : 5 GB of VRAM")
- gr.Markdown("- 1280 x 720 with a 14B model: 80 frames (5s): 11 GB of VRAM")
- gr.Markdown("It is not recommmended to generate a video longer than 8s (128 frames) even if there is still some VRAM left as some artifacts may appear")
- gr.Markdown("Please note that if your turn on compilation, the first denoising step of the first video generation will be slow due to the compilation. Therefore all your tests should be done with compilation turned off.")
+ with gr.Accordion("Click here for some Info on how to use Wan2GP and to download 20+ Loras", open = False):
+ if use_image2video and False:
+ pass
+ else:
+ gr.Markdown("The VRAM requirements will depend greatly of the resolution and the duration of the video, for instance :")
+ gr.Markdown("- 848 x 480 with a 14B model: 80 frames (5s) : 8 GB of VRAM")
+ gr.Markdown("- 848 x 480 with the 1.3B model: 80 frames (5s) : 5 GB of VRAM")
+ gr.Markdown("- 1280 x 720 with a 14B model: 80 frames (5s): 11 GB of VRAM")
+ gr.Markdown("It is not recommmended to generate a video longer than 8s (128 frames) even if there is still some VRAM left as some artifacts may appear")
+ gr.Markdown("Please note that if your turn on compilation, the first denoising step of the first video generation will be slow due to the compilation. Therefore all your tests should be done with compilation turned off.")
+ if use_image2video:
+ with gr.Row():
+ with gr.Row(scale =3):
+ gr.Markdown("
Wan2GP's Lora Festival ! Press the following button to download i2v Remade Loras collection (and bonuses Loras). Dont't forget first to make a backup of your Loras just in case.")
+ with gr.Row(scale =1):
+ download_loras_btn = gr.Button("---> Let the Lora's Festival Start !", scale =1)
+ with gr.Row():
+ download_status = gr.Markdown()
# css = """