This commit is contained in:
Chris Malone 2025-04-10 14:14:44 +10:00
commit 4b6149fdc9
2 changed files with 39 additions and 19 deletions

View File

@ -22,4 +22,6 @@ mutagen
decord decord
onnxruntime-gpu onnxruntime-gpu
rembg[gpu]==2.0.65 rembg[gpu]==2.0.65
matplotlib
timm
# rembg==2.0.65 # rembg==2.0.65

36
wgp.py
View File

@ -174,8 +174,13 @@ def process_prompt_and_add_tasks(state, model_choice):
return return
else: else:
video_mask = None video_mask = None
if "O" in video_prompt_type and inputs["max_frames"]==0: if "O" in video_prompt_type :
gr.Info(f"In order to extend a video, you need to indicate how many frames you want to reuse in the source video.") max_frames= inputs["max_frames"]
video_length = inputs["video_length"]
if max_frames ==0:
gr.Info(f"Warning: you have asked to reuse all the frames of the control Video for the Alternate Video Ending. Please make sure the number of frames in the control Video is lower than the total number of frames to generate, otherwise it won't make a difference.")
elif max_frames >= video_length:
gr.Info(f"The number of frames in the control Video to reuse ({max_frames}) in Alternate Video Ending cannot be larger than the total number of frames ({video_length}) to generate.")
return return
if isinstance(image_refs, list): if isinstance(image_refs, list):
@ -2175,7 +2180,7 @@ def generate_video(
# gr.Info("Unable to generate a Video while a new configuration is being applied.") # gr.Info("Unable to generate a Video while a new configuration is being applied.")
# return # return
if "P" in preload_model_policy: if "P" in preload_model_policy and not "U" in preload_model_policy:
while wan_model == None: while wan_model == None:
time.sleep(1) time.sleep(1)
@ -3236,6 +3241,8 @@ def del_in_sequence(source_str, letters):
def refresh_video_prompt_type_image_refs(video_prompt_type, video_prompt_type_image_refs): def refresh_video_prompt_type_image_refs(video_prompt_type, video_prompt_type_image_refs):
# video_prompt_type = add_to_sequence(video_prompt_type, "I") if video_prompt_type_image_refs else del_in_sequence(video_prompt_type, "I")
video_prompt_type_image_refs = "I" in video_prompt_type_image_refs
video_prompt_type = add_to_sequence(video_prompt_type, "I") if video_prompt_type_image_refs else del_in_sequence(video_prompt_type, "I") video_prompt_type = add_to_sequence(video_prompt_type, "I") if video_prompt_type_image_refs else del_in_sequence(video_prompt_type, "I")
return video_prompt_type, gr.update(visible = video_prompt_type_image_refs),gr.update(visible = video_prompt_type_image_refs) return video_prompt_type, gr.update(visible = video_prompt_type_image_refs),gr.update(visible = video_prompt_type_image_refs)
@ -3356,20 +3363,31 @@ def generate_video_tab(update_form = False, state_dict = None, ui_defaults = Non
with gr.Column(visible= "Vace" in model_filename ) as video_prompt_column: with gr.Column(visible= "Vace" in model_filename ) as video_prompt_column:
video_prompt_type_value= ui_defaults.get("video_prompt_type","") video_prompt_type_value= ui_defaults.get("video_prompt_type","")
video_prompt_type = gr.Text(value= video_prompt_type_value, visible= False) video_prompt_type = gr.Text(value= video_prompt_type_value, visible= False)
with gr.Row():
video_prompt_type_video_guide = gr.Dropdown( video_prompt_type_video_guide = gr.Dropdown(
choices=[ choices=[
("None, use only the Text Prompt", ""), ("None", ""),
("Extend the Control Video", "OV"),
("Transfer Human Motion from the Control Video", "PV"), ("Transfer Human Motion from the Control Video", "PV"),
("Transfer Depth from the Control Video", "DV"), ("Transfer Depth from the Control Video", "DV"),
("Recolorize the Control Video", "CV"), ("Recolorize the Control Video", "CV"),
("Control Video contains Open Pose, Depth or Black & White ", "V"), # ("Alternate Video Ending", "OV"),
("Inpainting of Control Video using Mask Video ", "MV"), ("(adv) Video contains Open Pose, Depth or Black & White ", "V"),
("(adv) Inpainting of Control Video using Mask Video ", "MV"),
], ],
value=filter_letters(video_prompt_type_value, "ODPCMV"), value=filter_letters(video_prompt_type_value, "ODPCMV"),
label="Video to Video" label="Video to Video", scale = 3
) )
video_prompt_type_image_refs = gr.Checkbox(value="I" in video_prompt_type_value , label= "Use References Images (Faces, Objects) to customize New Video", scale =1 )
video_prompt_type_image_refs = gr.Dropdown(
choices=[
("None", ""),
("Inject custom Faces / Objects", "I"),
],
value="I" if "I" in video_prompt_type_value else "",
label="Reference Images", scale = 2
)
# video_prompt_type_image_refs = gr.Checkbox(value="I" in video_prompt_type_value , label= "Use References Images (Faces, Objects) to customize New Video", scale =1 )
video_guide = gr.Video(label= "Control Video", visible= "V" in video_prompt_type_value, value= ui_defaults.get("video_guide", None),) video_guide = gr.Video(label= "Control Video", visible= "V" in video_prompt_type_value, value= ui_defaults.get("video_guide", None),)
max_frames = gr.Slider(0, 100, value=ui_defaults.get("max_frames",0), step=1, label="Nb of frames in Control Video to use (0 = max)", visible= "V" in video_prompt_type_value, scale = 2 ) max_frames = gr.Slider(0, 100, value=ui_defaults.get("max_frames",0), step=1, label="Nb of frames in Control Video to use (0 = max)", visible= "V" in video_prompt_type_value, scale = 2 )