diff --git a/requirements.txt b/requirements.txt
index a4cd3b3..70126cd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,7 +12,7 @@ ftfy
 dashscope
 imageio-ffmpeg
 # flash_attn
-gradio>=5.0.0
+gradio>=5.0.0
 numpy>=1.23.5,<2
 einops
 moviepy==1.0.3
@@ -22,4 +22,6 @@ mutagen
 decord
 onnxruntime-gpu
 rembg[gpu]==2.0.65
+matplotlib
+timm
 # rembg==2.0.65
\ No newline at end of file
diff --git a/wgp.py b/wgp.py
index 4f7d491..704df0c 100644
--- a/wgp.py
+++ b/wgp.py
@@ -174,9 +174,14 @@ def process_prompt_and_add_tasks(state, model_choice):
                 return
         else:
             video_mask = None
-        if "O" in video_prompt_type and inputs["max_frames"]==0:
-            gr.Info(f"In order to extend a video, you need to indicate how many frames you want to reuse in the source video.")
-            return
+        if "O" in video_prompt_type :
+            max_frames= inputs["max_frames"]
+            video_length = inputs["video_length"]
+            if max_frames ==0:
+                gr.Info(f"Warning: you have asked to reuse all the frames of the Control Video for the Alternate Video Ending. Please make sure the Control Video has fewer frames than the total number of frames to generate, otherwise it won't make a difference.")
+            elif max_frames >= video_length:
+                gr.Info(f"The number of frames to reuse from the Control Video ({max_frames}) for the Alternate Video Ending cannot be greater than the total number of frames to generate ({video_length}).")
+                return
 
         if isinstance(image_refs, list):
             image_refs = [ convert_image(tup[0]) for tup in image_refs ]
@@ -2175,7 +2180,7 @@ def generate_video(
     # gr.Info("Unable to generate a Video while a new configuration is being applied.")
     # return
 
-    if "P" in preload_model_policy:
+    if "P" in preload_model_policy and not "U" in preload_model_policy:
         while wan_model == None:
             time.sleep(1)
 
@@ -3236,6 +3241,8 @@ def del_in_sequence(source_str, letters):
 
 
 def refresh_video_prompt_type_image_refs(video_prompt_type, video_prompt_type_image_refs):
+    # video_prompt_type = add_to_sequence(video_prompt_type, "I") if video_prompt_type_image_refs else del_in_sequence(video_prompt_type, "I")
+    video_prompt_type_image_refs = "I" in video_prompt_type_image_refs
     video_prompt_type = add_to_sequence(video_prompt_type, "I") if video_prompt_type_image_refs else del_in_sequence(video_prompt_type, "I")
     return video_prompt_type, gr.update(visible = video_prompt_type_image_refs),gr.update(visible = video_prompt_type_image_refs)
 
@@ -3356,20 +3363,31 @@ def generate_video_tab(update_form = False, state_dict = None, ui_defaults = Non
                 with gr.Column(visible= "Vace" in model_filename ) as video_prompt_column:
                     video_prompt_type_value= ui_defaults.get("video_prompt_type","")
                     video_prompt_type = gr.Text(value= video_prompt_type_value, visible= False)
-                    video_prompt_type_video_guide = gr.Dropdown(
-                        choices=[
-                            ("None, use only the Text Prompt", ""),
-                            ("Extend the Control Video", "OV"),
-                            ("Transfer Human Motion from the Control Video", "PV"),
-                            ("Transfer Depth from the Control Video", "DV"),
-                            ("Recolorize the Control Video", "CV"),
-                            ("Control Video contains Open Pose, Depth or Black & White ", "V"),
-                            ("Inpainting of Control Video using Mask Video ", "MV"),
-                        ],
-                        value=filter_letters(video_prompt_type_value, "ODPCMV"),
-                        label="Video to Video"
-                    )
-                    video_prompt_type_image_refs = gr.Checkbox(value="I" in video_prompt_type_value , label= "Use References Images (Faces, Objects) to customize New Video", scale =1 )
+                    with gr.Row():
+                        video_prompt_type_video_guide = gr.Dropdown(
+                            choices=[
+                                ("None", ""),
+                                ("Transfer Human Motion from the Control Video", "PV"),
+                                ("Transfer Depth from the Control Video", "DV"),
+                                ("Recolorize the Control Video", "CV"),
+                                # ("Alternate Video Ending", "OV"),
+                                ("(adv) Video contains Open Pose, Depth or Black & White ", "V"),
+                                ("(adv) Inpainting of Control Video using Mask Video ", "MV"),
+                            ],
+                            value=filter_letters(video_prompt_type_value, "ODPCMV"),
+                            label="Video to Video", scale = 3
+                        )
+
+                        video_prompt_type_image_refs = gr.Dropdown(
+                            choices=[
+                                ("None", ""),
+                                ("Inject custom Faces / Objects", "I"),
+                            ],
+                            value="I" if "I" in video_prompt_type_value else "",
+                            label="Reference Images", scale = 2
+                        )
+
+                    # video_prompt_type_image_refs = gr.Checkbox(value="I" in video_prompt_type_value , label= "Use References Images (Faces, Objects) to customize New Video", scale =1 )
                     video_guide = gr.Video(label= "Control Video", visible= "V" in video_prompt_type_value, value= ui_defaults.get("video_guide", None),)
                     max_frames = gr.Slider(0, 100, value=ui_defaults.get("max_frames",0), step=1, label="Nb of frames in Control Video to use (0 = max)", visible= "V" in video_prompt_type_value, scale = 2 )
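Note on the refresh_video_prompt_type_image_refs change: video_prompt_type is stored as a string of single-letter flags, and the handler now receives the Reference Images dropdown value ("" or "I") instead of a checkbox boolean, hence the '"I" in video_prompt_type_image_refs' test. The snippet below is a minimal standalone sketch of that flag handling; add_to_sequence and del_in_sequence are simplified stand-ins for the helpers in wgp.py, and the gr.update(...) return values are replaced by a plain boolean for illustration only.

def add_to_sequence(source_str, letters):
    # Append any flag letters that are not already present in the string.
    return source_str + "".join(l for l in letters if l not in source_str)

def del_in_sequence(source_str, letters):
    # Remove the given flag letters from the string.
    return "".join(l for l in source_str if l not in letters)

def refresh_video_prompt_type_image_refs(video_prompt_type, video_prompt_type_image_refs):
    # The dropdown returns "" or "I"; convert it to a boolean before toggling the "I" flag.
    use_refs = "I" in video_prompt_type_image_refs
    video_prompt_type = add_to_sequence(video_prompt_type, "I") if use_refs else del_in_sequence(video_prompt_type, "I")
    return video_prompt_type, use_refs

print(refresh_video_prompt_type_image_refs("PV", "I"))   # ('PVI', True)
print(refresh_video_prompt_type_image_refs("PVI", ""))   # ('PV', False)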