mirror of https://github.com/Wan-Video/Wan2.1.git (synced 2025-11-03 22:04:21 +00:00)

no more pain

parent f2db023a3d
commit 959ab9e0c1
@@ -20,7 +20,7 @@ WanGP supports the Wan (and derived models), Hunyuan Video and LTV Video models
 **Follow DeepBeepMeep on Twitter/X to get the Latest News**: https://x.com/deepbeepmeep

 ## 🔥 Latest Updates :
-### September 2 2025: WanGP v8.3 - At last the pain stops
+### September 2 2025: WanGP v8.31 - At last the pain stops

 - This single new feature should give you the strength to face all the potential bugs of this new release:
 **Images Management (multiple additions or deletions, reordering) for Start Images / End Images / Images References.**
@@ -30,6 +30,7 @@ WanGP supports the Wan (and derived models), Hunyuan Video and LTV Video models
 - **Experimental Sage 3 Attention support**: you will need to deserve this one. First you need a Blackwell GPU (RTX50xx), then you will have to compile Sage 3, install it, and cross your fingers that there isn't any crash.


+*update 8.31: one shouldn't talk about bugs if one doesn't want to attract bugs*

 ### August 29 2025: WanGP v8.21 -  Here Goes Your Weekend

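Regarding the Sage 3 note above: a minimal, hypothetical sketch of how an app might probe for a SageAttention build and fall back to PyTorch's built-in SDPA when it is absent. This is not WanGP's actual selection logic, and the module name `sageattention` is an assumption about what a locally compiled Sage 3 build would expose.

```python
# Hypothetical backend probe, not WanGP's actual code.
# Assumption: a compiled Sage 3 build is importable as "sageattention".
def pick_attention_backend() -> str:
    try:
        import sageattention  # noqa: F401  # only present if you compiled and installed it
        return "sage"
    except ImportError:
        return "sdpa"  # PyTorch's scaled-dot-product attention, always available

print(f"attention backend: {pick_attention_backend()}")
```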
@@ -224,7 +224,9 @@ class AdvancedMediaGallery:
         n = len(get_list(gallery))
         sel = idx if (idx is not None and 0 <= idx < n) else None
         st["selected"] = sel
-        return gr.update(selected_index=sel), st
+        # return gr.update(selected_index=sel), st
+        # return gr.update(), st
+        return st

     def _on_gallery_change(self, value: List[Any], state: Dict[str, Any]) :
         # Fires when users add/drag/drop/delete via the Gallery itself.
@@ -238,8 +240,10 @@ class AdvancedMediaGallery:
         else:
             new_sel = old_sel
         st["selected"] = new_sel
-        return gr.update(value=items_filtered, selected_index=new_sel), st
+        # return gr.update(value=items_filtered, selected_index=new_sel), st
+        # return gr.update(value=items_filtered), st

+        return gr.update(), st

     def _on_add(self, files_payload: Any, state: Dict[str, Any], gallery):
         """
@@ -338,7 +342,8 @@ class AdvancedMediaGallery:
             return gr.update(value=[], selected_index=None), st
         new_sel = min(sel, len(items) - 1)
         st["items"] = items; st["selected"] = new_sel
-        return gr.update(value=items, selected_index=new_sel), st
+        # return gr.update(value=items, selected_index=new_sel), st
+        return gr.update(value=items), st

     def _on_move(self, delta: int, state: Dict[str, Any], gallery) :
         st = get_state(state); items: List[Any] = get_list(gallery); sel = st.get("selected", None)
@@ -352,8 +357,8 @@ class AdvancedMediaGallery:
         return gr.update(value=items, selected_index=j), st

     def _on_clear(self, state: Dict[str, Any]) :
-        st = {"items": [], "selected": None, "single": state.get("single", False), "mode": self.media_mode}
-        return gr.update(value=[], selected_index=None), st
+        st = {"items": [], "selected": None, "single": get_state(state).get("single", False), "mode": self.media_mode}
+        return gr.update(value=[], selected_index=0), st

     def _on_toggle_single(self, to_single: bool, state: Dict[str, Any]) :
         st = get_state(state); st["single"] = bool(to_single)
@@ -397,7 +402,8 @@ class AdvancedMediaGallery:
                 columns=self.columns,
                 show_label=self.show_label,
                 preview= True,
-                type="pil",
+                # type="pil",
+                file_types= list(IMAGE_EXTS) if self.media_mode == "image" else list(VIDEO_EXTS),
                 selected_index=self._initial_state["selected"],  # server-side selection
             )

@@ -424,7 +430,7 @@ class AdvancedMediaGallery:
         self.gallery.select(
             self._on_select,
             inputs=[self.state, self.gallery],
-            outputs=[self.gallery, self.state],
+            outputs=[self.state],
         )

         # Gallery value changed by user actions (click-to-add, drag-drop, internal remove, etc.)
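Most of the hunks above stop feeding `selected_index` updates back into the Gallery from its own event handlers and keep the selection in server-side state instead. A simplified, self-contained sketch of that pattern (a stand-in for `AdvancedMediaGallery`, not the actual WanGP wiring):

```python
import gradio as gr

def on_select(st: dict, evt: gr.SelectData) -> dict:
    # Record which thumbnail the user clicked; do not push a gallery update back,
    # so the component's own selection is not fought over by the server.
    st["selected"] = evt.index
    return st

with gr.Blocks() as demo:
    state = gr.State({"selected": None})
    gallery = gr.Gallery(label="Images", columns=4)
    gallery.select(on_select, inputs=[state], outputs=[state])

if __name__ == "__main__":
    demo.launch()
```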
							
								
								
									
wgp.py: 30 changed lines
@@ -60,7 +60,7 @@ AUTOSAVE_FILENAME = "queue.zip"
 PROMPT_VARS_MAX = 10

 target_mmgp_version = "3.5.11"
-WanGP_version = "8.3"
+WanGP_version = "8.31"
 settings_version = 2.28
 max_source_video_frames = 3000
 prompt_enhancer_image_caption_model, prompt_enhancer_image_caption_processor, prompt_enhancer_llm_model, prompt_enhancer_llm_tokenizer = None, None, None, None
@@ -186,6 +186,17 @@ def compute_sliding_window_no(current_video_length, sliding_window_size, discard
     return 1 + math.ceil(left_after_first_window / (sliding_window_size - discard_last_frames - reuse_frames))


+def clean_image_list(gradio_list):
+    if not isinstance(gradio_list, list): gradio_list = [gradio_list]
+    gradio_list = [ tup[0] if isinstance(tup, tuple) else tup for tup in gradio_list ]
+
+    if any( not isinstance(image, (Image.Image, str))  for image in gradio_list): return None
+    if any( isinstance(image, str) and not has_image_file_extension(image) for image in gradio_list): return None
+    gradio_list = [ convert_image( Image.open(img) if isinstance(img, str) else img  ) for img in gradio_list ]
+    return gradio_list
+
+
+
 def process_prompt_and_add_tasks(state, model_choice):
 
     if state.get("validate_success",0) != 1:
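For orientation, a standalone sketch of what `clean_image_list` normalizes. The stand-in helpers below only approximate `convert_image` and `has_image_file_extension`, which are defined elsewhere in wgp.py; the accepted extensions and sample values are illustrative.

```python
from PIL import Image

IMAGE_EXTS = (".png", ".jpg", ".jpeg", ".webp")       # assumption, for illustration only

def has_image_file_extension(path: str) -> bool:      # simplified stand-in
    return path.lower().endswith(IMAGE_EXTS)

def convert_image(img: Image.Image) -> Image.Image:   # simplified stand-in
    return img.convert("RGB")

def clean_image_list(gradio_list):
    # Accept a single item or a list; Gradio galleries may hand back (media, caption) tuples.
    if not isinstance(gradio_list, list):
        gradio_list = [gradio_list]
    gradio_list = [tup[0] if isinstance(tup, tuple) else tup for tup in gradio_list]
    # Reject anything that is neither a PIL image nor a path with an image extension.
    if any(not isinstance(image, (Image.Image, str)) for image in gradio_list):
        return None
    if any(isinstance(image, str) and not has_image_file_extension(image) for image in gradio_list):
        return None
    # Open file paths and normalize everything through convert_image.
    return [convert_image(Image.open(img) if isinstance(img, str) else img) for img in gradio_list]

# A mixed payload, as a gallery might deliver it:
print(clean_image_list([(Image.new("RGB", (8, 8)), None), Image.new("L", (8, 8))]))  # two RGB images
print(clean_image_list(["clip.mp4"]))  # None: not an image file
```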
@@ -436,11 +447,10 @@ def process_prompt_and_add_tasks(state, model_choice):
         if image_refs == None or len(image_refs) == 0:
             gr.Info("You must provide at least one Reference Image")
             return
-        if any(isinstance(image[0], str) for image in image_refs) :
+        image_refs = clean_image_list(image_refs)
+        if image_refs == None :
             gr.Info("A Reference Image should be an Image")
             return
-        if isinstance(image_refs, list):
-            image_refs = [ convert_image(tup[0]) for tup in image_refs ]
     else:
         image_refs = None

@@ -497,12 +507,10 @@ def process_prompt_and_add_tasks(state, model_choice):
         if image_start == None or isinstance(image_start, list) and len(image_start) == 0:
             gr.Info("You must provide a Start Image")
             return
-        if not isinstance(image_start, list):
-            image_start = [image_start]
-        if not all( not isinstance(img[0], str) for img in image_start) :
+        image_start = clean_image_list(image_start)
+        if image_start == None :
             gr.Info("Start Image should be an Image")
             return
-        image_start = [ convert_image(tup[0]) for tup in image_start ]
     else:
         image_start = None

@@ -510,15 +518,13 @@ def process_prompt_and_add_tasks(state, model_choice):
         if image_end == None or isinstance(image_end, list) and len(image_end) == 0:
             gr.Info("You must provide an End Image")
             return
-        if not isinstance(image_end, list):
-            image_end = [image_end]
-        if not all( not isinstance(img[0], str) for img in image_end) :
+        image_end = clean_image_list(image_end)
+        if image_end == None :
             gr.Info("End Image should be an Image")
             return
         if len(image_start) != len(image_end):
             gr.Info("The number of Start and End Images should be the same ")
             return
-        image_end = [ convert_image(tup[0]) for tup in image_end ]
     else:
         image_end = None

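Taken together, the three branches above now share one shape: normalize through `clean_image_list`, then bail out if it returns None. A condensed sketch of that flow, assuming the `clean_image_list` from the hunk above (or the standalone sketch earlier) is in scope, and reporting errors by return value instead of `gr.Info`:

```python
def validate_image_inputs(image_refs, image_start, image_end):
    """Condensed view of the refactored checks; returns an error message or None."""
    if image_refs is not None:
        image_refs = clean_image_list(image_refs)
        if image_refs is None:
            return "A Reference Image should be an Image"
    if image_start is not None:
        image_start = clean_image_list(image_start)
        if image_start is None:
            return "Start Image should be an Image"
    if image_end is not None:
        image_end = clean_image_list(image_end)
        if image_end is None:
            return "End Image should be an Image"
        if image_start is None or len(image_start) != len(image_end):
            return "The number of Start and End Images should be the same"
    return None
```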