DeepBeepMeep 2025-07-08 19:46:21 +02:00
parent b00354374c
commit b5d26646c5
3 changed files with 28 additions and 28 deletions

@@ -21,11 +21,11 @@ WanGP supports the Wan (and derived models), Hunyuan Video and LTX Video models
 ## 🔥 Latest Updates
 ### July 8 2025: WanGP v6.6 offers you **Vace Multitalk Dual Voices Fusionix Infinite**:
-**Vace**, our beloved super Control Net, has been combined with **Multitalk**, the new king in town that can animate two people speaking (**Dual Voices**). It is accelerated by the **Fusionix** model, and thanks to *Sliding Windows* support and *Adaptive Projected Guidance* (much slower, but should reduce the reddish tint in long videos) your two people will be able to talk for a very long time (which is an **Infinite** amount of time in the field of video generation).
+**Vace**, our beloved super Control Net, has been combined with **Multitalk**, the new king in town that can animate up to two people speaking (**Dual Voices**). It is accelerated by the **Fusionix** model, and thanks to *Sliding Windows* support and *Adaptive Projected Guidance* (much slower, but should reduce the reddish tint in long videos) your two people will be able to talk for a very long time (which is an **Infinite** amount of time in the field of video generation).
 Of course you will also get vanilla *Multitalk* and *Multitalk 720p* as a bonus.
-And since I am mister nice guy, I had enclosed as an exclusive an *Audio Separator* that will save you time isolating each voice when using Multitalk with two people.
+And since I am mister nice guy, I have enclosed as an exclusive an *Audio Separator* that will save you time isolating each voice when using Multitalk with two people.
 As I feel like resting a bit, I haven't produced a nice sample video to illustrate all these new capabilities. But here is the thing: I am sure you will publish your *Master Pieces* in the *Share Your Best Video* channel. The best one will be added to the *Announcements Channel* and will bring eternal fame to its author.
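The bundled *Audio Separator* is the convenient path inside WanGP itself. For readers who want to pre-split a two-speaker recording outside the app, here is a minimal sketch using SpeechBrain's pretrained SepFormer model; this is an assumption for illustration only, and WanGP's own separator may use a different model entirely:

```python
# Hedged sketch: split a two-speaker mix into one WAV per voice using
# SpeechBrain's pretrained SepFormer (wsj0-2mix checkpoint, 8 kHz audio).
# Illustration only; not WanGP's bundled Audio Separator.
import torchaudio
from speechbrain.pretrained import SepformerSeparation

model = SepformerSeparation.from_hparams(
    source="speechbrain/sepformer-wsj02mix",
    savedir="pretrained_models/sepformer-wsj02mix",
)

# est_sources has shape [batch, time, n_sources]: one slice per speaker.
est_sources = model.separate_file(path="two_people_talking.wav")
torchaudio.save("voice_1.wav", est_sources[:, :, 0].detach().cpu(), 8000)
torchaudio.save("voice_2.wav", est_sources[:, :, 1].detach().cpu(), 8000)
```

Each resulting WAV can then be assigned to one speaker when driving Multitalk with two voices.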

@@ -7,23 +7,23 @@ import torch.nn.functional as F
 major, minor = torch.cuda.get_device_capability(None)
 bfloat16_supported = major >= 8
-try:
-    from xformers.ops import memory_efficient_attention
-except ImportError:
-    memory_efficient_attention = None
-try:
-    import flash_attn_interface
-    FLASH_ATTN_3_AVAILABLE = True
-except ModuleNotFoundError:
-    FLASH_ATTN_3_AVAILABLE = False
-try:
-    import flash_attn
-    FLASH_ATTN_2_AVAILABLE = True
-except ModuleNotFoundError:
-    FLASH_ATTN_2_AVAILABLE = False
-    flash_attn = None
+# try:
+#     from xformers.ops import memory_efficient_attention
+# except ImportError:
+#     memory_efficient_attention = None
+# try:
+#     import flash_attn_interface
+#     FLASH_ATTN_3_AVAILABLE = True
+# except ModuleNotFoundError:
+#     FLASH_ATTN_3_AVAILABLE = False
+# try:
+#     import flash_attn
+#     FLASH_ATTN_2_AVAILABLE = True
+# except ModuleNotFoundError:
+#     FLASH_ATTN_2_AVAILABLE = False
+#     flash_attn = None
 try:
     from sageattention import sageattn_varlen
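The hunk above retires per-file probing of optional attention backends. For reference, here is a minimal standalone sketch of that optional-import pattern: each backend is probed once at import time and exposed through a flag, so the rest of the module never raises on a missing dependency. The `best_available_attention` helper is hypothetical, not part of WanGP:

```python
# Minimal sketch of the optional-dependency probing pattern being retired above.
try:
    from xformers.ops import memory_efficient_attention
except ImportError:
    memory_efficient_attention = None  # backend absent; callers must check for None

try:
    import flash_attn
    FLASH_ATTN_2_AVAILABLE = True
except ModuleNotFoundError:
    FLASH_ATTN_2_AVAILABLE = False
    flash_attn = None

def best_available_attention() -> str:
    """Return the name of the fastest backend found at import time (hypothetical helper)."""
    if FLASH_ATTN_2_AVAILABLE:
        return "flash-attn-2"
    if memory_efficient_attention is not None:
        return "xformers"
    return "sdpa"  # fall back to PyTorch's built-in scaled_dot_product_attention
```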

@@ -5,19 +5,19 @@ from einops import rearrange, repeat
 from .multitalk_utils import RotaryPositionalEmbedding1D, normalize_and_scale, split_token_counts_and_frame_ids
 from wan.modules.attention import pay_attention
-import xformers.ops
-try:
-    import flash_attn_interface
-    FLASH_ATTN_3_AVAILABLE = True
-except ModuleNotFoundError:
-    FLASH_ATTN_3_AVAILABLE = False
-try:
-    import flash_attn
-    FLASH_ATTN_2_AVAILABLE = True
-except ModuleNotFoundError:
-    FLASH_ATTN_2_AVAILABLE = False
+# import xformers.ops
+# try:
+#     import flash_attn_interface
+#     FLASH_ATTN_3_AVAILABLE = True
+# except ModuleNotFoundError:
+#     FLASH_ATTN_3_AVAILABLE = False
+# try:
+#     import flash_attn
+#     FLASH_ATTN_2_AVAILABLE = True
+# except ModuleNotFoundError:
+#     FLASH_ATTN_2_AVAILABLE = False
 import warnings
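Both files keep their import of `pay_attention` from `wan.modules.attention`, which suggests backend selection now lives in that one module rather than in each file. The exact signature of `pay_attention` is not shown in this diff, so the following is only a hedged sketch of what such a centralized entry point typically looks like, built on PyTorch's `scaled_dot_product_attention`:

```python
# Hedged sketch of a centralized attention entry point in the spirit of
# wan.modules.attention.pay_attention; the real signature may differ.
import torch
import torch.nn.functional as F

def pay_attention_sketch(
    q: torch.Tensor,  # [batch, heads, seq_q, head_dim]
    k: torch.Tensor,  # [batch, heads, seq_k, head_dim]
    v: torch.Tensor,  # [batch, heads, seq_k, head_dim]
    is_causal: bool = False,
) -> torch.Tensor:
    # PyTorch >= 2.0 dispatches to flash / memory-efficient kernels on its own
    # when they apply, so callers no longer need to probe backends one by one.
    return F.scaled_dot_product_attention(q, k, v, is_causal=is_causal)
```

With a single wrapper like this, every module calls one function and backend availability is decided in exactly one place, which is precisely why the per-file try/except blocks above could be commented out.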