From 34f9333fdcbe0c1ae3847bdbbea7edc04ebb9a00 Mon Sep 17 00:00:00 2001
From: DeepBeepMeep
Date: Tue, 8 Jul 2025 19:53:50 +0200
Subject: [PATCH] I need rest!

---
 wan/modules/attention.py | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/wan/modules/attention.py b/wan/modules/attention.py
index 758ad59..41a934b 100644
--- a/wan/modules/attention.py
+++ b/wan/modules/attention.py
@@ -7,23 +7,23 @@ import torch.nn.functional as F
 major, minor = torch.cuda.get_device_capability(None)
 bfloat16_supported = major >= 8
 
-# try:
-#     from xformers.ops import memory_efficient_attention
-# except ImportError:
-#     memory_efficient_attention = None
+try:
+    from xformers.ops import memory_efficient_attention
+except ImportError:
+    memory_efficient_attention = None
 
-# try:
-#     import flash_attn_interface
-#     FLASH_ATTN_3_AVAILABLE = True
-# except ModuleNotFoundError:
-#     FLASH_ATTN_3_AVAILABLE = False
+try:
+    import flash_attn_interface
+    FLASH_ATTN_3_AVAILABLE = True
+except ModuleNotFoundError:
+    FLASH_ATTN_3_AVAILABLE = False
 
-# try:
-#     import flash_attn
-#     FLASH_ATTN_2_AVAILABLE = True
-# except ModuleNotFoundError:
-#     FLASH_ATTN_2_AVAILABLE = False
-#     flash_attn = None
+try:
+    import flash_attn
+    FLASH_ATTN_2_AVAILABLE = True
+except ModuleNotFoundError:
+    FLASH_ATTN_2_AVAILABLE = False
+    flash_attn = None
 
 try:
     from sageattention import sageattn_varlen
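
The hunk above re-enables runtime probing of the optional xformers and flash-attn backends. For context, here is a minimal sketch of how such availability flags are typically consumed downstream; the attention_fallback helper is hypothetical and not part of this patch, only the flag and import names come from the diff. It assumes the (batch, seq_len, n_heads, head_dim) layout used by flash-attn and xformers.

import torch.nn.functional as F

try:
    from xformers.ops import memory_efficient_attention
except ImportError:
    memory_efficient_attention = None

try:
    import flash_attn
    FLASH_ATTN_2_AVAILABLE = True
except ModuleNotFoundError:
    FLASH_ATTN_2_AVAILABLE = False
    flash_attn = None

def attention_fallback(q, k, v):
    # Hypothetical helper (not in the patch): q, k, v are CUDA tensors
    # laid out as (batch, seq_len, n_heads, head_dim).
    if FLASH_ATTN_2_AVAILABLE:
        # flash_attn_func consumes the (B, S, H, D) layout directly.
        return flash_attn.flash_attn_func(q, k, v)
    if memory_efficient_attention is not None:
        # xformers memory_efficient_attention uses the same layout.
        return memory_efficient_attention(q, k, v)
    # Plain PyTorch fallback: SDPA expects (B, H, S, D), so transpose around it.
    out = F.scaled_dot_product_attention(
        q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2))
    return out.transpose(1, 2)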