From c132cc08a46f9fc61abf4fb8bbe4d137f17f7738 Mon Sep 17 00:00:00 2001
From: WanX-Video
Date: Tue, 25 Feb 2025 22:54:11 +0800
Subject: [PATCH 01/20] Update README.md

---
 README.md | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 7bf91c1..d22a050 100644
--- a/README.md
+++ b/README.md
@@ -22,10 +22,7 @@ In this repository, we present **Wan2.1**, a comprehensive and open suite of vid
 ## Video Demos
## 🔥 Latest News!! From 656b9157c73260acf9bae3bbd9c461938d17dbb3 Mon Sep 17 00:00:00 2001 From: WanX-Video Date: Wed, 26 Feb 2025 11:18:26 +0800 Subject: [PATCH 02/20] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index d22a050..5d26fde 100644 --- a/README.md +++ b/README.md @@ -320,6 +320,8 @@ We test the computational efficiency of different **Wan2.1** models on different > (3) For the 1.3B model on a single 4090 GPU, set `--offload_model True --t5_cpu`; > (4) For all testings, no prompt extension was applied, meaning `--use_prompt_extend` was not enabled. +> 💡Note: T2V-14B is slower than I2V-14B because the former samples 50 steps while the latter uses 40 steps. + ## Community Contributions - [DiffSynth-Studio](https://github.com/modelscope/DiffSynth-Studio) provides more support for Wan, including video-to-video, FP8 quantization, VRAM optimization, LoRA training, and more. Please refer to [their examples](https://github.com/modelscope/DiffSynth-Studio/tree/main/examples/wanvideo). From 9ab8f963c809c0696890589686a3216a456f6e4b Mon Sep 17 00:00:00 2001 From: WanX-Video Date: Wed, 26 Feb 2025 12:02:30 +0800 Subject: [PATCH 03/20] Update requirements.txt --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index b24cb85..d416e7b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,4 +13,4 @@ dashscope imageio-ffmpeg flash_attn gradio>=5.0.0 -numpy==1.24.4 +numpy>=1.23.5,<2 From fb6dbad54cdfb5d0b166845dfd76fee44fc775c8 Mon Sep 17 00:00:00 2001 From: Adrian Corduneanu Date: Wed, 26 Feb 2025 02:56:57 -0800 Subject: [PATCH 04/20] Update text2video.py to reduce GPU memory by emptying cache (#44) * Update text2video.py to reduce GPU memory by emptying cache If offload_model is set, empty_cache() must be called after the model is moved to CPU to actually free the GPU. I verified on a RTX 4090 that without calling empty_cache the model remains in memory and the subsequent vae decoding never finishes. 
* Update text2video.py only one empty_cache needed before vae decode --- wan/text2video.py | 1 + 1 file changed, 1 insertion(+) diff --git a/wan/text2video.py b/wan/text2video.py index 96cfa78..2400545 100644 --- a/wan/text2video.py +++ b/wan/text2video.py @@ -252,6 +252,7 @@ class WanT2V: x0 = latents if offload_model: self.model.cpu() + torch.cuda.empty_cache() if self.rank == 0: videos = self.vae.decode(x0) From 4c503a8bc27b83135c16bb7f8fa7cc8354f64846 Mon Sep 17 00:00:00 2001 From: cocktailpeanut <121128867+cocktailpeanut@users.noreply.github.com> Date: Wed, 26 Feb 2025 05:57:30 -0500 Subject: [PATCH 05/20] os.path.sep instead of / (#12) --- gradio/i2v_14B_singleGPU.py | 3 ++- gradio/t2i_14B_singleGPU.py | 3 ++- gradio/t2v_1.3B_singleGPU.py | 3 ++- gradio/t2v_14B_singleGPU.py | 3 ++- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/gradio/i2v_14B_singleGPU.py b/gradio/i2v_14B_singleGPU.py index 9a22297..100002b 100644 --- a/gradio/i2v_14B_singleGPU.py +++ b/gradio/i2v_14B_singleGPU.py @@ -2,6 +2,7 @@ import argparse import gc import os.path as osp +import os import sys import warnings @@ -10,7 +11,7 @@ import gradio as gr warnings.filterwarnings('ignore') # Model -sys.path.insert(0, '/'.join(osp.realpath(__file__).split('/')[:-2])) +sys.path.insert(0, os.path.sep.join(osp.realpath(__file__).split(os.path.sep)[:-2])) import wan from wan.configs import MAX_AREA_CONFIGS, WAN_CONFIGS from wan.utils.prompt_extend import DashScopePromptExpander, QwenPromptExpander diff --git a/gradio/t2i_14B_singleGPU.py b/gradio/t2i_14B_singleGPU.py index f81129a..cb42e38 100644 --- a/gradio/t2i_14B_singleGPU.py +++ b/gradio/t2i_14B_singleGPU.py @@ -1,6 +1,7 @@ # Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved. import argparse import os.path as osp +import os import sys import warnings @@ -9,7 +10,7 @@ import gradio as gr warnings.filterwarnings('ignore') # Model -sys.path.insert(0, '/'.join(osp.realpath(__file__).split('/')[:-2])) +sys.path.insert(0, os.path.sep.join(osp.realpath(__file__).split(os.path.sep)[:-2])) import wan from wan.configs import WAN_CONFIGS from wan.utils.prompt_extend import DashScopePromptExpander, QwenPromptExpander diff --git a/gradio/t2v_1.3B_singleGPU.py b/gradio/t2v_1.3B_singleGPU.py index 54706b2..87c414e 100644 --- a/gradio/t2v_1.3B_singleGPU.py +++ b/gradio/t2v_1.3B_singleGPU.py @@ -1,6 +1,7 @@ # Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved. import argparse import os.path as osp +import os import sys import warnings @@ -9,7 +10,7 @@ import gradio as gr warnings.filterwarnings('ignore') # Model -sys.path.insert(0, '/'.join(osp.realpath(__file__).split('/')[:-2])) +sys.path.insert(0, os.path.sep.join(osp.realpath(__file__).split(os.path.sep)[:-2])) import wan from wan.configs import WAN_CONFIGS from wan.utils.prompt_extend import DashScopePromptExpander, QwenPromptExpander diff --git a/gradio/t2v_14B_singleGPU.py b/gradio/t2v_14B_singleGPU.py index b7448ef..a9b7485 100644 --- a/gradio/t2v_14B_singleGPU.py +++ b/gradio/t2v_14B_singleGPU.py @@ -1,6 +1,7 @@ # Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved. 
import argparse import os.path as osp +import os import sys import warnings @@ -9,7 +10,7 @@ import gradio as gr warnings.filterwarnings('ignore') # Model -sys.path.insert(0, '/'.join(osp.realpath(__file__).split('/')[:-2])) +sys.path.insert(0, os.path.sep.join(osp.realpath(__file__).split(os.path.sep)[:-2])) import wan from wan.configs import WAN_CONFIGS from wan.utils.prompt_extend import DashScopePromptExpander, QwenPromptExpander From 4169800a95be7e1ea47c882e6d78ee3025faafac Mon Sep 17 00:00:00 2001 From: WanX-Video Date: Wed, 26 Feb 2025 20:33:18 +0800 Subject: [PATCH 06/20] update gradio (#58) --- gradio/t2v_1.3B_singleGPU.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gradio/t2v_1.3B_singleGPU.py b/gradio/t2v_1.3B_singleGPU.py index 87c414e..0a752d2 100644 --- a/gradio/t2v_1.3B_singleGPU.py +++ b/gradio/t2v_1.3B_singleGPU.py @@ -46,7 +46,7 @@ def t2v_generation(txt2vid_prompt, resolution, sd_steps, guide_scale, guide_scale=guide_scale, n_prompt=n_prompt, seed=seed, - offload_model=False) + offload_model=True) cache_video( tensor=video[None], From 8d75c013ac1d3b8c3b7fe4eb9c00712e6975b397 Mon Sep 17 00:00:00 2001 From: Yingda Chen Date: Wed, 26 Feb 2025 22:31:12 +0800 Subject: [PATCH 07/20] add modelscope download cli --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 5d26fde..8c95c58 100644 --- a/README.md +++ b/README.md @@ -79,6 +79,11 @@ pip install "huggingface_hub[cli]" huggingface-cli download Wan-AI/Wan2.1-T2V-14B --local-dir ./Wan2.1-T2V-14B ``` +Download models using modelscope-cli: +``` +pip install modelscope +modelscope download Wan-AI/Wan2.1-T2V-14B --local_dir ./Wan2.1-T2V-14B +``` #### Run Text-to-Video Generation This repository supports two Text-to-Video models (1.3B and 14B) and two resolutions (480P and 720P). The parameters and configurations for these models are as follows: From 9d3d4d784f506aa7e243184aa7d3f8e39e806414 Mon Sep 17 00:00:00 2001 From: Bakhtiyor Sulaymonov Date: Wed, 26 Feb 2025 22:18:18 +0500 Subject: [PATCH 08/20] Check for cuda is available for macos --- wan/text2video.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/wan/text2video.py b/wan/text2video.py index 96cfa78..aeaa9d4 100644 --- a/wan/text2video.py +++ b/wan/text2video.py @@ -57,7 +57,7 @@ class WanT2V: t5_cpu (`bool`, *optional*, defaults to False): Whether to place T5 model on CPU. Only works without t5_fsdp. 
""" - self.device = torch.device(f"cuda:{device_id}") + self.device = torch.device("mps" if torch.backends.mps.is_available() else "cpu") self.config = config self.rank = rank self.t5_cpu = t5_cpu @@ -259,7 +259,8 @@ class WanT2V: del sample_scheduler if offload_model: gc.collect() - torch.cuda.synchronize() + if torch.cuda.is_available(): + torch.cuda.synchronize() if dist.is_initialized(): dist.barrier() From 58726520f829abf30856883d91ecb1c60b38933c Mon Sep 17 00:00:00 2001 From: Bakhtiyor Sulaymonov Date: Wed, 26 Feb 2025 22:20:28 +0500 Subject: [PATCH 09/20] Adapted model for macOS with M1 Pro chip and other improvements --- .gitignore | 2 + generate.py | 187 +++++++++-------------- requirements.txt | 2 +- wan/configs/shared_config.py | 4 +- wan/distributed/fsdp.py | 2 +- wan/distributed/xdit_context_parallel.py | 4 +- wan/image2video.py | 9 +- wan/modules/attention.py | 31 ++-- wan/modules/model.py | 6 +- wan/modules/t5.py | 6 +- wan/utils/prompt_extend.py | 18 ++- wan/utils/qwen_vl_utils.py | 10 ++ 12 files changed, 136 insertions(+), 145 deletions(-) diff --git a/.gitignore b/.gitignore index 1f975d8..358fda4 100644 --- a/.gitignore +++ b/.gitignore @@ -34,3 +34,5 @@ Wan2.1-T2V-14B/ Wan2.1-T2V-1.3B/ Wan2.1-I2V-14B-480P/ Wan2.1-I2V-14B-720P/ +venv_wan/ +venv_wan_py310/ diff --git a/generate.py b/generate.py index f27bb98..5d57d2c 100644 --- a/generate.py +++ b/generate.py @@ -186,6 +186,11 @@ def _parse_args(): type=float, default=5.0, help="Classifier free guidance scale.") + parser.add_argument( + "--device", + type=str, + default=None, + help="Device to use for computation (mps, cpu).") args = parser.parse_args() @@ -207,43 +212,21 @@ def _init_logging(rank): def generate(args): - rank = int(os.getenv("RANK", 0)) - world_size = int(os.getenv("WORLD_SIZE", 1)) - local_rank = int(os.getenv("LOCAL_RANK", 0)) - device = local_rank - _init_logging(rank) - + # Set device based on args or availability + if args.device: + device = torch.device(args.device) + else: + device = torch.device("mps" if torch.backends.mps.is_available() else "cpu") + + _init_logging(0) # Use rank 0 logging for single-device + + # Ensure all torch operations use this device + torch.set_default_device(device) + if args.offload_model is None: - args.offload_model = False if world_size > 1 else True + args.offload_model = True # Default to True for single device to save memory logging.info( f"offload_model is not specified, set to {args.offload_model}.") - if world_size > 1: - torch.cuda.set_device(local_rank) - dist.init_process_group( - backend="nccl", - init_method="env://", - rank=rank, - world_size=world_size) - else: - assert not ( - args.t5_fsdp or args.dit_fsdp - ), f"t5_fsdp and dit_fsdp are not supported in non-distributed environments." - assert not ( - args.ulysses_size > 1 or args.ring_size > 1 - ), f"context parallel are not supported in non-distributed environments." - - if args.ulysses_size > 1 or args.ring_size > 1: - assert args.ulysses_size * args.ring_size == world_size, f"The number of ulysses_size and ring_size should be equal to the world size." 
- from xfuser.core.distributed import (initialize_model_parallel, - init_distributed_environment) - init_distributed_environment( - rank=dist.get_rank(), world_size=dist.get_world_size()) - - initialize_model_parallel( - sequence_parallel_degree=dist.get_world_size(), - ring_degree=args.ring_size, - ulysses_degree=args.ulysses_size, - ) if args.use_prompt_extend: if args.prompt_extend_method == "dashscope": @@ -253,58 +236,44 @@ def generate(args): prompt_expander = QwenPromptExpander( model_name=args.prompt_extend_model, is_vl="i2v" in args.task, - device=rank) + device=device) # Use MPS/CPU device instead of rank else: raise NotImplementedError( f"Unsupport prompt_extend_method: {args.prompt_extend_method}") cfg = WAN_CONFIGS[args.task] - if args.ulysses_size > 1: - assert cfg.num_heads % args.ulysses_size == 0, f"`num_heads` must be divisible by `ulysses_size`." - logging.info(f"Generation job args: {args}") logging.info(f"Generation model config: {cfg}") - if dist.is_initialized(): - base_seed = [args.base_seed] if rank == 0 else [None] - dist.broadcast_object_list(base_seed, src=0) - args.base_seed = base_seed[0] - if "t2v" in args.task or "t2i" in args.task: if args.prompt is None: args.prompt = EXAMPLE_PROMPT[args.task]["prompt"] logging.info(f"Input prompt: {args.prompt}") if args.use_prompt_extend: logging.info("Extending prompt ...") - if rank == 0: - prompt_output = prompt_expander( - args.prompt, - tar_lang=args.prompt_extend_target_lang, - seed=args.base_seed) - if prompt_output.status == False: - logging.info( - f"Extending prompt failed: {prompt_output.message}") - logging.info("Falling back to original prompt.") - input_prompt = args.prompt - else: - input_prompt = prompt_output.prompt - input_prompt = [input_prompt] + prompt_output = prompt_expander( + args.prompt, + tar_lang=args.prompt_extend_target_lang, + seed=args.base_seed) + if prompt_output.status == False: + logging.info( + f"Extending prompt failed: {prompt_output.message}") + logging.info("Falling back to original prompt.") + input_prompt = args.prompt else: - input_prompt = [None] - if dist.is_initialized(): - dist.broadcast_object_list(input_prompt, src=0) - args.prompt = input_prompt[0] + input_prompt = prompt_output.prompt + args.prompt = input_prompt logging.info(f"Extended prompt: {args.prompt}") logging.info("Creating WanT2V pipeline.") wan_t2v = wan.WanT2V( config=cfg, checkpoint_dir=args.ckpt_dir, - device_id=device, - rank=rank, - t5_fsdp=args.t5_fsdp, - dit_fsdp=args.dit_fsdp, - use_usp=(args.ulysses_size > 1 or args.ring_size > 1), + device_id=device, # Use MPS/CPU device instead of local_rank + rank=0, # Single device, so use rank 0 + t5_fsdp=False, # Disable FSDP (not supported on MPS) + dit_fsdp=False, # Disable FSDP (not supported on MPS) + use_usp=False, # Disable Ulysses/ring parallelism (single device) t5_cpu=args.t5_cpu, ) @@ -332,36 +301,30 @@ def generate(args): img = Image.open(args.image).convert("RGB") if args.use_prompt_extend: logging.info("Extending prompt ...") - if rank == 0: - prompt_output = prompt_expander( - args.prompt, - tar_lang=args.prompt_extend_target_lang, - image=img, - seed=args.base_seed) - if prompt_output.status == False: - logging.info( - f"Extending prompt failed: {prompt_output.message}") - logging.info("Falling back to original prompt.") - input_prompt = args.prompt - else: - input_prompt = prompt_output.prompt - input_prompt = [input_prompt] + prompt_output = prompt_expander( + args.prompt, + tar_lang=args.prompt_extend_target_lang, + image=img, + 
seed=args.base_seed) + if prompt_output.status == False: + logging.info( + f"Extending prompt failed: {prompt_output.message}") + logging.info("Falling back to original prompt.") + input_prompt = args.prompt else: - input_prompt = [None] - if dist.is_initialized(): - dist.broadcast_object_list(input_prompt, src=0) - args.prompt = input_prompt[0] + input_prompt = prompt_output.prompt + args.prompt = input_prompt logging.info(f"Extended prompt: {args.prompt}") logging.info("Creating WanI2V pipeline.") wan_i2v = wan.WanI2V( config=cfg, checkpoint_dir=args.ckpt_dir, - device_id=device, - rank=rank, - t5_fsdp=args.t5_fsdp, - dit_fsdp=args.dit_fsdp, - use_usp=(args.ulysses_size > 1 or args.ring_size > 1), + device_id=device, # Use MPS/CPU device instead of local_rank + rank=0, # Single device, so use rank 0 + t5_fsdp=False, # Disable FSDP (not supported on MPS) + dit_fsdp=False, # Disable FSDP (not supported on MPS) + use_usp=False, # Disable Ulysses/ring parallelism (single device) t5_cpu=args.t5_cpu, ) @@ -378,34 +341,32 @@ def generate(args): seed=args.base_seed, offload_model=args.offload_model) - if rank == 0: - if args.save_file is None: - formatted_time = datetime.now().strftime("%Y%m%d_%H%M%S") - formatted_prompt = args.prompt.replace(" ", "_").replace("/", - "_")[:50] - suffix = '.png' if "t2i" in args.task else '.mp4' - args.save_file = f"{args.task}_{args.size}_{args.ulysses_size}_{args.ring_size}_{formatted_prompt}_{formatted_time}" + suffix + # Save output (single device, so no rank check needed) + if args.save_file is None: + formatted_time = datetime.now().strftime("%Y%m%d_%H%M%S") + formatted_prompt = args.prompt.replace(" ", "_").replace("/", "_")[:50] + suffix = '.png' if "t2i" in args.task else '.mp4' + args.save_file = f"{args.task}_{args.size}_{formatted_prompt}_{formatted_time}" + suffix - if "t2i" in args.task: - logging.info(f"Saving generated image to {args.save_file}") - cache_image( - tensor=video.squeeze(1)[None], - save_file=args.save_file, - nrow=1, - normalize=True, - value_range=(-1, 1)) - else: - logging.info(f"Saving generated video to {args.save_file}") - cache_video( - tensor=video[None], - save_file=args.save_file, - fps=cfg.sample_fps, - nrow=1, - normalize=True, - value_range=(-1, 1)) + if "t2i" in args.task: + logging.info(f"Saving generated image to {args.save_file}") + cache_image( + tensor=video.squeeze(1)[None], + save_file=args.save_file, + nrow=1, + normalize=True, + value_range=(-1, 1)) + else: + logging.info(f"Saving generated video to {args.save_file}") + cache_video( + tensor=video[None], + save_file=args.save_file, + fps=cfg.sample_fps, + nrow=1, + normalize=True, + value_range=(-1, 1)) logging.info("Finished.") - if __name__ == "__main__": args = _parse_args() generate(args) diff --git a/requirements.txt b/requirements.txt index d416e7b..e40a45d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,6 +11,6 @@ easydict ftfy dashscope imageio-ffmpeg -flash_attn +# flash_attn gradio>=5.0.0 numpy>=1.23.5,<2 diff --git a/wan/configs/shared_config.py b/wan/configs/shared_config.py index 04a9f45..62e634d 100644 --- a/wan/configs/shared_config.py +++ b/wan/configs/shared_config.py @@ -7,11 +7,11 @@ wan_shared_cfg = EasyDict() # t5 wan_shared_cfg.t5_model = 'umt5_xxl' -wan_shared_cfg.t5_dtype = torch.bfloat16 +wan_shared_cfg.t5_dtype = torch.float32 wan_shared_cfg.text_len = 512 # transformer -wan_shared_cfg.param_dtype = torch.bfloat16 +wan_shared_cfg.param_dtype = torch.float32 # inference wan_shared_cfg.num_train_timesteps = 1000 diff --git 
a/wan/distributed/fsdp.py b/wan/distributed/fsdp.py index 258d4af..ccbf21f 100644 --- a/wan/distributed/fsdp.py +++ b/wan/distributed/fsdp.py @@ -10,7 +10,7 @@ from torch.distributed.fsdp.wrap import lambda_auto_wrap_policy def shard_model( model, device_id, - param_dtype=torch.bfloat16, + param_dtype=torch.float32, reduce_dtype=torch.float32, buffer_dtype=torch.float32, process_group=None, diff --git a/wan/distributed/xdit_context_parallel.py b/wan/distributed/xdit_context_parallel.py index 01936ce..29dd50f 100644 --- a/wan/distributed/xdit_context_parallel.py +++ b/wan/distributed/xdit_context_parallel.py @@ -151,9 +151,9 @@ def usp_attn_forward(self, seq_lens, grid_sizes, freqs, - dtype=torch.bfloat16): + dtype=torch.float32): b, s, n, d = *x.shape[:2], self.num_heads, self.head_dim - half_dtypes = (torch.float16, torch.bfloat16) + half_dtypes = (torch.float16, torch.float32) def half(x): return x if x.dtype in half_dtypes else x.to(dtype) diff --git a/wan/image2video.py b/wan/image2video.py index 468f17c..55d3f57 100644 --- a/wan/image2video.py +++ b/wan/image2video.py @@ -63,7 +63,14 @@ class WanI2V: init_on_cpu (`bool`, *optional*, defaults to True): Enable initializing Transformer Model on CPU. Only works without FSDP or USP. """ - self.device = torch.device(f"cuda:{device_id}") + # Check if device_id is a torch.device instance + if isinstance(device_id, torch.device): + self.device = device_id + elif device_id == "mps" or (isinstance(device_id, int) and device_id == -1): + self.device = torch.device("mps" if torch.backends.mps.is_available() else "cpu") + else: + self.device = torch.device(f"cuda:{device_id}") + self.config = config self.rank = rank self.use_usp = use_usp diff --git a/wan/modules/attention.py b/wan/modules/attention.py index 4dbbe03..6dcc54a 100644 --- a/wan/modules/attention.py +++ b/wan/modules/attention.py @@ -33,25 +33,26 @@ def flash_attention( causal=False, window_size=(-1, -1), deterministic=False, - dtype=torch.bfloat16, + dtype=torch.float32, version=None, ): """ - q: [B, Lq, Nq, C1]. - k: [B, Lk, Nk, C1]. - v: [B, Lk, Nk, C2]. Nq must be divisible by Nk. - q_lens: [B]. - k_lens: [B]. - dropout_p: float. Dropout probability. - softmax_scale: float. The scaling of QK^T before applying softmax. - causal: bool. Whether to apply causal attention mask. - window_size: (left right). If not (-1, -1), apply sliding window local attention. - deterministic: bool. If True, slightly slower and uses more memory. - dtype: torch.dtype. Apply when dtype of q/k/v is not float16/bfloat16. + Flash attention implementation with fallback for CPU and MPS devices """ - half_dtypes = (torch.float16, torch.bfloat16) + half_dtypes = (torch.float16, torch.float32) assert dtype in half_dtypes - assert q.device.type == 'cuda' and q.size(-1) <= 256 + assert q.size(-1) <= 256, "Sequence length exceeds the maximum limit." 
+ + # Add CPU/MPS fallback implementation + if not (FLASH_ATTN_2_AVAILABLE or FLASH_ATTN_3_AVAILABLE) or q.device.type in ['cpu', 'mps']: + # Implement standard attention for CPU/MPS + return attention(q, k, v, + q_lens=q_lens, + k_lens=k_lens, + dropout_p=dropout_p, + softmax_scale=softmax_scale, + causal=causal, + window_size=window_size) # params b, lq, lk, out_dtype = q.size(0), q.size(1), k.size(1), q.dtype @@ -142,7 +143,7 @@ def attention( causal=False, window_size=(-1, -1), deterministic=False, - dtype=torch.bfloat16, + dtype=torch.float32, fa_version=None, ): if FLASH_ATTN_2_AVAILABLE or FLASH_ATTN_3_AVAILABLE: diff --git a/wan/modules/model.py b/wan/modules/model.py index b65021c..7144055 100644 --- a/wan/modules/model.py +++ b/wan/modules/model.py @@ -16,7 +16,7 @@ def sinusoidal_embedding_1d(dim, position): # preprocess assert dim % 2 == 0 half = dim // 2 - position = position.type(torch.float64) + position = position.type(torch.float32) # calculation sinusoid = torch.outer( @@ -31,7 +31,7 @@ def rope_params(max_seq_len, dim, theta=10000): freqs = torch.outer( torch.arange(max_seq_len), 1.0 / torch.pow(theta, - torch.arange(0, dim, 2).to(torch.float64).div(dim))) + torch.arange(0, dim, 2).to(torch.float32).div(dim))) freqs = torch.polar(torch.ones_like(freqs), freqs) return freqs @@ -49,7 +49,7 @@ def rope_apply(x, grid_sizes, freqs): seq_len = f * h * w # precompute multipliers - x_i = torch.view_as_complex(x[i, :seq_len].to(torch.float64).reshape( + x_i = torch.view_as_complex(x[i, :seq_len].to(torch.float32).reshape( seq_len, n, -1, 2)) freqs_i = torch.cat([ freqs[0][:f].view(f, 1, 1, -1).expand(f, h, w, -1), diff --git a/wan/modules/t5.py b/wan/modules/t5.py index c841b04..3a26ed1 100644 --- a/wan/modules/t5.py +++ b/wan/modules/t5.py @@ -61,7 +61,7 @@ class T5LayerNorm(nn.Module): def forward(self, x): x = x * torch.rsqrt(x.float().pow(2).mean(dim=-1, keepdim=True) + self.eps) - if self.weight.dtype in [torch.float16, torch.bfloat16]: + if self.weight.dtype in [torch.float16, torch.float32]: x = x.type_as(self.weight) return self.weight * x @@ -474,8 +474,8 @@ class T5EncoderModel: def __init__( self, text_len, - dtype=torch.bfloat16, - device=torch.cuda.current_device(), + dtype=torch.float32, + device='mps' if torch.backends.mps.is_available() else 'cpu', checkpoint_path=None, tokenizer_path=None, shard_fn=None, diff --git a/wan/utils/prompt_extend.py b/wan/utils/prompt_extend.py index e7a21b5..00f4f20 100644 --- a/wan/utils/prompt_extend.py +++ b/wan/utils/prompt_extend.py @@ -44,7 +44,7 @@ LM_EN_SYS_PROMPT = \ '''1. For overly concise user inputs, reasonably infer and add details to make the video more complete and appealing without altering the original intent;\n''' \ '''2. Enhance the main features in user descriptions (e.g., appearance, expression, quantity, race, posture, etc.), visual style, spatial relationships, and shot scales;\n''' \ '''3. Output the entire prompt in English, retaining original text in quotes and titles, and preserving key input information;\n''' \ - '''4. Prompts should match the user’s intent and accurately reflect the specified style. If the user does not specify a style, choose the most appropriate style for the video;\n''' \ + '''4. Prompts should match the user's intent and accurately reflect the specified style. If the user does not specify a style, choose the most appropriate style for the video;\n''' \ '''5. Emphasize motion information and different camera movements present in the input description;\n''' \ '''6. 
Your output should have natural motion attributes. For the target category described, add natural actions of the target using simple and direct verbs;\n''' \ '''7. The revised prompt should be around 80-100 characters long.\n''' \ @@ -82,7 +82,7 @@ VL_EN_SYS_PROMPT = \ '''1. For overly brief user inputs, reasonably infer and supplement details without changing the original meaning, making the image more complete and visually appealing;\n''' \ '''2. Improve the characteristics of the main subject in the user's description (such as appearance, expression, quantity, ethnicity, posture, etc.), rendering style, spatial relationships, and camera angles;\n''' \ '''3. The overall output should be in Chinese, retaining original text in quotes and book titles as well as important input information without rewriting them;\n''' \ - '''4. The prompt should match the user’s intent and provide a precise and detailed style description. If the user has not specified a style, you need to carefully analyze the style of the user's provided photo and use that as a reference for rewriting;\n''' \ + '''4. The prompt should match the user's intent and provide a precise and detailed style description. If the user has not specified a style, you need to carefully analyze the style of the user's provided photo and use that as a reference for rewriting;\n''' \ '''5. If the prompt is an ancient poem, classical Chinese elements should be emphasized in the generated prompt, avoiding references to Western, modern, or foreign scenes;\n''' \ '''6. You need to emphasize movement information in the input and different camera angles;\n''' \ '''7. Your output should convey natural movement attributes, incorporating natural actions related to the described subject category, using simple and direct verbs as much as possible;\n''' \ @@ -93,7 +93,7 @@ VL_EN_SYS_PROMPT = \ '''1. A Japanese fresh film-style photo of a young East Asian girl with double braids sitting by the boat. The girl wears a white square collar puff sleeve dress, decorated with pleats and buttons. She has fair skin, delicate features, and slightly melancholic eyes, staring directly at the camera. Her hair falls naturally, with bangs covering part of her forehead. She rests her hands on the boat, appearing natural and relaxed. The background features a blurred outdoor scene, with hints of blue sky, mountains, and some dry plants. The photo has a vintage film texture. A medium shot of a seated portrait.\n''' \ '''2. An anime illustration in vibrant thick painting style of a white girl with cat ears holding a folder, showing a slightly dissatisfied expression. She has long dark purple hair and red eyes, wearing a dark gray skirt and a light gray top with a white waist tie and a name tag in bold Chinese characters that says "紫阳" (Ziyang). The background has a light yellow indoor tone, with faint outlines of some furniture visible. A pink halo hovers above her head, in a smooth Japanese cel-shading style. A close-up shot from a slightly elevated perspective.\n''' \ '''3. CG game concept digital art featuring a huge crocodile with its mouth wide open, with trees and thorns growing on its back. The crocodile's skin is rough and grayish-white, resembling stone or wood texture. Its back is lush with trees, shrubs, and thorny protrusions. With its mouth agape, the crocodile reveals a pink tongue and sharp teeth. The background features a dusk sky with some distant trees, giving the overall scene a dark and cold atmosphere. A close-up from a low angle.\n''' \ - '''4. 
In the style of an American drama promotional poster, Walter White sits in a metal folding chair wearing a yellow protective suit, with the words "Breaking Bad" written in sans-serif English above him, surrounded by piles of dollar bills and blue plastic storage boxes. He wears glasses, staring forward, dressed in a yellow jumpsuit, with his hands resting on his knees, exuding a calm and confident demeanor. The background shows an abandoned, dim factory with light filtering through the windows. There’s a noticeable grainy texture. A medium shot with a straight-on close-up of the character.\n''' \ + '''4. In the style of an American drama promotional poster, Walter White sits in a metal folding chair wearing a yellow protective suit, with the words "Breaking Bad" written in sans-serif English above him, surrounded by piles of dollar bills and blue plastic storage boxes. He wears glasses, staring forward, dressed in a yellow jumpsuit, with his hands resting on his knees, exuding a calm and confident demeanor. The background shows an abandoned, dim factory with light filtering through the windows. There's a noticeable grainy texture. A medium shot with a straight-on close-up of the character.\n''' \ '''Directly output the rewritten English text.''' @@ -347,7 +347,7 @@ class QwenPromptExpander(PromptExpander): use_fast=True) self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( self.model_name, - torch_dtype=torch.bfloat16 if FLASH_VER == 2 else + torch_dtype=torch.float32 if FLASH_VER == 2 else torch.float16 if "AWQ" in self.model_name else "auto", attn_implementation="flash_attention_2" if FLASH_VER == 2 else None, @@ -363,6 +363,16 @@ class QwenPromptExpander(PromptExpander): device_map="cpu") self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) + # Initialize device + if isinstance(device, torch.device): + self.device = device + elif device == "mps" or (isinstance(device, str) and "mps" in device): + self.device = torch.device("mps" if torch.backends.mps.is_available() else "cpu") + elif isinstance(device, int) and device == -1: + self.device = torch.device("cpu") + else: + self.device = torch.device(f"cuda:{device}" if isinstance(device, int) else device) + def extend(self, prompt, system_prompt, seed=-1, *args, **kwargs): self.model = self.model.to(self.device) messages = [{ diff --git a/wan/utils/qwen_vl_utils.py b/wan/utils/qwen_vl_utils.py index 3c682e6..c8194c2 100644 --- a/wan/utils/qwen_vl_utils.py +++ b/wan/utils/qwen_vl_utils.py @@ -274,6 +274,12 @@ def get_video_reader_backend() -> str: def fetch_video( ele: dict, image_factor: int = IMAGE_FACTOR) -> torch.Tensor | list[Image.Image]: + # Handle MPS device compatibility + original_device = None + if isinstance(ele.get("video"), torch.Tensor) and ele["video"].device.type == "mps": + original_device = ele["video"].device + ele["video"] = ele["video"].cpu() + if isinstance(ele["video"], str): video_reader_backend = get_video_reader_backend() video = VIDEO_READER_BACKENDS[video_reader_backend](ele) @@ -324,6 +330,10 @@ def fetch_video( images.extend([images[-1]] * (nframes - len(images))) return images + # Return to original device if needed + if original_device is not None and isinstance(video, torch.Tensor): + video = video.to(original_device) + def extract_vision_info( conversations: list[dict] | list[list[dict]]) -> list[dict]: From b6a0d1e594ba6e2257d9cbfd24f7938ffc264a59 Mon Sep 17 00:00:00 2001 From: Bakhtiyor Sulaymonov Date: Wed, 26 Feb 2025 22:27:34 +0500 Subject: [PATCH 10/20] Update README with macOS 
setup and usage instructions --- README.md | 424 ++++++------------------------------------------------ 1 file changed, 41 insertions(+), 383 deletions(-) diff --git a/README.md b/README.md index 8c95c58..155850e 100644 --- a/README.md +++ b/README.md @@ -1,402 +1,60 @@ -# Wan2.1 +# Wan2.1 Text-to-Video Model -

- 💜 Wan    |    🖥️ GitHub    |   🤗 Hugging Face   |   🤖 ModelScope   |    📑 Paper (Coming soon)    |    📑 Blog    |   💬 WeChat Group   |    📖 Discord   -
+## Introduction ------ +The Wan2.1 model is an open-source text-to-video generation model. It transforms textual descriptions into video sequences, leveraging advanced machine learning techniques. -[**Wan: Open and Advanced Large-Scale Video Generative Models**]("") +## Changes for macOS -In this repository, we present **Wan2.1**, a comprehensive and open suite of video foundation models that pushes the boundaries of video generation. **Wan2.1** offers these key features: -- 👍 **SOTA Performance**: **Wan2.1** consistently outperforms existing open-source models and state-of-the-art commercial solutions across multiple benchmarks. -- 👍 **Supports Consumer-grade GPUs**: The T2V-1.3B model requires only 8.19 GB VRAM, making it compatible with almost all consumer-grade GPUs. It can generate a 5-second 480P video on an RTX 4090 in about 4 minutes (without optimization techniques like quantization). Its performance is even comparable to some closed-source models. -- 👍 **Multiple Tasks**: **Wan2.1** excels in Text-to-Video, Image-to-Video, Video Editing, Text-to-Image, and Video-to-Audio, advancing the field of video generation. -- 👍 **Visual Text Generation**: **Wan2.1** is the first video model capable of generating both Chinese and English text, featuring robust text generation that enhances its practical applications. -- 👍 **Powerful Video VAE**: **Wan-VAE** delivers exceptional efficiency and performance, encoding and decoding 1080P videos of any length while preserving temporal information, making it an ideal foundation for video and image generation. +This version includes modifications to make the model compatible with macOS, specifically for systems using the M1 Pro chip. Key changes include: -## Video Demos +- Adaptation of CUDA-specific code to work with MPS (Metal Performance Shaders) on macOS. +- Environment variable settings for MPS fallback to CPU for unsupported operations. +- Adjustments to command-line arguments for better compatibility with macOS. -

+## Installation Instructions -## 🔥 Latest News!! +Follow these steps to set up the environment on macOS: -* Feb 25, 2025: 👋 We've released the inference code and weights of Wan2.1. +1. **Install Homebrew**: If not already installed, use Homebrew to manage packages. + ```bash + /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + ``` +2. **Install Python 3.10+**: + ```bash + brew install python@3.10 + ``` -## 📑 Todo List -- Wan2.1 Text-to-Video - - [x] Multi-GPU Inference code of the 14B and 1.3B models - - [x] Checkpoints of the 14B and 1.3B models - - [x] Gradio demo - - [ ] Diffusers integration - - [ ] ComfyUI integration -- Wan2.1 Image-to-Video - - [x] Multi-GPU Inference code of the 14B model - - [x] Checkpoints of the 14B model - - [x] Gradio demo - - [ ] Diffusers integration - - [ ] ComfyUI integration +3. **Create and Activate a Virtual Environment**: + ```bash + python3.10 -m venv venv_wan + source venv_wan/bin/activate + ``` +4. **Install Dependencies**: + ```bash + pip install -r requirements.txt + pip install einops + ``` -## Quickstart +## Usage -#### Installation -Clone the repo: -``` -git clone https://github.com/Wan-Video/Wan2.1.git -cd Wan2.1 +To generate a video, use the following command: + +```bash +export PYTORCH_ENABLE_MPS_FALLBACK=1 +python generate.py --task t2v-1.3B --size "480*832" --frame_num 16 --sample_steps 25 --ckpt_dir ./Wan2.1-T2V-1.3B --offload_model True --t5_cpu --device mps --prompt "Lion running under snow in Samarkand" --save_file output_video.mp4 ``` -Install dependencies: -``` -# Ensure torch >= 2.4.0 -pip install -r requirements.txt -``` +## Optimization Tips +- **Use CPU for Large Models**: If you encounter memory issues, use `--device cpu`. +- **Reduce Resolution and Frame Count**: Use smaller resolutions and fewer frames to reduce memory usage. +- **Monitor System Resources**: Keep an eye on memory usage and adjust parameters as needed. -#### Model Download +## Acknowledgments -| Models | Download Link | Notes | -| --------------|-------------------------------------------------------------------------------|-------------------------------| -| T2V-14B | 🤗 [Huggingface](https://huggingface.co/Wan-AI/Wan2.1-T2V-14B) 🤖 [ModelScope](https://www.modelscope.cn/models/Wan-AI/Wan2.1-T2V-14B) | Supports both 480P and 720P -| I2V-14B-720P | 🤗 [Huggingface](https://huggingface.co/Wan-AI/Wan2.1-I2V-14B-720P) 🤖 [ModelScope](https://www.modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-720P) | Supports 720P -| I2V-14B-480P | 🤗 [Huggingface](https://huggingface.co/Wan-AI/Wan2.1-I2V-14B-480P) 🤖 [ModelScope](https://www.modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-480P) | Supports 480P -| T2V-1.3B | 🤗 [Huggingface](https://huggingface.co/Wan-AI/Wan2.1-T2V-1.3B) 🤖 [ModelScope](https://www.modelscope.cn/models/Wan-AI/Wan2.1-T2V-1.3B) | Supports 480P - -> 💡Note: The 1.3B model is capable of generating videos at 720P resolution. However, due to limited training at this resolution, the results are generally less stable compared to 480P. For optimal performance, we recommend using 480P resolution. 
- - -Download models using huggingface-cli: -``` -pip install "huggingface_hub[cli]" -huggingface-cli download Wan-AI/Wan2.1-T2V-14B --local-dir ./Wan2.1-T2V-14B -``` - -Download models using modelscope-cli: -``` -pip install modelscope -modelscope download Wan-AI/Wan2.1-T2V-14B --local_dir ./Wan2.1-T2V-14B -``` -#### Run Text-to-Video Generation - -This repository supports two Text-to-Video models (1.3B and 14B) and two resolutions (480P and 720P). The parameters and configurations for these models are as follows: - - - - - - - - - - - - - - - - - - - - - - - - - - - -
TaskResolutionModel
480P720P
t2v-14B✔️✔️Wan2.1-T2V-14B
t2v-1.3B✔️Wan2.1-T2V-1.3B
- - -##### (1) Without Prompt Extention - -To facilitate implementation, we will start with a basic version of the inference process that skips the [prompt extension](#2-using-prompt-extention) step. - -- Single-GPU inference - -``` -python generate.py --task t2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-T2V-14B --prompt "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage." -``` - -If you encounter OOM (Out-of-Memory) issues, you can use the `--offload_model True` and `--t5_cpu` options to reduce GPU memory usage. For example, on an RTX 4090 GPU: - -``` -python generate.py --task t2v-1.3B --size 832*480 --ckpt_dir ./Wan2.1-T2V-1.3B --offload_model True --t5_cpu --sample_shift 8 --sample_guide_scale 6 --prompt "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage." -``` - -> 💡Note: If you are using the `T2V-1.3B` model, we recommend setting the parameter `--sample_guide_scale 6`. The `--sample_shift parameter` can be adjusted within the range of 8 to 12 based on the performance. - - -- Multi-GPU inference using FSDP + xDiT USP - -``` -pip install "xfuser>=0.4.1" -torchrun --nproc_per_node=8 generate.py --task t2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-T2V-14B --dit_fsdp --t5_fsdp --ulysses_size 8 --prompt "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage." -``` - - -##### (2) Using Prompt Extention - -Extending the prompts can effectively enrich the details in the generated videos, further enhancing the video quality. Therefore, we recommend enabling prompt extension. We provide the following two methods for prompt extension: - -- Use the Dashscope API for extension. - - Apply for a `dashscope.api_key` in advance ([EN](https://www.alibabacloud.com/help/en/model-studio/getting-started/first-api-call-to-qwen) | [CN](https://help.aliyun.com/zh/model-studio/getting-started/first-api-call-to-qwen)). - - Configure the environment variable `DASH_API_KEY` to specify the Dashscope API key. For users of Alibaba Cloud's international site, you also need to set the environment variable `DASH_API_URL` to 'https://dashscope-intl.aliyuncs.com/api/v1'. For more detailed instructions, please refer to the [dashscope document](https://www.alibabacloud.com/help/en/model-studio/developer-reference/use-qwen-by-calling-api?spm=a2c63.p38356.0.i1). - - Use the `qwen-plus` model for text-to-video tasks and `qwen-vl-max` for image-to-video tasks. - - You can modify the model used for extension with the parameter `--prompt_extend_model`. For example: -``` -DASH_API_KEY=your_key python generate.py --task t2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-T2V-14B --prompt "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage" --use_prompt_extend --prompt_extend_method 'dashscope' --prompt_extend_target_lang 'ch' -``` - -- Using a local model for extension. - - - By default, the Qwen model on HuggingFace is used for this extension. Users can choose Qwen models or other models based on the available GPU memory size. - - For text-to-video tasks, you can use models like `Qwen/Qwen2.5-14B-Instruct`, `Qwen/Qwen2.5-7B-Instruct` and `Qwen/Qwen2.5-3B-Instruct`. - - For image-to-video tasks, you can use models like `Qwen/Qwen2.5-VL-7B-Instruct` and `Qwen/Qwen2.5-VL-3B-Instruct`. - - Larger models generally provide better extension results but require more GPU memory. 
- - You can modify the model used for extension with the parameter `--prompt_extend_model` , allowing you to specify either a local model path or a Hugging Face model. For example: - -``` -python generate.py --task t2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-T2V-14B --prompt "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage" --use_prompt_extend --prompt_extend_method 'local_qwen' --prompt_extend_target_lang 'ch' -``` - -##### (3) Runing local gradio - -``` -cd gradio -# if one uses dashscope’s API for prompt extension -DASH_API_KEY=your_key python t2v_14B_singleGPU.py --prompt_extend_method 'dashscope' --ckpt_dir ./Wan2.1-T2V-14B - -# if one uses a local model for prompt extension -python t2v_14B_singleGPU.py --prompt_extend_method 'local_qwen' --ckpt_dir ./Wan2.1-T2V-14B -``` - - -#### Run Image-to-Video Generation - -Similar to Text-to-Video, Image-to-Video is also divided into processes with and without the prompt extension step. The specific parameters and their corresponding settings are as follows: - - - - - - - - - - - - - - - - - - - - - - - - - - -
TaskResolutionModel
480P720P
i2v-14B✔️Wan2.1-I2V-14B-720P
i2v-14B✔️Wan2.1-T2V-14B-480P
- - -##### (1) Without Prompt Extention - -- Single-GPU inference -``` -python generate.py --task i2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-I2V-14B-720P --image examples/i2v_input.JPG --prompt "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside." -``` - -> 💡For the Image-to-Video task, the `size` parameter represents the area of the generated video, with the aspect ratio following that of the original input image. - - -- Multi-GPU inference using FSDP + xDiT USP - -``` -pip install "xfuser>=0.4.1" -torchrun --nproc_per_node=8 generate.py --task i2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-I2V-14B-720P --image examples/i2v_input.JPG --dit_fsdp --t5_fsdp --ulysses_size 8 --prompt "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside." -``` - -##### (2) Using Prompt Extention - - -The process of prompt extension can be referenced [here](#2-using-prompt-extention). - -Run with local prompt extention using `Qwen/Qwen2.5-VL-7B-Instruct`: -``` -python generate.py --task i2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-I2V-14B-720P --image examples/i2v_input.JPG --use_prompt_extend --prompt_extend_model Qwen/Qwen2.5-VL-7B-Instruct --prompt "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside." -``` - -Run with remote prompt extention using `dashscope`: -``` -DASH_API_KEY=your_key python generate.py --task i2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-I2V-14B-720P --image examples/i2v_input.JPG --use_prompt_extend --prompt_extend_method 'dashscope' --prompt "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside." 
-``` - -##### (3) Runing local gradio - -``` -cd gradio -# if one only uses 480P model in gradio -DASH_API_KEY=your_key python i2v_14B_singleGPU.py --prompt_extend_method 'dashscope' --ckpt_dir_480p ./Wan2.1-I2V-14B-480P - -# if one only uses 720P model in gradio -DASH_API_KEY=your_key python i2v_14B_singleGPU.py --prompt_extend_method 'dashscope' --ckpt_dir_720p ./Wan2.1-I2V-14B-720P - -# if one uses both 480P and 720P models in gradio -DASH_API_KEY=your_key python i2v_14B_singleGPU.py --prompt_extend_method 'dashscope' --ckpt_dir_480p ./Wan2.1-I2V-14B-480P --ckpt_dir_720p ./Wan2.1-I2V-14B-720P -``` - - -#### Run Text-to-Image Generation - -Wan2.1 is a unified model for both image and video generation. Since it was trained on both types of data, it can also generate images. The command for generating images is similar to video generation, as follows: - -##### (1) Without Prompt Extention - -- Single-GPU inference -``` -python generate.py --task t2i-14B --size 1024*1024 --ckpt_dir ./Wan2.1-T2V-14B --prompt '一个朴素端庄的美人' -``` - -- Multi-GPU inference using FSDP + xDiT USP - -``` -torchrun --nproc_per_node=8 generate.py --dit_fsdp --t5_fsdp --ulysses_size 8 --base_seed 0 --frame_num 1 --task t2i-14B --size 1024*1024 --prompt '一个朴素端庄的美人' --ckpt_dir ./Wan2.1-T2V-14B -``` - -##### (2) With Prompt Extention - -- Single-GPU inference -``` -python generate.py --task t2i-14B --size 1024*1024 --ckpt_dir ./Wan2.1-T2V-14B --prompt '一个朴素端庄的美人' --use_prompt_extend -``` - -- Multi-GPU inference using FSDP + xDiT USP -``` -torchrun --nproc_per_node=8 generate.py --dit_fsdp --t5_fsdp --ulysses_size 8 --base_seed 0 --frame_num 1 --task t2i-14B --size 1024*1024 --ckpt_dir ./Wan2.1-T2V-14B --prompt '一个朴素端庄的美人' --use_prompt_extend -``` - - -## Manual Evaluation - -##### (1) Text-to-Video Evaluation - -Through manual evaluation, the results generated after prompt extension are superior to those from both closed-source and open-source models. - -
- - -##### (2) Image-to-Video Evaluation - -We also conducted extensive manual evaluations to evaluate the performance of the Image-to-Video model, and the results are presented in the table below. The results clearly indicate that **Wan2.1** outperforms both closed-source and open-source models. - -
- - -## Computational Efficiency on Different GPUs - -We test the computational efficiency of different **Wan2.1** models on different GPUs in the following table. The results are presented in the format: **Total time (s) / peak GPU memory (GB)**. - - -
- -> The parameter settings for the tests presented in this table are as follows: -> (1) For the 1.3B model on 8 GPUs, set `--ring_size 8` and `--ulysses_size 1`; -> (2) For the 14B model on 1 GPU, use `--offload_model True`; -> (3) For the 1.3B model on a single 4090 GPU, set `--offload_model True --t5_cpu`; -> (4) For all testings, no prompt extension was applied, meaning `--use_prompt_extend` was not enabled. - -> 💡Note: T2V-14B is slower than I2V-14B because the former samples 50 steps while the latter uses 40 steps. - - -## Community Contributions -- [DiffSynth-Studio](https://github.com/modelscope/DiffSynth-Studio) provides more support for Wan, including video-to-video, FP8 quantization, VRAM optimization, LoRA training, and more. Please refer to [their examples](https://github.com/modelscope/DiffSynth-Studio/tree/main/examples/wanvideo). - -------- - -## Introduction of Wan2.1 - -**Wan2.1** is designed on the mainstream diffusion transformer paradigm, achieving significant advancements in generative capabilities through a series of innovations. These include our novel spatio-temporal variational autoencoder (VAE), scalable training strategies, large-scale data construction, and automated evaluation metrics. Collectively, these contributions enhance the model’s performance and versatility. - - -##### (1) 3D Variational Autoencoders -We propose a novel 3D causal VAE architecture, termed **Wan-VAE** specifically designed for video generation. By combining multiple strategies, we improve spatio-temporal compression, reduce memory usage, and ensure temporal causality. **Wan-VAE** demonstrates significant advantages in performance efficiency compared to other open-source VAEs. Furthermore, our **Wan-VAE** can encode and decode unlimited-length 1080P videos without losing historical temporal information, making it particularly well-suited for video generation tasks. - - -
- - -##### (2) Video Diffusion DiT - -**Wan2.1** is designed using the Flow Matching framework within the paradigm of mainstream Diffusion Transformers. Our model's architecture uses the T5 Encoder to encode multilingual text input, with cross-attention in each transformer block embedding the text into the model structure. Additionally, we employ an MLP with a Linear layer and a SiLU layer to process the input time embeddings and predict six modulation parameters individually. This MLP is shared across all transformer blocks, with each block learning a distinct set of biases. Our experimental findings reveal a significant performance improvement with this approach at the same parameter scale. - -
- - -| Model | Dimension | Input Dimension | Output Dimension | Feedforward Dimension | Frequency Dimension | Number of Heads | Number of Layers | -|--------|-----------|-----------------|------------------|-----------------------|---------------------|-----------------|------------------| -| 1.3B | 1536 | 16 | 16 | 8960 | 256 | 12 | 30 | -| 14B | 5120 | 16 | 16 | 13824 | 256 | 40 | 40 | - - - -##### Data - -We curated and deduplicated a candidate dataset comprising a vast amount of image and video data. During the data curation process, we designed a four-step data cleaning process, focusing on fundamental dimensions, visual quality and motion quality. Through the robust data processing pipeline, we can easily obtain high-quality, diverse, and large-scale training sets of images and videos. - -![figure1](assets/data_for_diff_stage.jpg "figure1") - - -##### Comparisons to SOTA -We compared **Wan2.1** with leading open-source and closed-source models to evaluate the performace. Using our carefully designed set of 1,035 internal prompts, we tested across 14 major dimensions and 26 sub-dimensions. We then compute the total score by performing a weighted calculation on the scores of each dimension, utilizing weights derived from human preferences in the matching process. The detailed results are shown in the table below. These results demonstrate our model's superior performance compared to both open-source and closed-source models. - -![figure1](assets/vben_vs_sota.png "figure1") - - -## Citation -If you find our work helpful, please cite us. - -``` -@article{wan2.1, - title = {Wan: Open and Advanced Large-Scale Video Generative Models}, - author = {Wan Team}, - journal = {}, - year = {2025} -} -``` - -## License Agreement -The models in this repository are licensed under the Apache 2.0 License. We claim no rights over the your generate contents, granting you the freedom to use them while ensuring that your usage complies with the provisions of this license. You are fully accountable for your use of the models, which must not involve sharing any content that violates applicable laws, causes harm to individuals or groups, disseminates personal information intended for harm, spreads misinformation, or targets vulnerable populations. For a complete list of restrictions and details regarding your rights, please refer to the full text of the [license](LICENSE.txt). - - -## Acknowledgements - -We would like to thank the contributors to the [SD3](https://huggingface.co/stabilityai/stable-diffusion-3-medium), [Qwen](https://huggingface.co/Qwen), [umt5-xxl](https://huggingface.co/google/umt5-xxl), [diffusers](https://github.com/huggingface/diffusers) and [HuggingFace](https://huggingface.co) repositories, for their open research. - - - -## Contact Us -If you would like to leave a message to our research or product teams, feel free to join our [Discord](https://discord.gg/p5XbdQV7) or [WeChat groups](https://gw.alicdn.com/imgextra/i2/O1CN01tqjWFi1ByuyehkTSB_!!6000000000015-0-tps-611-1279.jpg)! +This project is based on the original Wan2.1 model. Special thanks to the original authors and contributors for their work. 
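The macOS patches above (08–10) all reduce to the same device-selection pattern: honor an explicit `--device` argument, otherwise prefer MPS when `torch.backends.mps.is_available()` reports it, and fall back to CPU, with `PYTORCH_ENABLE_MPS_FALLBACK=1` covering operators that have no MPS kernel. The sketch below isolates that pattern; it is an illustrative, assumption-based example rather than the repository's exact code, and the `pick_device` helper is a hypothetical name.

```python
import os

# The patched README exports this in the shell before launching generate.py;
# setting it before importing torch typically has the same effect: operators
# without an MPS kernel fall back to the CPU instead of raising an error.
os.environ.setdefault("PYTORCH_ENABLE_MPS_FALLBACK", "1")

import torch


def pick_device(requested=None):
    """Return the requested device, else MPS when available, else CPU."""
    if requested:  # e.g. the --device mps / --device cpu CLI flag
        return torch.device(requested)
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")


if __name__ == "__main__":
    device = pick_device()
    x = torch.randn(2, 3, device=device)  # smoke test on the chosen backend
    print(f"running on {device}: {x.sum().item():.3f}")
```

In the patches themselves the same check appears inline (for example in `generate()` and `WanT2V.__init__`), combined with `--offload_model True` and `--t5_cpu` to keep peak memory manageable on a single device.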
From e2287e5126dcc426be3a557e45d0a7cf75810552 Mon Sep 17 00:00:00 2001 From: Adrian Corduneanu Date: Wed, 26 Feb 2025 02:56:57 -0800 Subject: [PATCH 11/20] Update text2video.py to reduce GPU memory by emptying cache (#44) * Update text2video.py to reduce GPU memory by emptying cache If offload_model is set, empty_cache() must be called after the model is moved to CPU to actually free the GPU. I verified on a RTX 4090 that without calling empty_cache the model remains in memory and the subsequent vae decoding never finishes. * Update text2video.py only one empty_cache needed before vae decode --- wan/text2video.py | 1 + 1 file changed, 1 insertion(+) diff --git a/wan/text2video.py b/wan/text2video.py index aeaa9d4..8aedecc 100644 --- a/wan/text2video.py +++ b/wan/text2video.py @@ -252,6 +252,7 @@ class WanT2V: x0 = latents if offload_model: self.model.cpu() + torch.cuda.empty_cache() if self.rank == 0: videos = self.vae.decode(x0) From 18818168fffee6a94f8f340b2b5740901b2a605a Mon Sep 17 00:00:00 2001 From: cocktailpeanut <121128867+cocktailpeanut@users.noreply.github.com> Date: Wed, 26 Feb 2025 05:57:30 -0500 Subject: [PATCH 12/20] os.path.sep instead of / (#12) --- gradio/i2v_14B_singleGPU.py | 3 ++- gradio/t2i_14B_singleGPU.py | 3 ++- gradio/t2v_1.3B_singleGPU.py | 3 ++- gradio/t2v_14B_singleGPU.py | 3 ++- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/gradio/i2v_14B_singleGPU.py b/gradio/i2v_14B_singleGPU.py index 9a22297..100002b 100644 --- a/gradio/i2v_14B_singleGPU.py +++ b/gradio/i2v_14B_singleGPU.py @@ -2,6 +2,7 @@ import argparse import gc import os.path as osp +import os import sys import warnings @@ -10,7 +11,7 @@ import gradio as gr warnings.filterwarnings('ignore') # Model -sys.path.insert(0, '/'.join(osp.realpath(__file__).split('/')[:-2])) +sys.path.insert(0, os.path.sep.join(osp.realpath(__file__).split(os.path.sep)[:-2])) import wan from wan.configs import MAX_AREA_CONFIGS, WAN_CONFIGS from wan.utils.prompt_extend import DashScopePromptExpander, QwenPromptExpander diff --git a/gradio/t2i_14B_singleGPU.py b/gradio/t2i_14B_singleGPU.py index f81129a..cb42e38 100644 --- a/gradio/t2i_14B_singleGPU.py +++ b/gradio/t2i_14B_singleGPU.py @@ -1,6 +1,7 @@ # Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved. import argparse import os.path as osp +import os import sys import warnings @@ -9,7 +10,7 @@ import gradio as gr warnings.filterwarnings('ignore') # Model -sys.path.insert(0, '/'.join(osp.realpath(__file__).split('/')[:-2])) +sys.path.insert(0, os.path.sep.join(osp.realpath(__file__).split(os.path.sep)[:-2])) import wan from wan.configs import WAN_CONFIGS from wan.utils.prompt_extend import DashScopePromptExpander, QwenPromptExpander diff --git a/gradio/t2v_1.3B_singleGPU.py b/gradio/t2v_1.3B_singleGPU.py index 54706b2..87c414e 100644 --- a/gradio/t2v_1.3B_singleGPU.py +++ b/gradio/t2v_1.3B_singleGPU.py @@ -1,6 +1,7 @@ # Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved. 
import argparse import os.path as osp +import os import sys import warnings @@ -9,7 +10,7 @@ import gradio as gr warnings.filterwarnings('ignore') # Model -sys.path.insert(0, '/'.join(osp.realpath(__file__).split('/')[:-2])) +sys.path.insert(0, os.path.sep.join(osp.realpath(__file__).split(os.path.sep)[:-2])) import wan from wan.configs import WAN_CONFIGS from wan.utils.prompt_extend import DashScopePromptExpander, QwenPromptExpander diff --git a/gradio/t2v_14B_singleGPU.py b/gradio/t2v_14B_singleGPU.py index b7448ef..a9b7485 100644 --- a/gradio/t2v_14B_singleGPU.py +++ b/gradio/t2v_14B_singleGPU.py @@ -1,6 +1,7 @@ # Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved. import argparse import os.path as osp +import os import sys import warnings @@ -9,7 +10,7 @@ import gradio as gr warnings.filterwarnings('ignore') # Model -sys.path.insert(0, '/'.join(osp.realpath(__file__).split('/')[:-2])) +sys.path.insert(0, os.path.sep.join(osp.realpath(__file__).split(os.path.sep)[:-2])) import wan from wan.configs import WAN_CONFIGS from wan.utils.prompt_extend import DashScopePromptExpander, QwenPromptExpander From 3f0dde1f8157590a673336fa659c22873a5cf950 Mon Sep 17 00:00:00 2001 From: WanX-Video Date: Wed, 26 Feb 2025 20:33:18 +0800 Subject: [PATCH 13/20] update gradio (#58) --- gradio/t2v_1.3B_singleGPU.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gradio/t2v_1.3B_singleGPU.py b/gradio/t2v_1.3B_singleGPU.py index 87c414e..0a752d2 100644 --- a/gradio/t2v_1.3B_singleGPU.py +++ b/gradio/t2v_1.3B_singleGPU.py @@ -46,7 +46,7 @@ def t2v_generation(txt2vid_prompt, resolution, sd_steps, guide_scale, guide_scale=guide_scale, n_prompt=n_prompt, seed=seed, - offload_model=False) + offload_model=True) cache_video( tensor=video[None], From b562f86ec5dd3c538d20c9e90511bce7624b1691 Mon Sep 17 00:00:00 2001 From: Yingda Chen Date: Wed, 26 Feb 2025 22:31:12 +0800 Subject: [PATCH 14/20] add modelscope download cli --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 5d26fde..8c95c58 100644 --- a/README.md +++ b/README.md @@ -79,6 +79,11 @@ pip install "huggingface_hub[cli]" huggingface-cli download Wan-AI/Wan2.1-T2V-14B --local-dir ./Wan2.1-T2V-14B ``` +Download models using modelscope-cli: +``` +pip install modelscope +modelscope download Wan-AI/Wan2.1-T2V-14B --local_dir ./Wan2.1-T2V-14B +``` #### Run Text-to-Video Generation This repository supports two Text-to-Video models (1.3B and 14B) and two resolutions (480P and 720P). 
The parameters and configurations for these models are as follows: From cf578ab14befcf0d3b571f6b5555883469af8e61 Mon Sep 17 00:00:00 2001 From: Bakhtiyor Sulaymonov Date: Wed, 26 Feb 2025 22:20:28 +0500 Subject: [PATCH 15/20] Adapted model for macOS with M1 Pro chip and other improvements --- .gitignore | 2 + generate.py | 187 +++++++++-------------- requirements.txt | 2 +- wan/configs/shared_config.py | 4 +- wan/distributed/fsdp.py | 2 +- wan/distributed/xdit_context_parallel.py | 4 +- wan/image2video.py | 9 +- wan/modules/attention.py | 31 ++-- wan/modules/model.py | 6 +- wan/modules/t5.py | 6 +- wan/utils/prompt_extend.py | 18 ++- wan/utils/qwen_vl_utils.py | 10 ++ 12 files changed, 136 insertions(+), 145 deletions(-) diff --git a/.gitignore b/.gitignore index 1f975d8..358fda4 100644 --- a/.gitignore +++ b/.gitignore @@ -34,3 +34,5 @@ Wan2.1-T2V-14B/ Wan2.1-T2V-1.3B/ Wan2.1-I2V-14B-480P/ Wan2.1-I2V-14B-720P/ +venv_wan/ +venv_wan_py310/ diff --git a/generate.py b/generate.py index f27bb98..5d57d2c 100644 --- a/generate.py +++ b/generate.py @@ -186,6 +186,11 @@ def _parse_args(): type=float, default=5.0, help="Classifier free guidance scale.") + parser.add_argument( + "--device", + type=str, + default=None, + help="Device to use for computation (mps, cpu).") args = parser.parse_args() @@ -207,43 +212,21 @@ def _init_logging(rank): def generate(args): - rank = int(os.getenv("RANK", 0)) - world_size = int(os.getenv("WORLD_SIZE", 1)) - local_rank = int(os.getenv("LOCAL_RANK", 0)) - device = local_rank - _init_logging(rank) - + # Set device based on args or availability + if args.device: + device = torch.device(args.device) + else: + device = torch.device("mps" if torch.backends.mps.is_available() else "cpu") + + _init_logging(0) # Use rank 0 logging for single-device + + # Ensure all torch operations use this device + torch.set_default_device(device) + if args.offload_model is None: - args.offload_model = False if world_size > 1 else True + args.offload_model = True # Default to True for single device to save memory logging.info( f"offload_model is not specified, set to {args.offload_model}.") - if world_size > 1: - torch.cuda.set_device(local_rank) - dist.init_process_group( - backend="nccl", - init_method="env://", - rank=rank, - world_size=world_size) - else: - assert not ( - args.t5_fsdp or args.dit_fsdp - ), f"t5_fsdp and dit_fsdp are not supported in non-distributed environments." - assert not ( - args.ulysses_size > 1 or args.ring_size > 1 - ), f"context parallel are not supported in non-distributed environments." - - if args.ulysses_size > 1 or args.ring_size > 1: - assert args.ulysses_size * args.ring_size == world_size, f"The number of ulysses_size and ring_size should be equal to the world size." 
- from xfuser.core.distributed import (initialize_model_parallel, - init_distributed_environment) - init_distributed_environment( - rank=dist.get_rank(), world_size=dist.get_world_size()) - - initialize_model_parallel( - sequence_parallel_degree=dist.get_world_size(), - ring_degree=args.ring_size, - ulysses_degree=args.ulysses_size, - ) if args.use_prompt_extend: if args.prompt_extend_method == "dashscope": @@ -253,58 +236,44 @@ def generate(args): prompt_expander = QwenPromptExpander( model_name=args.prompt_extend_model, is_vl="i2v" in args.task, - device=rank) + device=device) # Use MPS/CPU device instead of rank else: raise NotImplementedError( f"Unsupport prompt_extend_method: {args.prompt_extend_method}") cfg = WAN_CONFIGS[args.task] - if args.ulysses_size > 1: - assert cfg.num_heads % args.ulysses_size == 0, f"`num_heads` must be divisible by `ulysses_size`." - logging.info(f"Generation job args: {args}") logging.info(f"Generation model config: {cfg}") - if dist.is_initialized(): - base_seed = [args.base_seed] if rank == 0 else [None] - dist.broadcast_object_list(base_seed, src=0) - args.base_seed = base_seed[0] - if "t2v" in args.task or "t2i" in args.task: if args.prompt is None: args.prompt = EXAMPLE_PROMPT[args.task]["prompt"] logging.info(f"Input prompt: {args.prompt}") if args.use_prompt_extend: logging.info("Extending prompt ...") - if rank == 0: - prompt_output = prompt_expander( - args.prompt, - tar_lang=args.prompt_extend_target_lang, - seed=args.base_seed) - if prompt_output.status == False: - logging.info( - f"Extending prompt failed: {prompt_output.message}") - logging.info("Falling back to original prompt.") - input_prompt = args.prompt - else: - input_prompt = prompt_output.prompt - input_prompt = [input_prompt] + prompt_output = prompt_expander( + args.prompt, + tar_lang=args.prompt_extend_target_lang, + seed=args.base_seed) + if prompt_output.status == False: + logging.info( + f"Extending prompt failed: {prompt_output.message}") + logging.info("Falling back to original prompt.") + input_prompt = args.prompt else: - input_prompt = [None] - if dist.is_initialized(): - dist.broadcast_object_list(input_prompt, src=0) - args.prompt = input_prompt[0] + input_prompt = prompt_output.prompt + args.prompt = input_prompt logging.info(f"Extended prompt: {args.prompt}") logging.info("Creating WanT2V pipeline.") wan_t2v = wan.WanT2V( config=cfg, checkpoint_dir=args.ckpt_dir, - device_id=device, - rank=rank, - t5_fsdp=args.t5_fsdp, - dit_fsdp=args.dit_fsdp, - use_usp=(args.ulysses_size > 1 or args.ring_size > 1), + device_id=device, # Use MPS/CPU device instead of local_rank + rank=0, # Single device, so use rank 0 + t5_fsdp=False, # Disable FSDP (not supported on MPS) + dit_fsdp=False, # Disable FSDP (not supported on MPS) + use_usp=False, # Disable Ulysses/ring parallelism (single device) t5_cpu=args.t5_cpu, ) @@ -332,36 +301,30 @@ def generate(args): img = Image.open(args.image).convert("RGB") if args.use_prompt_extend: logging.info("Extending prompt ...") - if rank == 0: - prompt_output = prompt_expander( - args.prompt, - tar_lang=args.prompt_extend_target_lang, - image=img, - seed=args.base_seed) - if prompt_output.status == False: - logging.info( - f"Extending prompt failed: {prompt_output.message}") - logging.info("Falling back to original prompt.") - input_prompt = args.prompt - else: - input_prompt = prompt_output.prompt - input_prompt = [input_prompt] + prompt_output = prompt_expander( + args.prompt, + tar_lang=args.prompt_extend_target_lang, + image=img, + 
seed=args.base_seed) + if prompt_output.status == False: + logging.info( + f"Extending prompt failed: {prompt_output.message}") + logging.info("Falling back to original prompt.") + input_prompt = args.prompt else: - input_prompt = [None] - if dist.is_initialized(): - dist.broadcast_object_list(input_prompt, src=0) - args.prompt = input_prompt[0] + input_prompt = prompt_output.prompt + args.prompt = input_prompt logging.info(f"Extended prompt: {args.prompt}") logging.info("Creating WanI2V pipeline.") wan_i2v = wan.WanI2V( config=cfg, checkpoint_dir=args.ckpt_dir, - device_id=device, - rank=rank, - t5_fsdp=args.t5_fsdp, - dit_fsdp=args.dit_fsdp, - use_usp=(args.ulysses_size > 1 or args.ring_size > 1), + device_id=device, # Use MPS/CPU device instead of local_rank + rank=0, # Single device, so use rank 0 + t5_fsdp=False, # Disable FSDP (not supported on MPS) + dit_fsdp=False, # Disable FSDP (not supported on MPS) + use_usp=False, # Disable Ulysses/ring parallelism (single device) t5_cpu=args.t5_cpu, ) @@ -378,34 +341,32 @@ def generate(args): seed=args.base_seed, offload_model=args.offload_model) - if rank == 0: - if args.save_file is None: - formatted_time = datetime.now().strftime("%Y%m%d_%H%M%S") - formatted_prompt = args.prompt.replace(" ", "_").replace("/", - "_")[:50] - suffix = '.png' if "t2i" in args.task else '.mp4' - args.save_file = f"{args.task}_{args.size}_{args.ulysses_size}_{args.ring_size}_{formatted_prompt}_{formatted_time}" + suffix + # Save output (single device, so no rank check needed) + if args.save_file is None: + formatted_time = datetime.now().strftime("%Y%m%d_%H%M%S") + formatted_prompt = args.prompt.replace(" ", "_").replace("/", "_")[:50] + suffix = '.png' if "t2i" in args.task else '.mp4' + args.save_file = f"{args.task}_{args.size}_{formatted_prompt}_{formatted_time}" + suffix - if "t2i" in args.task: - logging.info(f"Saving generated image to {args.save_file}") - cache_image( - tensor=video.squeeze(1)[None], - save_file=args.save_file, - nrow=1, - normalize=True, - value_range=(-1, 1)) - else: - logging.info(f"Saving generated video to {args.save_file}") - cache_video( - tensor=video[None], - save_file=args.save_file, - fps=cfg.sample_fps, - nrow=1, - normalize=True, - value_range=(-1, 1)) + if "t2i" in args.task: + logging.info(f"Saving generated image to {args.save_file}") + cache_image( + tensor=video.squeeze(1)[None], + save_file=args.save_file, + nrow=1, + normalize=True, + value_range=(-1, 1)) + else: + logging.info(f"Saving generated video to {args.save_file}") + cache_video( + tensor=video[None], + save_file=args.save_file, + fps=cfg.sample_fps, + nrow=1, + normalize=True, + value_range=(-1, 1)) logging.info("Finished.") - if __name__ == "__main__": args = _parse_args() generate(args) diff --git a/requirements.txt b/requirements.txt index d416e7b..e40a45d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,6 +11,6 @@ easydict ftfy dashscope imageio-ffmpeg -flash_attn +# flash_attn gradio>=5.0.0 numpy>=1.23.5,<2 diff --git a/wan/configs/shared_config.py b/wan/configs/shared_config.py index 04a9f45..62e634d 100644 --- a/wan/configs/shared_config.py +++ b/wan/configs/shared_config.py @@ -7,11 +7,11 @@ wan_shared_cfg = EasyDict() # t5 wan_shared_cfg.t5_model = 'umt5_xxl' -wan_shared_cfg.t5_dtype = torch.bfloat16 +wan_shared_cfg.t5_dtype = torch.float32 wan_shared_cfg.text_len = 512 # transformer -wan_shared_cfg.param_dtype = torch.bfloat16 +wan_shared_cfg.param_dtype = torch.float32 # inference wan_shared_cfg.num_train_timesteps = 1000 diff --git 
a/wan/distributed/fsdp.py b/wan/distributed/fsdp.py index 258d4af..ccbf21f 100644 --- a/wan/distributed/fsdp.py +++ b/wan/distributed/fsdp.py @@ -10,7 +10,7 @@ from torch.distributed.fsdp.wrap import lambda_auto_wrap_policy def shard_model( model, device_id, - param_dtype=torch.bfloat16, + param_dtype=torch.float32, reduce_dtype=torch.float32, buffer_dtype=torch.float32, process_group=None, diff --git a/wan/distributed/xdit_context_parallel.py b/wan/distributed/xdit_context_parallel.py index 01936ce..29dd50f 100644 --- a/wan/distributed/xdit_context_parallel.py +++ b/wan/distributed/xdit_context_parallel.py @@ -151,9 +151,9 @@ def usp_attn_forward(self, seq_lens, grid_sizes, freqs, - dtype=torch.bfloat16): + dtype=torch.float32): b, s, n, d = *x.shape[:2], self.num_heads, self.head_dim - half_dtypes = (torch.float16, torch.bfloat16) + half_dtypes = (torch.float16, torch.float32) def half(x): return x if x.dtype in half_dtypes else x.to(dtype) diff --git a/wan/image2video.py b/wan/image2video.py index 468f17c..55d3f57 100644 --- a/wan/image2video.py +++ b/wan/image2video.py @@ -63,7 +63,14 @@ class WanI2V: init_on_cpu (`bool`, *optional*, defaults to True): Enable initializing Transformer Model on CPU. Only works without FSDP or USP. """ - self.device = torch.device(f"cuda:{device_id}") + # Check if device_id is a torch.device instance + if isinstance(device_id, torch.device): + self.device = device_id + elif device_id == "mps" or (isinstance(device_id, int) and device_id == -1): + self.device = torch.device("mps" if torch.backends.mps.is_available() else "cpu") + else: + self.device = torch.device(f"cuda:{device_id}") + self.config = config self.rank = rank self.use_usp = use_usp diff --git a/wan/modules/attention.py b/wan/modules/attention.py index 4dbbe03..6dcc54a 100644 --- a/wan/modules/attention.py +++ b/wan/modules/attention.py @@ -33,25 +33,26 @@ def flash_attention( causal=False, window_size=(-1, -1), deterministic=False, - dtype=torch.bfloat16, + dtype=torch.float32, version=None, ): """ - q: [B, Lq, Nq, C1]. - k: [B, Lk, Nk, C1]. - v: [B, Lk, Nk, C2]. Nq must be divisible by Nk. - q_lens: [B]. - k_lens: [B]. - dropout_p: float. Dropout probability. - softmax_scale: float. The scaling of QK^T before applying softmax. - causal: bool. Whether to apply causal attention mask. - window_size: (left right). If not (-1, -1), apply sliding window local attention. - deterministic: bool. If True, slightly slower and uses more memory. - dtype: torch.dtype. Apply when dtype of q/k/v is not float16/bfloat16. + Flash attention implementation with fallback for CPU and MPS devices """ - half_dtypes = (torch.float16, torch.bfloat16) + half_dtypes = (torch.float16, torch.float32) assert dtype in half_dtypes - assert q.device.type == 'cuda' and q.size(-1) <= 256 + assert q.size(-1) <= 256, "Sequence length exceeds the maximum limit." 
+ + # Add CPU/MPS fallback implementation + if not (FLASH_ATTN_2_AVAILABLE or FLASH_ATTN_3_AVAILABLE) or q.device.type in ['cpu', 'mps']: + # Implement standard attention for CPU/MPS + return attention(q, k, v, + q_lens=q_lens, + k_lens=k_lens, + dropout_p=dropout_p, + softmax_scale=softmax_scale, + causal=causal, + window_size=window_size) # params b, lq, lk, out_dtype = q.size(0), q.size(1), k.size(1), q.dtype @@ -142,7 +143,7 @@ def attention( causal=False, window_size=(-1, -1), deterministic=False, - dtype=torch.bfloat16, + dtype=torch.float32, fa_version=None, ): if FLASH_ATTN_2_AVAILABLE or FLASH_ATTN_3_AVAILABLE: diff --git a/wan/modules/model.py b/wan/modules/model.py index b65021c..7144055 100644 --- a/wan/modules/model.py +++ b/wan/modules/model.py @@ -16,7 +16,7 @@ def sinusoidal_embedding_1d(dim, position): # preprocess assert dim % 2 == 0 half = dim // 2 - position = position.type(torch.float64) + position = position.type(torch.float32) # calculation sinusoid = torch.outer( @@ -31,7 +31,7 @@ def rope_params(max_seq_len, dim, theta=10000): freqs = torch.outer( torch.arange(max_seq_len), 1.0 / torch.pow(theta, - torch.arange(0, dim, 2).to(torch.float64).div(dim))) + torch.arange(0, dim, 2).to(torch.float32).div(dim))) freqs = torch.polar(torch.ones_like(freqs), freqs) return freqs @@ -49,7 +49,7 @@ def rope_apply(x, grid_sizes, freqs): seq_len = f * h * w # precompute multipliers - x_i = torch.view_as_complex(x[i, :seq_len].to(torch.float64).reshape( + x_i = torch.view_as_complex(x[i, :seq_len].to(torch.float32).reshape( seq_len, n, -1, 2)) freqs_i = torch.cat([ freqs[0][:f].view(f, 1, 1, -1).expand(f, h, w, -1), diff --git a/wan/modules/t5.py b/wan/modules/t5.py index c841b04..3a26ed1 100644 --- a/wan/modules/t5.py +++ b/wan/modules/t5.py @@ -61,7 +61,7 @@ class T5LayerNorm(nn.Module): def forward(self, x): x = x * torch.rsqrt(x.float().pow(2).mean(dim=-1, keepdim=True) + self.eps) - if self.weight.dtype in [torch.float16, torch.bfloat16]: + if self.weight.dtype in [torch.float16, torch.float32]: x = x.type_as(self.weight) return self.weight * x @@ -474,8 +474,8 @@ class T5EncoderModel: def __init__( self, text_len, - dtype=torch.bfloat16, - device=torch.cuda.current_device(), + dtype=torch.float32, + device='mps' if torch.backends.mps.is_available() else 'cpu', checkpoint_path=None, tokenizer_path=None, shard_fn=None, diff --git a/wan/utils/prompt_extend.py b/wan/utils/prompt_extend.py index e7a21b5..00f4f20 100644 --- a/wan/utils/prompt_extend.py +++ b/wan/utils/prompt_extend.py @@ -44,7 +44,7 @@ LM_EN_SYS_PROMPT = \ '''1. For overly concise user inputs, reasonably infer and add details to make the video more complete and appealing without altering the original intent;\n''' \ '''2. Enhance the main features in user descriptions (e.g., appearance, expression, quantity, race, posture, etc.), visual style, spatial relationships, and shot scales;\n''' \ '''3. Output the entire prompt in English, retaining original text in quotes and titles, and preserving key input information;\n''' \ - '''4. Prompts should match the user’s intent and accurately reflect the specified style. If the user does not specify a style, choose the most appropriate style for the video;\n''' \ + '''4. Prompts should match the user's intent and accurately reflect the specified style. If the user does not specify a style, choose the most appropriate style for the video;\n''' \ '''5. Emphasize motion information and different camera movements present in the input description;\n''' \ '''6. 
Your output should have natural motion attributes. For the target category described, add natural actions of the target using simple and direct verbs;\n''' \ '''7. The revised prompt should be around 80-100 characters long.\n''' \ @@ -82,7 +82,7 @@ VL_EN_SYS_PROMPT = \ '''1. For overly brief user inputs, reasonably infer and supplement details without changing the original meaning, making the image more complete and visually appealing;\n''' \ '''2. Improve the characteristics of the main subject in the user's description (such as appearance, expression, quantity, ethnicity, posture, etc.), rendering style, spatial relationships, and camera angles;\n''' \ '''3. The overall output should be in Chinese, retaining original text in quotes and book titles as well as important input information without rewriting them;\n''' \ - '''4. The prompt should match the user’s intent and provide a precise and detailed style description. If the user has not specified a style, you need to carefully analyze the style of the user's provided photo and use that as a reference for rewriting;\n''' \ + '''4. The prompt should match the user's intent and provide a precise and detailed style description. If the user has not specified a style, you need to carefully analyze the style of the user's provided photo and use that as a reference for rewriting;\n''' \ '''5. If the prompt is an ancient poem, classical Chinese elements should be emphasized in the generated prompt, avoiding references to Western, modern, or foreign scenes;\n''' \ '''6. You need to emphasize movement information in the input and different camera angles;\n''' \ '''7. Your output should convey natural movement attributes, incorporating natural actions related to the described subject category, using simple and direct verbs as much as possible;\n''' \ @@ -93,7 +93,7 @@ VL_EN_SYS_PROMPT = \ '''1. A Japanese fresh film-style photo of a young East Asian girl with double braids sitting by the boat. The girl wears a white square collar puff sleeve dress, decorated with pleats and buttons. She has fair skin, delicate features, and slightly melancholic eyes, staring directly at the camera. Her hair falls naturally, with bangs covering part of her forehead. She rests her hands on the boat, appearing natural and relaxed. The background features a blurred outdoor scene, with hints of blue sky, mountains, and some dry plants. The photo has a vintage film texture. A medium shot of a seated portrait.\n''' \ '''2. An anime illustration in vibrant thick painting style of a white girl with cat ears holding a folder, showing a slightly dissatisfied expression. She has long dark purple hair and red eyes, wearing a dark gray skirt and a light gray top with a white waist tie and a name tag in bold Chinese characters that says "紫阳" (Ziyang). The background has a light yellow indoor tone, with faint outlines of some furniture visible. A pink halo hovers above her head, in a smooth Japanese cel-shading style. A close-up shot from a slightly elevated perspective.\n''' \ '''3. CG game concept digital art featuring a huge crocodile with its mouth wide open, with trees and thorns growing on its back. The crocodile's skin is rough and grayish-white, resembling stone or wood texture. Its back is lush with trees, shrubs, and thorny protrusions. With its mouth agape, the crocodile reveals a pink tongue and sharp teeth. The background features a dusk sky with some distant trees, giving the overall scene a dark and cold atmosphere. A close-up from a low angle.\n''' \ - '''4. 
In the style of an American drama promotional poster, Walter White sits in a metal folding chair wearing a yellow protective suit, with the words "Breaking Bad" written in sans-serif English above him, surrounded by piles of dollar bills and blue plastic storage boxes. He wears glasses, staring forward, dressed in a yellow jumpsuit, with his hands resting on his knees, exuding a calm and confident demeanor. The background shows an abandoned, dim factory with light filtering through the windows. There’s a noticeable grainy texture. A medium shot with a straight-on close-up of the character.\n''' \ + '''4. In the style of an American drama promotional poster, Walter White sits in a metal folding chair wearing a yellow protective suit, with the words "Breaking Bad" written in sans-serif English above him, surrounded by piles of dollar bills and blue plastic storage boxes. He wears glasses, staring forward, dressed in a yellow jumpsuit, with his hands resting on his knees, exuding a calm and confident demeanor. The background shows an abandoned, dim factory with light filtering through the windows. There's a noticeable grainy texture. A medium shot with a straight-on close-up of the character.\n''' \ '''Directly output the rewritten English text.''' @@ -347,7 +347,7 @@ class QwenPromptExpander(PromptExpander): use_fast=True) self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( self.model_name, - torch_dtype=torch.bfloat16 if FLASH_VER == 2 else + torch_dtype=torch.float32 if FLASH_VER == 2 else torch.float16 if "AWQ" in self.model_name else "auto", attn_implementation="flash_attention_2" if FLASH_VER == 2 else None, @@ -363,6 +363,16 @@ class QwenPromptExpander(PromptExpander): device_map="cpu") self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) + # Initialize device + if isinstance(device, torch.device): + self.device = device + elif device == "mps" or (isinstance(device, str) and "mps" in device): + self.device = torch.device("mps" if torch.backends.mps.is_available() else "cpu") + elif isinstance(device, int) and device == -1: + self.device = torch.device("cpu") + else: + self.device = torch.device(f"cuda:{device}" if isinstance(device, int) else device) + def extend(self, prompt, system_prompt, seed=-1, *args, **kwargs): self.model = self.model.to(self.device) messages = [{ diff --git a/wan/utils/qwen_vl_utils.py b/wan/utils/qwen_vl_utils.py index 3c682e6..c8194c2 100644 --- a/wan/utils/qwen_vl_utils.py +++ b/wan/utils/qwen_vl_utils.py @@ -274,6 +274,12 @@ def get_video_reader_backend() -> str: def fetch_video( ele: dict, image_factor: int = IMAGE_FACTOR) -> torch.Tensor | list[Image.Image]: + # Handle MPS device compatibility + original_device = None + if isinstance(ele.get("video"), torch.Tensor) and ele["video"].device.type == "mps": + original_device = ele["video"].device + ele["video"] = ele["video"].cpu() + if isinstance(ele["video"], str): video_reader_backend = get_video_reader_backend() video = VIDEO_READER_BACKENDS[video_reader_backend](ele) @@ -324,6 +330,10 @@ def fetch_video( images.extend([images[-1]] * (nframes - len(images))) return images + # Return to original device if needed + if original_device is not None and isinstance(video, torch.Tensor): + video = video.to(original_device) + def extract_vision_info( conversations: list[dict] | list[list[dict]]) -> list[dict]: From 60ecbf43808e17ca0ded311bd0d93d9f1af685bd Mon Sep 17 00:00:00 2001 From: Bakhtiyor Sulaymonov Date: Wed, 26 Feb 2025 22:27:34 +0500 Subject: [PATCH 16/20] Update README with macOS 
setup and usage instructions --- README.md | 424 ++++++------------------------------------------------ 1 file changed, 41 insertions(+), 383 deletions(-) diff --git a/README.md b/README.md index 8c95c58..155850e 100644 --- a/README.md +++ b/README.md @@ -1,402 +1,60 @@ -# Wan2.1 +# Wan2.1 Text-to-Video Model -

- -

+This repository contains the Wan2.1 text-to-video model, adapted for macOS with M1 Pro chip. This adaptation allows macOS users to run the model efficiently, overcoming CUDA-specific limitations. -

- 💜 Wan    |    🖥️ GitHub    |   🤗 Hugging Face   |   🤖 ModelScope   |    📑 Paper (Coming soon)    |    📑 Blog    |   💬 WeChat Group   |    📖 Discord   -
+## Introduction ------ +The Wan2.1 model is an open-source text-to-video generation model. It transforms textual descriptions into video sequences, leveraging advanced machine learning techniques. -[**Wan: Open and Advanced Large-Scale Video Generative Models**]("") +## Changes for macOS -In this repository, we present **Wan2.1**, a comprehensive and open suite of video foundation models that pushes the boundaries of video generation. **Wan2.1** offers these key features: -- 👍 **SOTA Performance**: **Wan2.1** consistently outperforms existing open-source models and state-of-the-art commercial solutions across multiple benchmarks. -- 👍 **Supports Consumer-grade GPUs**: The T2V-1.3B model requires only 8.19 GB VRAM, making it compatible with almost all consumer-grade GPUs. It can generate a 5-second 480P video on an RTX 4090 in about 4 minutes (without optimization techniques like quantization). Its performance is even comparable to some closed-source models. -- 👍 **Multiple Tasks**: **Wan2.1** excels in Text-to-Video, Image-to-Video, Video Editing, Text-to-Image, and Video-to-Audio, advancing the field of video generation. -- 👍 **Visual Text Generation**: **Wan2.1** is the first video model capable of generating both Chinese and English text, featuring robust text generation that enhances its practical applications. -- 👍 **Powerful Video VAE**: **Wan-VAE** delivers exceptional efficiency and performance, encoding and decoding 1080P videos of any length while preserving temporal information, making it an ideal foundation for video and image generation. +This version includes modifications to make the model compatible with macOS, specifically for systems using the M1 Pro chip. Key changes include: -## Video Demos +- Adaptation of CUDA-specific code to work with MPS (Metal Performance Shaders) on macOS. +- Environment variable settings for MPS fallback to CPU for unsupported operations. +- Adjustments to command-line arguments for better compatibility with macOS. -

- -
+## Installation Instructions -## 🔥 Latest News!! +Follow these steps to set up the environment on macOS: -* Feb 25, 2025: 👋 We've released the inference code and weights of Wan2.1. +1. **Install Homebrew**: If not already installed, use Homebrew to manage packages. + ```bash + /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + ``` +2. **Install Python 3.10+**: + ```bash + brew install python@3.10 + ``` -## 📑 Todo List -- Wan2.1 Text-to-Video - - [x] Multi-GPU Inference code of the 14B and 1.3B models - - [x] Checkpoints of the 14B and 1.3B models - - [x] Gradio demo - - [ ] Diffusers integration - - [ ] ComfyUI integration -- Wan2.1 Image-to-Video - - [x] Multi-GPU Inference code of the 14B model - - [x] Checkpoints of the 14B model - - [x] Gradio demo - - [ ] Diffusers integration - - [ ] ComfyUI integration +3. **Create and Activate a Virtual Environment**: + ```bash + python3.10 -m venv venv_wan + source venv_wan/bin/activate + ``` +4. **Install Dependencies**: + ```bash + pip install -r requirements.txt + pip install einops + ``` -## Quickstart +## Usage -#### Installation -Clone the repo: -``` -git clone https://github.com/Wan-Video/Wan2.1.git -cd Wan2.1 +To generate a video, use the following command: + +```bash +export PYTORCH_ENABLE_MPS_FALLBACK=1 +python generate.py --task t2v-1.3B --size "480*832" --frame_num 16 --sample_steps 25 --ckpt_dir ./Wan2.1-T2V-1.3B --offload_model True --t5_cpu --device mps --prompt "Lion running under snow in Samarkand" --save_file output_video.mp4 ``` -Install dependencies: -``` -# Ensure torch >= 2.4.0 -pip install -r requirements.txt -``` +## Optimization Tips +- **Use CPU for Large Models**: If you encounter memory issues, use `--device cpu`. +- **Reduce Resolution and Frame Count**: Use smaller resolutions and fewer frames to reduce memory usage. +- **Monitor System Resources**: Keep an eye on memory usage and adjust parameters as needed. -#### Model Download +## Acknowledgments -| Models | Download Link | Notes | -| --------------|-------------------------------------------------------------------------------|-------------------------------| -| T2V-14B | 🤗 [Huggingface](https://huggingface.co/Wan-AI/Wan2.1-T2V-14B) 🤖 [ModelScope](https://www.modelscope.cn/models/Wan-AI/Wan2.1-T2V-14B) | Supports both 480P and 720P -| I2V-14B-720P | 🤗 [Huggingface](https://huggingface.co/Wan-AI/Wan2.1-I2V-14B-720P) 🤖 [ModelScope](https://www.modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-720P) | Supports 720P -| I2V-14B-480P | 🤗 [Huggingface](https://huggingface.co/Wan-AI/Wan2.1-I2V-14B-480P) 🤖 [ModelScope](https://www.modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-480P) | Supports 480P -| T2V-1.3B | 🤗 [Huggingface](https://huggingface.co/Wan-AI/Wan2.1-T2V-1.3B) 🤖 [ModelScope](https://www.modelscope.cn/models/Wan-AI/Wan2.1-T2V-1.3B) | Supports 480P - -> 💡Note: The 1.3B model is capable of generating videos at 720P resolution. However, due to limited training at this resolution, the results are generally less stable compared to 480P. For optimal performance, we recommend using 480P resolution. 
- - -Download models using huggingface-cli: -``` -pip install "huggingface_hub[cli]" -huggingface-cli download Wan-AI/Wan2.1-T2V-14B --local-dir ./Wan2.1-T2V-14B -``` - -Download models using modelscope-cli: -``` -pip install modelscope -modelscope download Wan-AI/Wan2.1-T2V-14B --local_dir ./Wan2.1-T2V-14B -``` -#### Run Text-to-Video Generation - -This repository supports two Text-to-Video models (1.3B and 14B) and two resolutions (480P and 720P). The parameters and configurations for these models are as follows: - - - - - - - - - - - - - - - - - - - - - - - - - - - -
TaskResolutionModel
480P720P
t2v-14B✔️✔️Wan2.1-T2V-14B
t2v-1.3B✔️Wan2.1-T2V-1.3B
- - -##### (1) Without Prompt Extention - -To facilitate implementation, we will start with a basic version of the inference process that skips the [prompt extension](#2-using-prompt-extention) step. - -- Single-GPU inference - -``` -python generate.py --task t2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-T2V-14B --prompt "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage." -``` - -If you encounter OOM (Out-of-Memory) issues, you can use the `--offload_model True` and `--t5_cpu` options to reduce GPU memory usage. For example, on an RTX 4090 GPU: - -``` -python generate.py --task t2v-1.3B --size 832*480 --ckpt_dir ./Wan2.1-T2V-1.3B --offload_model True --t5_cpu --sample_shift 8 --sample_guide_scale 6 --prompt "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage." -``` - -> 💡Note: If you are using the `T2V-1.3B` model, we recommend setting the parameter `--sample_guide_scale 6`. The `--sample_shift parameter` can be adjusted within the range of 8 to 12 based on the performance. - - -- Multi-GPU inference using FSDP + xDiT USP - -``` -pip install "xfuser>=0.4.1" -torchrun --nproc_per_node=8 generate.py --task t2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-T2V-14B --dit_fsdp --t5_fsdp --ulysses_size 8 --prompt "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage." -``` - - -##### (2) Using Prompt Extention - -Extending the prompts can effectively enrich the details in the generated videos, further enhancing the video quality. Therefore, we recommend enabling prompt extension. We provide the following two methods for prompt extension: - -- Use the Dashscope API for extension. - - Apply for a `dashscope.api_key` in advance ([EN](https://www.alibabacloud.com/help/en/model-studio/getting-started/first-api-call-to-qwen) | [CN](https://help.aliyun.com/zh/model-studio/getting-started/first-api-call-to-qwen)). - - Configure the environment variable `DASH_API_KEY` to specify the Dashscope API key. For users of Alibaba Cloud's international site, you also need to set the environment variable `DASH_API_URL` to 'https://dashscope-intl.aliyuncs.com/api/v1'. For more detailed instructions, please refer to the [dashscope document](https://www.alibabacloud.com/help/en/model-studio/developer-reference/use-qwen-by-calling-api?spm=a2c63.p38356.0.i1). - - Use the `qwen-plus` model for text-to-video tasks and `qwen-vl-max` for image-to-video tasks. - - You can modify the model used for extension with the parameter `--prompt_extend_model`. For example: -``` -DASH_API_KEY=your_key python generate.py --task t2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-T2V-14B --prompt "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage" --use_prompt_extend --prompt_extend_method 'dashscope' --prompt_extend_target_lang 'ch' -``` - -- Using a local model for extension. - - - By default, the Qwen model on HuggingFace is used for this extension. Users can choose Qwen models or other models based on the available GPU memory size. - - For text-to-video tasks, you can use models like `Qwen/Qwen2.5-14B-Instruct`, `Qwen/Qwen2.5-7B-Instruct` and `Qwen/Qwen2.5-3B-Instruct`. - - For image-to-video tasks, you can use models like `Qwen/Qwen2.5-VL-7B-Instruct` and `Qwen/Qwen2.5-VL-3B-Instruct`. - - Larger models generally provide better extension results but require more GPU memory. 
- - You can modify the model used for extension with the parameter `--prompt_extend_model` , allowing you to specify either a local model path or a Hugging Face model. For example: - -``` -python generate.py --task t2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-T2V-14B --prompt "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage" --use_prompt_extend --prompt_extend_method 'local_qwen' --prompt_extend_target_lang 'ch' -``` - -##### (3) Runing local gradio - -``` -cd gradio -# if one uses dashscope’s API for prompt extension -DASH_API_KEY=your_key python t2v_14B_singleGPU.py --prompt_extend_method 'dashscope' --ckpt_dir ./Wan2.1-T2V-14B - -# if one uses a local model for prompt extension -python t2v_14B_singleGPU.py --prompt_extend_method 'local_qwen' --ckpt_dir ./Wan2.1-T2V-14B -``` - - -#### Run Image-to-Video Generation - -Similar to Text-to-Video, Image-to-Video is also divided into processes with and without the prompt extension step. The specific parameters and their corresponding settings are as follows: - - - - - - - - - - - - - - - - - - - - - - - - - - -
TaskResolutionModel
480P720P
i2v-14B✔️Wan2.1-I2V-14B-720P
i2v-14B✔️Wan2.1-T2V-14B-480P
- - -##### (1) Without Prompt Extention - -- Single-GPU inference -``` -python generate.py --task i2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-I2V-14B-720P --image examples/i2v_input.JPG --prompt "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside." -``` - -> 💡For the Image-to-Video task, the `size` parameter represents the area of the generated video, with the aspect ratio following that of the original input image. - - -- Multi-GPU inference using FSDP + xDiT USP - -``` -pip install "xfuser>=0.4.1" -torchrun --nproc_per_node=8 generate.py --task i2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-I2V-14B-720P --image examples/i2v_input.JPG --dit_fsdp --t5_fsdp --ulysses_size 8 --prompt "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside." -``` - -##### (2) Using Prompt Extention - - -The process of prompt extension can be referenced [here](#2-using-prompt-extention). - -Run with local prompt extention using `Qwen/Qwen2.5-VL-7B-Instruct`: -``` -python generate.py --task i2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-I2V-14B-720P --image examples/i2v_input.JPG --use_prompt_extend --prompt_extend_model Qwen/Qwen2.5-VL-7B-Instruct --prompt "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside." -``` - -Run with remote prompt extention using `dashscope`: -``` -DASH_API_KEY=your_key python generate.py --task i2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-I2V-14B-720P --image examples/i2v_input.JPG --use_prompt_extend --prompt_extend_method 'dashscope' --prompt "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside." 
-``` - -##### (3) Runing local gradio - -``` -cd gradio -# if one only uses 480P model in gradio -DASH_API_KEY=your_key python i2v_14B_singleGPU.py --prompt_extend_method 'dashscope' --ckpt_dir_480p ./Wan2.1-I2V-14B-480P - -# if one only uses 720P model in gradio -DASH_API_KEY=your_key python i2v_14B_singleGPU.py --prompt_extend_method 'dashscope' --ckpt_dir_720p ./Wan2.1-I2V-14B-720P - -# if one uses both 480P and 720P models in gradio -DASH_API_KEY=your_key python i2v_14B_singleGPU.py --prompt_extend_method 'dashscope' --ckpt_dir_480p ./Wan2.1-I2V-14B-480P --ckpt_dir_720p ./Wan2.1-I2V-14B-720P -``` - - -#### Run Text-to-Image Generation - -Wan2.1 is a unified model for both image and video generation. Since it was trained on both types of data, it can also generate images. The command for generating images is similar to video generation, as follows: - -##### (1) Without Prompt Extention - -- Single-GPU inference -``` -python generate.py --task t2i-14B --size 1024*1024 --ckpt_dir ./Wan2.1-T2V-14B --prompt '一个朴素端庄的美人' -``` - -- Multi-GPU inference using FSDP + xDiT USP - -``` -torchrun --nproc_per_node=8 generate.py --dit_fsdp --t5_fsdp --ulysses_size 8 --base_seed 0 --frame_num 1 --task t2i-14B --size 1024*1024 --prompt '一个朴素端庄的美人' --ckpt_dir ./Wan2.1-T2V-14B -``` - -##### (2) With Prompt Extention - -- Single-GPU inference -``` -python generate.py --task t2i-14B --size 1024*1024 --ckpt_dir ./Wan2.1-T2V-14B --prompt '一个朴素端庄的美人' --use_prompt_extend -``` - -- Multi-GPU inference using FSDP + xDiT USP -``` -torchrun --nproc_per_node=8 generate.py --dit_fsdp --t5_fsdp --ulysses_size 8 --base_seed 0 --frame_num 1 --task t2i-14B --size 1024*1024 --ckpt_dir ./Wan2.1-T2V-14B --prompt '一个朴素端庄的美人' --use_prompt_extend -``` - - -## Manual Evaluation - -##### (1) Text-to-Video Evaluation - -Through manual evaluation, the results generated after prompt extension are superior to those from both closed-source and open-source models. - -
- -
- - -##### (2) Image-to-Video Evaluation - -We also conducted extensive manual evaluations to evaluate the performance of the Image-to-Video model, and the results are presented in the table below. The results clearly indicate that **Wan2.1** outperforms both closed-source and open-source models. - -
- -
- - -## Computational Efficiency on Different GPUs - -We test the computational efficiency of different **Wan2.1** models on different GPUs in the following table. The results are presented in the format: **Total time (s) / peak GPU memory (GB)**. - - -
- -
- -> The parameter settings for the tests presented in this table are as follows: -> (1) For the 1.3B model on 8 GPUs, set `--ring_size 8` and `--ulysses_size 1`; -> (2) For the 14B model on 1 GPU, use `--offload_model True`; -> (3) For the 1.3B model on a single 4090 GPU, set `--offload_model True --t5_cpu`; -> (4) For all testings, no prompt extension was applied, meaning `--use_prompt_extend` was not enabled. - -> 💡Note: T2V-14B is slower than I2V-14B because the former samples 50 steps while the latter uses 40 steps. - - -## Community Contributions -- [DiffSynth-Studio](https://github.com/modelscope/DiffSynth-Studio) provides more support for Wan, including video-to-video, FP8 quantization, VRAM optimization, LoRA training, and more. Please refer to [their examples](https://github.com/modelscope/DiffSynth-Studio/tree/main/examples/wanvideo). - -------- - -## Introduction of Wan2.1 - -**Wan2.1** is designed on the mainstream diffusion transformer paradigm, achieving significant advancements in generative capabilities through a series of innovations. These include our novel spatio-temporal variational autoencoder (VAE), scalable training strategies, large-scale data construction, and automated evaluation metrics. Collectively, these contributions enhance the model’s performance and versatility. - - -##### (1) 3D Variational Autoencoders -We propose a novel 3D causal VAE architecture, termed **Wan-VAE** specifically designed for video generation. By combining multiple strategies, we improve spatio-temporal compression, reduce memory usage, and ensure temporal causality. **Wan-VAE** demonstrates significant advantages in performance efficiency compared to other open-source VAEs. Furthermore, our **Wan-VAE** can encode and decode unlimited-length 1080P videos without losing historical temporal information, making it particularly well-suited for video generation tasks. - - -
- -
- - -##### (2) Video Diffusion DiT - -**Wan2.1** is designed using the Flow Matching framework within the paradigm of mainstream Diffusion Transformers. Our model's architecture uses the T5 Encoder to encode multilingual text input, with cross-attention in each transformer block embedding the text into the model structure. Additionally, we employ an MLP with a Linear layer and a SiLU layer to process the input time embeddings and predict six modulation parameters individually. This MLP is shared across all transformer blocks, with each block learning a distinct set of biases. Our experimental findings reveal a significant performance improvement with this approach at the same parameter scale. - -
- -
- - -| Model | Dimension | Input Dimension | Output Dimension | Feedforward Dimension | Frequency Dimension | Number of Heads | Number of Layers | -|--------|-----------|-----------------|------------------|-----------------------|---------------------|-----------------|------------------| -| 1.3B | 1536 | 16 | 16 | 8960 | 256 | 12 | 30 | -| 14B | 5120 | 16 | 16 | 13824 | 256 | 40 | 40 | - - - -##### Data - -We curated and deduplicated a candidate dataset comprising a vast amount of image and video data. During the data curation process, we designed a four-step data cleaning process, focusing on fundamental dimensions, visual quality and motion quality. Through the robust data processing pipeline, we can easily obtain high-quality, diverse, and large-scale training sets of images and videos. - -![figure1](assets/data_for_diff_stage.jpg "figure1") - - -##### Comparisons to SOTA -We compared **Wan2.1** with leading open-source and closed-source models to evaluate the performace. Using our carefully designed set of 1,035 internal prompts, we tested across 14 major dimensions and 26 sub-dimensions. We then compute the total score by performing a weighted calculation on the scores of each dimension, utilizing weights derived from human preferences in the matching process. The detailed results are shown in the table below. These results demonstrate our model's superior performance compared to both open-source and closed-source models. - -![figure1](assets/vben_vs_sota.png "figure1") - - -## Citation -If you find our work helpful, please cite us. - -``` -@article{wan2.1, - title = {Wan: Open and Advanced Large-Scale Video Generative Models}, - author = {Wan Team}, - journal = {}, - year = {2025} -} -``` - -## License Agreement -The models in this repository are licensed under the Apache 2.0 License. We claim no rights over the your generate contents, granting you the freedom to use them while ensuring that your usage complies with the provisions of this license. You are fully accountable for your use of the models, which must not involve sharing any content that violates applicable laws, causes harm to individuals or groups, disseminates personal information intended for harm, spreads misinformation, or targets vulnerable populations. For a complete list of restrictions and details regarding your rights, please refer to the full text of the [license](LICENSE.txt). - - -## Acknowledgements - -We would like to thank the contributors to the [SD3](https://huggingface.co/stabilityai/stable-diffusion-3-medium), [Qwen](https://huggingface.co/Qwen), [umt5-xxl](https://huggingface.co/google/umt5-xxl), [diffusers](https://github.com/huggingface/diffusers) and [HuggingFace](https://huggingface.co) repositories, for their open research. - - - -## Contact Us -If you would like to leave a message to our research or product teams, feel free to join our [Discord](https://discord.gg/p5XbdQV7) or [WeChat groups](https://gw.alicdn.com/imgextra/i2/O1CN01tqjWFi1ByuyehkTSB_!!6000000000015-0-tps-611-1279.jpg)! +This project is based on the original Wan2.1 model. Special thanks to the original authors and contributors for their work. 
From 2beb726132f5d81945b2efbb9e641269fbcac614 Mon Sep 17 00:00:00 2001 From: Bakhtiyor Sulaymonov Date: Thu, 27 Feb 2025 10:43:21 +0500 Subject: [PATCH 17/20] Update README.md Add model files download step --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index 155850e..71a9ae7 100644 --- a/README.md +++ b/README.md @@ -40,6 +40,17 @@ Follow these steps to set up the environment on macOS: pip install einops ``` +5. **Download models using huggingface-cli**: + ```bash + pip install "huggingface_hub[cli]" + huggingface-cli download Wan-AI/Wan2.1-T2V-1.3B --local-dir ./Wan2.1-T2V-1.3B + ``` + **Or download models using huggingface-cli**: + ```bash + pip install modelscope + modelscope download Wan-AI/Wan2.1-T2V-1.3B --local_dir ./Wan2.1-T2V-1.3B + ``` + ## Usage To generate a video, use the following command: From 5cb67c67e8d08badf7a64b76c2eded903982bf1b Mon Sep 17 00:00:00 2001 From: Bakhtiyor Sulaymonov Date: Thu, 27 Feb 2025 18:55:10 +0500 Subject: [PATCH 18/20] Fix MPS compatibility for I2V by adjusting device usage and dtype --- wan/configs/wan_i2v_14B.py | 2 +- wan/image2video.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/wan/configs/wan_i2v_14B.py b/wan/configs/wan_i2v_14B.py index 12e8e20..a666719 100644 --- a/wan/configs/wan_i2v_14B.py +++ b/wan/configs/wan_i2v_14B.py @@ -14,7 +14,7 @@ i2v_14B.t5_tokenizer = 'google/umt5-xxl' # clip i2v_14B.clip_model = 'clip_xlm_roberta_vit_h_14' -i2v_14B.clip_dtype = torch.float16 +i2v_14B.clip_dtype = torch.float32 i2v_14B.clip_checkpoint = 'models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth' i2v_14B.clip_tokenizer = 'xlm-roberta-large' diff --git a/wan/image2video.py b/wan/image2video.py index 55d3f57..6bff1b8 100644 --- a/wan/image2video.py +++ b/wan/image2video.py @@ -244,7 +244,7 @@ class WanI2V: y = self.vae.encode([ torch.concat([ torch.nn.functional.interpolate( - img[None].cpu(), size=(h, w), mode='bicubic').transpose( + img[None], size=(h, w), mode='bicubic').transpose( 0, 1), torch.zeros(3, 80, h, w) ], From ece812041590b5131412fa05ededc3530fda2b4a Mon Sep 17 00:00:00 2001 From: Bakhtiyor Sulaymonov Date: Mon, 17 Mar 2025 12:42:17 +0500 Subject: [PATCH 19/20] Update generate.py: Fix hardcoding for one device --- generate.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/generate.py b/generate.py index 87ed37b..7f0ed21 100644 --- a/generate.py +++ b/generate.py @@ -212,13 +212,25 @@ def _init_logging(rank): def generate(args): - # Set device based on args or availability - if args.device: - device = torch.device(args.device) + # Handle both distributed and single-device scenarios + if "RANK" in os.environ: + # Distributed setup + rank = int(os.getenv("RANK", 0)) + world_size = int(os.getenv("WORLD_SIZE", 1)) + local_rank = int(os.getenv("LOCAL_RANK", 0)) + device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu") + init_logging(rank) else: - device = torch.device("mps" if torch.backends.mps.is_available() else "cpu") - - _init_logging(0) # Use rank 0 logging for single-device + # Single-device setup with MPS fallback + rank = 0 + world_size = 1 + if args.device: + device = torch.device(args.device) + else: + device = torch.device("cuda:0" if torch.cuda.is_available() else + "mps" if torch.backends.mps.is_available() else + "cpu") + init_logging(rank) # Ensure all torch operations use this device torch.set_default_device(device) From a9d7494136c85a1ba0a280e3ee834ebd51a171a6 Mon Sep 17 
00:00:00 2001 From: Bakhtiyor Sulaymonov Date: Mon, 17 Mar 2025 12:45:10 +0500 Subject: [PATCH 20/20] Update README.md: Put back acknowledgement (it was accidentally removed in merge conflict)) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 2594f95..fa71888 100644 --- a/README.md +++ b/README.md @@ -544,6 +544,7 @@ The models in this repository are licensed under the Apache 2.0 License. We clai ## Acknowledgements +We would like to thank the contributors to the [SD3](https://huggingface.co/stabilityai/stable-diffusion-3-medium), [Qwen](https://huggingface.co/Qwen), [umt5-xxl](https://huggingface.co/google/umt5-xxl), [diffusers](https://github.com/huggingface/diffusers) and [HuggingFace](https://huggingface.co) repositories, for their open research. ## Optimization Tips
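PATCH 19 above replaces the single-device hard-coding with a branch that still honours distributed launches. The following is a condensed, illustrative restatement of that selection order — distributed rank first, then an explicit `--device`, then CUDA, MPS, and finally CPU; the function name `pick_device_and_rank` is assumed here, while the real logic lives inline in `generate()`.

```python
# Condensed, illustrative restatement of the device/rank selection in PATCH 19;
# pick_device_and_rank() is an assumed helper name, not part of generate.py.
import os

import torch


def pick_device_and_rank(requested_device=None):
    if "RANK" in os.environ:
        # Launched under torchrun / a distributed launcher.
        rank = int(os.getenv("RANK", 0))
        local_rank = int(os.getenv("LOCAL_RANK", 0))
        device = torch.device(
            f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
    else:
        # Single-device run: explicit --device wins, then CUDA, then MPS, then CPU.
        rank = 0
        if requested_device:
            device = torch.device(requested_device)
        elif torch.cuda.is_available():
            device = torch.device("cuda:0")
        elif torch.backends.mps.is_available():
            device = torch.device("mps")
        else:
            device = torch.device("cpu")
    return device, rank


device, rank = pick_device_and_rank()
```

Keying the distributed branch on the `RANK` environment variable lets the same entry point run unchanged both under `torchrun` and on a single Apple-silicon machine.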