Mirror of https://github.com/Wan-Video/Wan2.1.git (synced 2025-11-04)
Add professional-grade Docker setup for local deployment

This commit introduces comprehensive Docker support for running Wan2.1 video generation models locally with GPU acceleration.

Changes:
- Add Dockerfile with CUDA 12.1 support and optimized layer caching
- Add docker-compose.yml for easy container orchestration
- Add .dockerignore for efficient Docker builds
- Add DOCKER_SETUP.md with detailed setup and troubleshooting guide
- Add DOCKER_QUICKSTART.md for rapid deployment
- Add docker-run.sh helper script for container management
- Update Makefile with Docker management commands

Features:
- Full GPU support with NVIDIA Docker runtime
- Single-GPU and multi-GPU (FSDP + xDiT) configurations
- Memory optimization flags for consumer GPUs (8GB+)
- Gradio web interface support on port 7860
- Volume mounts for models, outputs, and cache
- Comprehensive troubleshooting and optimization guides
- Production-ready security best practices

The Docker setup supports all Wan2.1 models (T2V, I2V, FLF2V, VACE) and includes both 1.3B (consumer GPU) and 14B (high-end GPU) variants.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
parent 7c81b2f27d
commit 0bd40b9bf0
							
								
								
									
.dockerignore (new file, 92 lines)
@@ -0,0 +1,92 @@
# Git files
.git
.gitignore
.gitattributes

# Docker files
Dockerfile
docker-compose.yml
.dockerignore

# Python cache
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# Virtual environments
venv/
env/
ENV/
.venv

# IDE files
.vscode/
.idea/
*.swp
*.swo
*~
.DS_Store

# Model files (download separately)
models/
*.pth
*.pt
*.bin
*.safetensors
*.ckpt

# Output files
outputs/
output/
*.mp4
*.avi
*.mov
*.png
*.jpg
*.jpeg

# Cache directories
cache/
.cache/
__pycache__/

# Logs
*.log
logs/

# Documentation (keep lightweight)
*.md
!README.md
!DOCKER_SETUP.md

# Test files
tests/
test/
*.test.py

# CI/CD
.github/
.gitlab-ci.yml
.travis.yml

# Temporary files
tmp/
temp/
*.tmp
							
								
								
									
DOCKER_QUICKSTART.md (new file, 157 lines)
@@ -0,0 +1,157 @@
# Wan2.1 Docker Quick Start

Get Wan2.1 running in Docker in 5 minutes!

## Prerequisites

- Docker 20.10+ installed ([Get Docker](https://docs.docker.com/get-docker/))
- NVIDIA GPU with 8GB+ VRAM (for GPU acceleration)
- NVIDIA Docker runtime installed ([Install Guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html))

## Quick Start (3 Steps)

### Step 1: Clone and Navigate

```bash
git clone https://github.com/Wan-Video/Wan2.1.git
cd Wan2.1
```

### Step 2: Build and Start

**Option A: Using the helper script** (Recommended)

```bash
./docker-run.sh start
```

**Option B: Using Make**

```bash
make docker-build
make docker-up
```

**Option C: Using Docker Compose directly**

```bash
docker compose up -d wan2-1
```

### Step 3: Download Models and Run

```bash
# Enter the container
./docker-run.sh shell
# OR
make docker-shell
# OR
docker compose exec wan2-1 bash

# Download a model (1.3B for consumer GPUs)
pip install "huggingface_hub[cli]"
huggingface-cli download Wan-AI/Wan2.1-T2V-1.3B --local-dir /app/models/Wan2.1-T2V-1.3B

# Generate your first video!
python generate.py \
  --task t2v-1.3B \
  --size 832*480 \
  --ckpt_dir /app/models/Wan2.1-T2V-1.3B \
  --offload_model True \
  --t5_cpu \
  --sample_shift 8 \
  --sample_guide_scale 6 \
  --prompt "A cute cat playing with a ball of yarn"

# Your video will be in /app/outputs (accessible at ./outputs on your host)
```
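Once generation finishes, the video lands in the mounted output directory, so you can check for it from the host without entering the container:

```bash
# On the host: the container's /app/outputs is bind-mounted to ./outputs
ls -lh outputs/
```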
## Common Commands

### Container Management

```bash
# Start container
./docker-run.sh start

# Stop container
./docker-run.sh stop

# Restart container
./docker-run.sh restart

# View logs
./docker-run.sh logs

# Enter shell
./docker-run.sh shell

# Check status
./docker-run.sh status
```

### Using Make Commands

```bash
make docker-up        # Start
make docker-down      # Stop
make docker-shell     # Enter shell
make docker-logs      # View logs
make docker-status    # Check status
make help             # Show all commands
```

## Run Gradio Web Interface

```bash
# Inside the container (the 14B demo needs the T2V-14B checkpoint; see the table below)
cd gradio
python t2v_14B_singleGPU.py --ckpt_dir /app/models/Wan2.1-T2V-14B

# Open browser to: http://localhost:7860
```

## Available Models

| Model | VRAM | Resolution | Download Command |
|-------|------|------------|------------------|
| T2V-1.3B | 8GB+ | 480P | `huggingface-cli download Wan-AI/Wan2.1-T2V-1.3B --local-dir /app/models/Wan2.1-T2V-1.3B` |
| T2V-14B | 24GB+ | 720P | `huggingface-cli download Wan-AI/Wan2.1-T2V-14B --local-dir /app/models/Wan2.1-T2V-14B` |
| I2V-14B-720P | 24GB+ | 720P | `huggingface-cli download Wan-AI/Wan2.1-I2V-14B-720P --local-dir /app/models/Wan2.1-I2V-14B-720P` |
| I2V-14B-480P | 16GB+ | 480P | `huggingface-cli download Wan-AI/Wan2.1-I2V-14B-480P --local-dir /app/models/Wan2.1-I2V-14B-480P` |
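To fetch several checkpoints in one go, a small shell loop over the same commands works; the model list below is just an example selection:

```bash
# Download multiple checkpoints (names as in the table above)
for m in Wan2.1-T2V-1.3B Wan2.1-I2V-14B-480P; do
  huggingface-cli download "Wan-AI/$m" --local-dir "/app/models/$m"
done
```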
## Troubleshooting

### "CUDA out of memory"
- Use the 1.3B model with `--offload_model True --t5_cpu`
- Reduce resolution to 480P

### "nvidia-smi not found"
- Ensure NVIDIA Docker runtime is installed
- Run: `docker run --rm --gpus all nvidia/cuda:12.1.1-base-ubuntu22.04 nvidia-smi`

### Can't access Gradio interface
- Check if port 7860 is exposed: `docker ps | grep 7860`
- Try `http://127.0.0.1:7860` instead of `localhost`
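If the container runs on a remote machine, an SSH tunnel is often the simplest fix; `user@server` below is a placeholder for your own login:

```bash
# Forward the remote Gradio port to your local browser
ssh -L 7860:localhost:7860 user@server
```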
## Next Steps

- Read the full [DOCKER_SETUP.md](DOCKER_SETUP.md) for advanced configuration
- Check the main [README.md](README.md) for model details
- Join the [Discord community](https://discord.gg/AKNgpMK4Yj)

## File Structure

```
Wan2.1/
├── models/          # Downloaded models (created automatically)
├── outputs/         # Generated videos (accessible from host)
├── cache/           # Model cache
├── Dockerfile       # Docker image definition
├── docker-compose.yml  # Container orchestration
├── docker-run.sh    # Helper script
├── Makefile         # Make commands
└── DOCKER_SETUP.md  # Detailed documentation
```

**Happy Generating!** 🎬
							
								
								
									
DOCKER_SETUP.md (new file, 663 lines)
@@ -0,0 +1,663 @@
# Wan2.1 Docker Setup Guide

Professional-grade instructions for running Wan2.1 video generation models in Docker containers with GPU support.

---

## Table of Contents
- [Prerequisites](#prerequisites)
- [System Requirements](#system-requirements)
- [Installation Steps](#installation-steps)
- [Quick Start](#quick-start)
- [Model Download](#model-download)
- [Running Inference](#running-inference)
- [Gradio Web Interface](#gradio-web-interface)
- [Advanced Configuration](#advanced-configuration)
- [Troubleshooting](#troubleshooting)
- [Performance Optimization](#performance-optimization)

---

## Prerequisites

### Required Software

1. **Docker Engine** (version 20.10+)
   - [Installation Guide](https://docs.docker.com/engine/install/)

2. **NVIDIA Docker Runtime** (for GPU support)
   - Required for GPU acceleration
   - [Installation Guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html)

3. **NVIDIA Drivers** (version 525.60.13+)
   - CUDA 12.1 compatible drivers
   - Check with: `nvidia-smi`

4. **Docker Compose** (version 2.0+)
   - Typically included with Docker Desktop
   - [Installation Guide](https://docs.docker.com/compose/install/)

### Optional Software

- **Git** - For cloning the repository
- **Make** - For using convenience commands
- **NVIDIA Container Toolkit** - For multi-GPU support

---

## System Requirements

### Minimum Requirements (T2V-1.3B at 480P)
- **GPU**: NVIDIA GPU with 8GB+ VRAM (e.g., RTX 4060 Ti)
- **RAM**: 16GB system memory
- **Storage**: 50GB free space (for models and cache)
- **OS**: Linux (Ubuntu 20.04+), Windows 10/11 with WSL2

### Recommended Requirements (T2V-14B at 720P)
- **GPU**: NVIDIA GPU with 24GB+ VRAM (e.g., RTX 4090, A5000)
- **RAM**: 32GB+ system memory
- **Storage**: 100GB+ free space
- **OS**: Linux (Ubuntu 22.04+)

### Multi-GPU Setup (for 8x GPU)
- **GPUs**: 8x NVIDIA GPUs (A100, H100, etc.)
- **RAM**: 128GB+ system memory
- **Storage**: 200GB+ free space
- **Network**: High-bandwidth GPU interconnect (NVLink preferred)

---

## Installation Steps

### Step 1: Verify Docker and NVIDIA Runtime

```bash
# Check Docker installation
docker --version
docker compose version

# Check NVIDIA driver
nvidia-smi

# Test NVIDIA Docker runtime
docker run --rm --gpus all nvidia/cuda:12.1.1-base-ubuntu22.04 nvidia-smi
```

**Expected output**: You should see your GPU(s) listed in the nvidia-smi output.

### Step 2: Clone the Repository

```bash
git clone https://github.com/Wan-Video/Wan2.1.git
cd Wan2.1
```

### Step 3: Create Required Directories

```bash
# Create directories for models, outputs, and cache
mkdir -p models outputs cache examples
```

### Step 4: Set Environment Variables (Optional)

For prompt extension with the Dashscope API:

```bash
# Create a .env file
cat > .env << EOF
DASH_API_KEY=your_dashscope_api_key_here
DASH_API_URL=https://dashscope.aliyuncs.com/api/v1
EOF
```

For international Alibaba Cloud users:

```bash
DASH_API_URL=https://dashscope-intl.aliyuncs.com/api/v1
```
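Docker Compose reads `.env` automatically for variable substitution in the compose file; to hand the same file to a manually started container, `docker run --env-file` works. A quick sketch (the `env | grep DASH` step is just a check that the variables arrived):

```bash
# Pass .env into a one-off container and verify the variables are set
docker run --rm --env-file .env wan2.1:latest env | grep DASH
```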
### Step 5: Build the Docker Image

```bash
# Build using Docker Compose (recommended)
docker compose build

# OR build manually
docker build -t wan2.1:latest .
```

**Build time**: Approximately 10-20 minutes depending on your internet connection.

---

## Quick Start

### Option 1: Using Docker Compose (Recommended)

```bash
# Start the container with GPU support
docker compose up -d wan2-1

# Check container status
docker compose ps

# View logs
docker compose logs -f wan2-1

# Access the container shell
docker compose exec wan2-1 bash
```

### Option 2: Using Docker Run

```bash
docker run -it --gpus all \
  --name wan2.1-container \
  -v $(pwd)/models:/app/models \
  -v $(pwd)/outputs:/app/outputs \
  -v $(pwd)/cache:/app/cache \
  -p 7860:7860 \
  --shm-size=16g \
  wan2.1:latest bash
```

### For CPU-only Mode

```bash
# Using Docker Compose
docker compose --profile cpu up -d wan2-1-cpu

# Using Docker Run
docker run -it \
  --name wan2.1-cpu \
  -e CUDA_VISIBLE_DEVICES="" \
  -v $(pwd)/models:/app/models \
  -v $(pwd)/outputs:/app/outputs \
  -v $(pwd)/cache:/app/cache \
  -p 7860:7860 \
  wan2.1:latest bash
```
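Inside the CPU-only container you can confirm that PyTorch really sees no GPU; this mirrors the check the image's health check runs:

```bash
# Should print False in the CPU-only container
python -c "import torch; print(torch.cuda.is_available())"
```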
---

## Model Download

Download models **before** running inference. Models should be placed in the `./models` directory.

### Using Hugging Face CLI (Inside Container)

```bash
# Enter the container
docker compose exec wan2-1 bash

# Download T2V-14B model
pip install "huggingface_hub[cli]"
huggingface-cli download Wan-AI/Wan2.1-T2V-14B --local-dir /app/models/Wan2.1-T2V-14B

# Download T2V-1.3B model
huggingface-cli download Wan-AI/Wan2.1-T2V-1.3B --local-dir /app/models/Wan2.1-T2V-1.3B

# Download I2V-14B-720P model
huggingface-cli download Wan-AI/Wan2.1-I2V-14B-720P --local-dir /app/models/Wan2.1-I2V-14B-720P

# Download I2V-14B-480P model
huggingface-cli download Wan-AI/Wan2.1-I2V-14B-480P --local-dir /app/models/Wan2.1-I2V-14B-480P

# Download FLF2V-14B model
huggingface-cli download Wan-AI/Wan2.1-FLF2V-14B-720P --local-dir /app/models/Wan2.1-FLF2V-14B-720P

# Download VACE models
huggingface-cli download Wan-AI/Wan2.1-VACE-1.3B --local-dir /app/models/Wan2.1-VACE-1.3B
huggingface-cli download Wan-AI/Wan2.1-VACE-14B --local-dir /app/models/Wan2.1-VACE-14B
```

### Using ModelScope (Alternative for Chinese Users)

```bash
pip install modelscope
modelscope download Wan-AI/Wan2.1-T2V-14B --local_dir /app/models/Wan2.1-T2V-14B
```

### Download from Host Machine

You can also download models on your host machine and they will be accessible in the container:

```bash
# On host machine (outside Docker)
cd Wan2.1/models
huggingface-cli download Wan-AI/Wan2.1-T2V-1.3B --local-dir ./Wan2.1-T2V-1.3B
```
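Checkpoints are tens of gigabytes, so a quick size check on the host is a cheap way to confirm a download actually completed:

```bash
# Rough sanity check of downloaded model sizes
du -sh models/*
```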
---

## Running Inference

All commands below should be run **inside the container**.

### Text-to-Video Generation

#### 1.3B Model (480P) - Consumer GPU Friendly

```bash
python generate.py \
  --task t2v-1.3B \
  --size 832*480 \
  --ckpt_dir /app/models/Wan2.1-T2V-1.3B \
  --offload_model True \
  --t5_cpu \
  --sample_shift 8 \
  --sample_guide_scale 6 \
  --prompt "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage."
```

#### 14B Model (720P) - High-End GPU

```bash
python generate.py \
  --task t2v-14B \
  --size 1280*720 \
  --ckpt_dir /app/models/Wan2.1-T2V-14B \
  --prompt "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage."
```

#### With Prompt Extension (Better Quality)

```bash
# Using local Qwen model
python generate.py \
  --task t2v-14B \
  --size 1280*720 \
  --ckpt_dir /app/models/Wan2.1-T2V-14B \
  --use_prompt_extend \
  --prompt_extend_method 'local_qwen' \
  --prompt "A beautiful sunset over the ocean"

# Using Dashscope API (requires DASH_API_KEY)
DASH_API_KEY=your_key python generate.py \
  --task t2v-14B \
  --size 1280*720 \
  --ckpt_dir /app/models/Wan2.1-T2V-14B \
  --use_prompt_extend \
  --prompt_extend_method 'dashscope' \
  --prompt "A beautiful sunset over the ocean"
```

### Image-to-Video Generation

```bash
python generate.py \
  --task i2v-14B \
  --size 1280*720 \
  --ckpt_dir /app/models/Wan2.1-I2V-14B-720P \
  --image /app/examples/i2v_input.JPG \
  --prompt "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard."
```

### First-Last-Frame-to-Video

```bash
python generate.py \
  --task flf2v-14B \
  --size 1280*720 \
  --ckpt_dir /app/models/Wan2.1-FLF2V-14B-720P \
  --first_frame /app/examples/flf2v_input_first_frame.png \
  --last_frame /app/examples/flf2v_input_last_frame.png \
  --prompt "CG animation style, a small blue bird takes off from the ground"
```

### Text-to-Image Generation

```bash
python generate.py \
  --task t2i-14B \
  --size 1024*1024 \
  --ckpt_dir /app/models/Wan2.1-T2V-14B \
  --prompt "A serene mountain landscape at dawn"
```

### VACE (Video Creation and Editing)

```bash
python generate.py \
  --task vace-1.3B \
  --size 832*480 \
  --ckpt_dir /app/models/Wan2.1-VACE-1.3B \
  --src_ref_images /app/examples/girl.png,/app/examples/snake.png \
  --prompt "Your detailed prompt here"
```

---

## Gradio Web Interface

### Start Gradio Interface

#### Text-to-Video (14B)

```bash
cd gradio
python t2v_14B_singleGPU.py \
  --ckpt_dir /app/models/Wan2.1-T2V-14B \
  --prompt_extend_method 'local_qwen'
```

#### Image-to-Video (14B)

```bash
cd gradio
python i2v_14B_singleGPU.py \
  --ckpt_dir_720p /app/models/Wan2.1-I2V-14B-720P \
  --prompt_extend_method 'local_qwen'
```

#### VACE (All-in-One)

```bash
cd gradio
python vace.py --ckpt_dir /app/models/Wan2.1-VACE-1.3B
```

### Access the Web Interface

1. Open your web browser
2. Navigate to: `http://localhost:7860`
3. Use the intuitive interface to generate videos

### For Remote Access

If running on a remote server:

```bash
# Start with a public URL (Gradio share feature)
python gradio/t2v_14B_singleGPU.py \
  --ckpt_dir /app/models/Wan2.1-T2V-14B \
  --server_name 0.0.0.0 \
  --server_port 7860 \
  --share
```

Then access via: `http://your-server-ip:7860`

---

## Advanced Configuration

### Multi-GPU Inference (FSDP + xDiT)

For an 8-GPU setup using the Ulysses or Ring attention strategies:

```bash
# Install xDiT
pip install "xfuser>=0.4.1"

# Run with Ulysses strategy (8 GPUs)
torchrun --nproc_per_node=8 generate.py \
  --task t2v-14B \
  --size 1280*720 \
  --ckpt_dir /app/models/Wan2.1-T2V-14B \
  --dit_fsdp \
  --t5_fsdp \
  --ulysses_size 8 \
  --prompt "Your prompt here"

# Run with Ring strategy (for sequence parallelism)
torchrun --nproc_per_node=8 generate.py \
  --task t2v-14B \
  --size 1280*720 \
  --ckpt_dir /app/models/Wan2.1-T2V-14B \
  --dit_fsdp \
  --t5_fsdp \
  --ring_size 8 \
  --prompt "Your prompt here"
```
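If I read the xDiT conventions correctly, the two strategies can also be combined, with the constraint that `ulysses_size * ring_size` equals the number of processes; treat the sketch below as an assumption to verify against the xfuser documentation rather than a confirmed recipe:

```bash
# Hybrid sketch: ulysses_size * ring_size must match --nproc_per_node (4 * 2 = 8)
torchrun --nproc_per_node=8 generate.py \
  --task t2v-14B \
  --size 1280*720 \
  --ckpt_dir /app/models/Wan2.1-T2V-14B \
  --dit_fsdp \
  --t5_fsdp \
  --ulysses_size 4 \
  --ring_size 2 \
  --prompt "Your prompt here"
```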
### Memory Optimization Flags

For limited VRAM:

```bash
# --offload_model True  offloads the model to CPU when not in use
# --t5_cpu              keeps the T5 text encoder on CPU
python generate.py \
  --task t2v-1.3B \
  --size 832*480 \
  --ckpt_dir /app/models/Wan2.1-T2V-1.3B \
  --offload_model True \
  --t5_cpu \
  --sample_shift 8 \
  --sample_guide_scale 6 \
  --prompt "Your prompt"
```

### Custom Output Directory

```bash
python generate.py \
  --task t2v-14B \
  --size 1280*720 \
  --ckpt_dir /app/models/Wan2.1-T2V-14B \
  --output_dir /app/outputs/my_generation \
  --prompt "Your prompt"
```

### Batch Generation

Generate multiple variations:

```bash
# --num_samples 4 generates 4 variations from the same prompt
python generate.py \
  --task t2v-14B \
  --size 1280*720 \
  --ckpt_dir /app/models/Wan2.1-T2V-14B \
  --base_seed 0 \
  --num_samples 4 \
  --prompt "Your prompt"
```

---

## Troubleshooting

### Issue: "CUDA out of memory"

**Solutions:**
1. Use a smaller model (1.3B instead of 14B)
2. Reduce resolution (480P instead of 720P)
3. Enable memory optimization flags:
   ```bash
   --offload_model True --t5_cpu
   ```
4. Increase Docker shared memory:
   ```bash
   docker run --shm-size=32g ...
   ```

### Issue: "nvidia-smi not found" inside container

**Solutions:**
1. Verify the NVIDIA Docker runtime is installed on the host
2. Check the Docker daemon configuration:
   ```bash
   # Edit /etc/docker/daemon.json
   {
     "runtimes": {
       "nvidia": {
         "path": "nvidia-container-runtime",
         "runtimeArgs": []
       }
     },
     "default-runtime": "nvidia"
   }
   ```
3. Restart the Docker daemon:
   ```bash
   sudo systemctl restart docker
   ```
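After the restart, you can confirm that Docker registered the NVIDIA runtime before retrying the container:

```bash
# The output should list an "nvidia" runtime
docker info | grep -i runtime
```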
### Issue: "Flash attention installation failed"

**Solution:**
Flash attention is optional; the Dockerfile continues even if it fails. For better performance, install it manually:

```bash
# Inside container
pip install flash-attn --no-build-isolation
```

### Issue: Model download fails

**Solutions:**
1. Check your internet connection
2. Use mirror sites (ModelScope for Chinese users, or see the sketch below)
3. Download models on the host machine and mount them
4. Increase the Docker download timeout
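For clients behind restrictive networks, pointing the Hugging Face CLI at a mirror endpoint sometimes helps; `HF_ENDPOINT` is respected by `huggingface_hub`, and the mirror host below is only an example, not an endorsement:

```bash
# Route huggingface-cli through a mirror endpoint (example host)
export HF_ENDPOINT=https://hf-mirror.com
huggingface-cli download Wan-AI/Wan2.1-T2V-1.3B --local-dir /app/models/Wan2.1-T2V-1.3B
```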
### Issue: "RuntimeError: CUDA error: device-side assert triggered"

**Solutions:**
1. Check CUDA availability:
   ```bash
   python -c "import torch; print(torch.cuda.is_available())"
   ```
2. Update the NVIDIA drivers
3. Rebuild the Docker image with a matching CUDA version

### Issue: Gradio interface not accessible

**Solutions:**
1. Check if the port is exposed:
   ```bash
   docker ps | grep 7860
   ```
2. Ensure the firewall allows port 7860
3. Try binding to all interfaces:
   ```bash
   python gradio/t2v_14B_singleGPU.py --server_name 0.0.0.0
   ```

### Issue: Permission denied errors

**Solution:**
```bash
# Fix ownership of mounted volumes
sudo chown -R $(id -u):$(id -g) models outputs cache
```

---

## Performance Optimization

### 1. Use SSD Storage
- Store models and cache on an SSD for faster loading
- Use NVMe for best performance

### 2. Increase Shared Memory
```yaml
# In docker-compose.yml
shm_size: '32gb'
```

### 3. Use Mixed Precision
- The model uses bfloat16 by default (optimal for modern GPUs)

### 4. Enable xFormers (if available)
```bash
pip install xformers
```

### 5. Multi-GPU Best Practices
- Use NVLink/NVSwitch for GPU communication
- Balance model sharding with the Ulysses + Ring strategies
- Monitor GPU utilization: `watch -n 1 nvidia-smi`

### 6. Optimize Inference Parameters
```bash
# For T2V-1.3B: adjust --sample_shift in the 8-12 range based on quality;
# a lower --sample_guide_scale is faster, a higher one favors prompt adherence
--sample_shift 8 --sample_guide_scale 6

# For T2V-14B: the recommended default
--sample_guide_scale 5.0
```

### 7. Use a Persistent Cache
```bash
# Models and tokenizers are cached in ./cache (mounted at /app/cache);
# the image already points TORCH_HOME and HF_HOME there, and reusing
# the cache speeds up subsequent runs
```

---

## Container Management

### Stop Container
```bash
docker compose down
```

### Restart Container
```bash
docker compose restart wan2-1
```

### View Logs
```bash
docker compose logs -f wan2-1
```

### Clean Up
```bash
# Remove containers
docker compose down -v

# Remove images
docker rmi wan2.1:latest

# Clean up the Docker system
docker system prune -a
```

### Update Container
```bash
# Pull latest code
git pull origin main

# Rebuild image
docker compose build --no-cache

# Restart containers
docker compose up -d
```

---

## Security Best Practices

1. **Do not commit API keys** to version control
2. **Use .env files** for sensitive environment variables
3. **Limit container privileges**: avoid running as root (see the sketch below)
4. **Keep Docker updated** for security patches
5. **Scan images** for vulnerabilities:
   ```bash
   docker scan wan2.1:latest
   ```
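One low-effort way to act on point 3 is to run the container with your own UID/GID instead of root; a minimal sketch, assuming the mounted directories are writable by that user:

```bash
# Run as the invoking host user rather than root
docker run --rm --gpus all --user "$(id -u):$(id -g)" \
  -v $(pwd)/models:/app/models \
  -v $(pwd)/outputs:/app/outputs \
  wan2.1:latest id
```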
---

## Support and Resources

- **GitHub Issues**: [https://github.com/Wan-Video/Wan2.1/issues](https://github.com/Wan-Video/Wan2.1/issues)
- **Discord**: [Join the community](https://discord.gg/AKNgpMK4Yj)
- **Technical Report**: [arXiv:2503.20314](https://arxiv.org/abs/2503.20314)
- **Docker Documentation**: [https://docs.docker.com/](https://docs.docker.com/)
- **NVIDIA Container Toolkit**: [https://github.com/NVIDIA/nvidia-docker](https://github.com/NVIDIA/nvidia-docker)

---

## License

This Docker setup follows the same Apache 2.0 License as the Wan2.1 project. See [LICENSE.txt](LICENSE.txt) for details.

---

**Last Updated**: 2025-10-26
**Version**: 1.0.0
**Maintainer**: Wan2.1 Community
							
								
								
									
Dockerfile (new file, 69 lines)
@@ -0,0 +1,69 @@
# Wan2.1 Docker Image
# Professional-grade Docker setup for Wan2.1 video generation models
# Supports both CPU and GPU (NVIDIA CUDA) environments

FROM nvidia/cuda:12.1.1-cudnn8-devel-ubuntu22.04

# Prevent interactive prompts during build
ENV DEBIAN_FRONTEND=noninteractive

# Set working directory
WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y \
    python3.10 \
    python3-pip \
    python3.10-dev \
    git \
    wget \
    curl \
    ffmpeg \
    libsm6 \
    libxext6 \
    libxrender-dev \
    libgomp1 \
    libgl1-mesa-glx \
    && rm -rf /var/lib/apt/lists/*

# Upgrade pip and install build tools
RUN pip3 install --no-cache-dir --upgrade pip setuptools wheel

# Set Python 3.10 as default
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.10 1 && \
    update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1

# Copy requirements first for better layer caching
COPY requirements.txt /app/requirements.txt

# Install PyTorch with CUDA support
RUN pip install --no-cache-dir torch==2.4.0 torchvision==0.19.0 --index-url https://download.pytorch.org/whl/cu121

# Install flash-attention separately (its build can be problematic)
RUN pip install --no-cache-dir flash-attn --no-build-isolation || echo "Flash attention installation failed, continuing..."

# Install remaining Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . /app/

# Create directories for models and outputs
RUN mkdir -p /app/models /app/outputs /app/cache

# Set environment variables
ENV PYTHONUNBUFFERED=1
ENV TORCH_HOME=/app/cache
ENV HF_HOME=/app/cache/huggingface
ENV TRANSFORMERS_CACHE=/app/cache/transformers
ENV CUDA_VISIBLE_DEVICES=0

# Expose port for Gradio
EXPOSE 7860

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD python -c "import torch; print('CUDA available:', torch.cuda.is_available())" || exit 1

# Default command (can be overridden)
CMD ["/bin/bash"]
							
								
								
									
Makefile (changed, 55 lines)
@@ -1,5 +1,58 @@
-.PHONY: format
+.PHONY: format docker-build docker-up docker-down docker-shell docker-logs docker-clean help
+
+# Code formatting
 format:
 	isort generate.py gradio wan
 	yapf -i -r *.py generate.py gradio wan
+
+# Docker commands
+docker-build:
+	@echo "Building Docker image..."
+	docker compose build
+
+docker-up:
+	@echo "Starting Wan2.1 container..."
+	@mkdir -p models outputs cache
+	docker compose up -d wan2-1
+
+docker-down:
+	@echo "Stopping Wan2.1 container..."
+	docker compose down
+
+docker-restart: docker-down docker-up
+
+docker-shell:
+	@echo "Entering container shell..."
+	docker compose exec wan2-1 bash
+
+docker-logs:
+	@echo "Showing container logs..."
+	docker compose logs -f wan2-1
+
+docker-clean:
+	@echo "Cleaning up Docker resources..."
+	docker compose down -v
+	docker system prune -f
+
+docker-status:
+	@echo "Container status:"
+	docker compose ps
+
+# Help command
+help:
+	@echo "Wan2.1 Makefile Commands:"
+	@echo ""
+	@echo "Code Formatting:"
+	@echo "  make format          - Format Python code with isort and yapf"
+	@echo ""
+	@echo "Docker Management:"
+	@echo "  make docker-build    - Build Docker image"
+	@echo "  make docker-up       - Start container (with GPU support)"
+	@echo "  make docker-down     - Stop container"
+	@echo "  make docker-restart  - Restart container"
+	@echo "  make docker-shell    - Enter container shell"
+	@echo "  make docker-logs     - View container logs"
+	@echo "  make docker-status   - Show container status"
+	@echo "  make docker-clean    - Remove containers and clean up"
+	@echo ""
+	@echo "For detailed Docker setup, see DOCKER_SETUP.md"
							
								
								
									
docker-compose.yml (new file, 99 lines)
@@ -0,0 +1,99 @@
version: '3.8'

services:
  wan2-1:
    build:
      context: .
      dockerfile: Dockerfile
    image: wan2.1:latest
    container_name: wan2.1-gpu

    # GPU support - requires NVIDIA Docker runtime
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

    # Environment variables
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
      - CUDA_VISIBLE_DEVICES=0
      - PYTHONUNBUFFERED=1
      - TORCH_HOME=/app/cache
      - HF_HOME=/app/cache/huggingface
      - TRANSFORMERS_CACHE=/app/cache/transformers
      # Optional: Set your Dashscope API key for prompt extension
      # - DASH_API_KEY=your_api_key_here
      # - DASH_API_URL=https://dashscope.aliyuncs.com/api/v1

    # Volume mounts
    volumes:
      # Mount models directory (download models here)
      - ./models:/app/models
      # Mount outputs directory
      - ./outputs:/app/outputs
      # Mount cache directory for model downloads
      - ./cache:/app/cache
      # Optional: Mount examples directory if you modify it
      - ./examples:/app/examples

    # Port mapping for Gradio interface
    ports:
      - "7860:7860"

    # Shared memory size (important for DataLoader workers)
    shm_size: '16gb'

    # Keep container running
    stdin_open: true
    tty: true

    # Network mode
    network_mode: bridge

    # Restart policy
    restart: unless-stopped

  # CPU-only service (for systems without GPU)
  wan2-1-cpu:
    build:
      context: .
      dockerfile: Dockerfile
    image: wan2.1:latest
    container_name: wan2.1-cpu
    profiles:
      - cpu

    environment:
      - PYTHONUNBUFFERED=1
      - TORCH_HOME=/app/cache
      - HF_HOME=/app/cache/huggingface
      - TRANSFORMERS_CACHE=/app/cache/transformers
      - CUDA_VISIBLE_DEVICES=""

    volumes:
      - ./models:/app/models
      - ./outputs:/app/outputs
      - ./cache:/app/cache
      - ./examples:/app/examples

    ports:
      - "7860:7860"

    shm_size: '8gb'

    stdin_open: true
    tty: true

    network_mode: bridge

    restart: unless-stopped

volumes:
  models:
  outputs:
  cache:
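A quick way to confirm that the GPU reservation in the compose file actually took effect is to run nvidia-smi through the service once it is up:

```bash
# After `docker compose up -d wan2-1`
docker compose exec wan2-1 nvidia-smi
```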
							
								
								
									
docker-run.sh (new executable file, 183 lines)
@@ -0,0 +1,183 @@
#!/bin/bash
# Wan2.1 Docker Helper Script
# Quick start script for running Wan2.1 in Docker

set -e

# Colors for output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Configuration
SERVICE_NAME="wan2-1"
IMAGE_NAME="wan2.1:latest"
MODELS_DIR="./models"
OUTPUTS_DIR="./outputs"
CACHE_DIR="./cache"

# Print header
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}   Wan2.1 Docker Helper Script${NC}"
echo -e "${BLUE}========================================${NC}\n"

# Check if Docker is installed
if ! command -v docker &> /dev/null; then
    echo -e "${RED}Error: Docker is not installed${NC}"
    echo "Please install Docker first: https://docs.docker.com/engine/install/"
    exit 1
fi

# Check if the Docker Compose v2 plugin is available
if ! docker compose version &> /dev/null; then
    echo -e "${RED}Error: Docker Compose is not installed${NC}"
    echo "Please install Docker Compose: https://docs.docker.com/compose/install/"
    exit 1
fi

# Function to check NVIDIA Docker runtime
check_nvidia_runtime() {
    if ! docker run --rm --gpus all nvidia/cuda:12.1.1-base-ubuntu22.04 nvidia-smi &> /dev/null; then
        echo -e "${YELLOW}Warning: NVIDIA Docker runtime not available${NC}"
        echo "GPU acceleration will not be available"
        echo "Install NVIDIA Container Toolkit: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html"
        return 1
    fi
    return 0
}

# Function to create directories
create_directories() {
    echo -e "${BLUE}Creating required directories...${NC}"
    mkdir -p "$MODELS_DIR" "$OUTPUTS_DIR" "$CACHE_DIR"
    echo -e "${GREEN}✓ Directories created${NC}\n"
}

# Function to build Docker image
build_image() {
    echo -e "${BLUE}Building Docker image...${NC}"
    docker compose build
    echo -e "${GREEN}✓ Docker image built successfully${NC}\n"
}

# Function to start container
start_container() {
    echo -e "${BLUE}Starting Wan2.1 container...${NC}"

    if check_nvidia_runtime; then
        echo "Starting with GPU support..."
        docker compose up -d wan2-1
    else
        echo "Starting in CPU-only mode..."
        docker compose --profile cpu up -d wan2-1-cpu
        SERVICE_NAME="wan2-1-cpu"
    fi

    echo -e "${GREEN}✓ Container started successfully${NC}\n"
}

# Function to show status
show_status() {
    echo -e "${BLUE}Container Status:${NC}"
    docker compose ps
    echo ""
}

# Function to show logs
show_logs() {
    echo -e "${BLUE}Showing container logs (Ctrl+C to exit)...${NC}"
    # docker compose logs expects the compose service name
    docker compose logs -f "$SERVICE_NAME"
}

# Function to enter container
enter_container() {
    echo -e "${BLUE}Entering container shell...${NC}"
    docker compose exec wan2-1 bash || docker compose exec wan2-1-cpu bash
}

# Function to stop container
stop_container() {
    echo -e "${BLUE}Stopping container...${NC}"
    docker compose down
    echo -e "${GREEN}✓ Container stopped${NC}\n"
}

# Function to show help
show_help() {
    cat << EOF
Usage: ./docker-run.sh [COMMAND]

Commands:
    build       Build the Docker image
    start       Start the container
    stop        Stop the container
    restart     Restart the container
    status      Show container status
    logs        Show container logs
    shell       Enter container shell
    clean       Stop container and clean up
    help        Show this help message

Examples:
    ./docker-run.sh build        # Build the image
    ./docker-run.sh start        # Start the container
    ./docker-run.sh shell        # Enter the container
    ./docker-run.sh logs         # View logs

For detailed documentation, see DOCKER_SETUP.md
EOF
}

# Main script logic
case "${1:-start}" in
    build)
        create_directories
        build_image
        ;;
    start)
        create_directories
        # Build the image only if the tag does not exist yet
        if ! docker image inspect "$IMAGE_NAME" &> /dev/null; then
            build_image
        fi
        start_container
        show_status
        echo -e "${GREEN}Container is running!${NC}"
        echo -e "Run ${BLUE}./docker-run.sh shell${NC} to enter the container"
        echo -e "Run ${BLUE}./docker-run.sh logs${NC} to view logs"
        ;;
    stop)
        stop_container
        ;;
    restart)
        stop_container
        start_container
        ;;
    status)
        show_status
        ;;
    logs)
        show_logs
        ;;
    shell)
        enter_container
        ;;
    clean)
        echo -e "${YELLOW}This will stop the container and remove volumes${NC}"
        read -p "Are you sure? (y/N) " -n 1 -r
        echo
        if [[ $REPLY =~ ^[Yy]$ ]]; then
            docker compose down -v
            echo -e "${GREEN}✓ Cleanup complete${NC}"
        fi
        ;;
    help|--help|-h)
        show_help
        ;;
    *)
        echo -e "${RED}Unknown command: $1${NC}"
        show_help
        exit 1
        ;;
esac