fixed a bug when loading fp16 models

deepbeepmeep 2025-06-14 10:05:11 +02:00 committed by GitHub
parent d5f9170454
commit 6c8779def6

wgp.py

@@ -1746,7 +1746,7 @@ def get_model_filename(model_type, quantization ="int8", dtype_policy = ""):
     else:
         raw_filename = choices[0]
-    if dtype == torch.float16 and not any("fp16","FP16") in raw_filename and model_family == "wan" and finetune_def == None :
+    if dtype == torch.float16 and not any(x in raw_filename for x in ["fp16", "FP16"]) and model_family == "wan" and finetune_def == None:
         if "quanto_int8" in raw_filename:
             raw_filename = raw_filename.replace("quanto_int8", "quanto_fp16_int8")
         elif "quanto_bf16_int8" in raw_filename:
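For context, a minimal sketch (not part of the commit) of why the removed expression crashed and what the fixed one checks; the filename below is hypothetical:

# Hypothetical filename, for illustration only.
raw_filename = "wan2.1_text2video_14B_quanto_int8.safetensors"

# Old expression: any() accepts exactly one iterable argument, so
# any("fp16", "FP16") raises
#   TypeError: any() takes exactly one argument (2 given)
# before the substring check ever runs.

# Fixed expression: test whether either marker appears in the filename.
has_fp16_tag = any(x in raw_filename for x in ["fp16", "FP16"])
print(has_fp16_tag)  # False here, so the quanto_int8 -> quanto_fp16_int8 rewrite applies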