mirror of
https://github.com/Wan-Video/Wan2.1.git
synced 2025-11-04 22:26:36 +00:00
Added support for EXIF-rotated images in the prompt
This commit is contained in:
parent
71697bd7c5
commit
c282662bd5
@ -949,6 +949,25 @@ def one_more_video(state):
|
|||||||
|
|
||||||
return state
|
return state
|
||||||
|
|
||||||
|
def convert_image(image):
    """Return *image* converted to RGB with its EXIF orientation applied.

    Many cameras store the pixels unrotated and record the intended
    rotation in the EXIF ``Orientation`` tag; without applying it the
    picture appears sideways or upside down. Values 3, 6 and 8 map to
    rotations of 180, 270 and 90 degrees respectively (other values,
    including mirrored orientations, are left untouched, matching the
    original behavior).

    Parameters:
        image: a PIL.Image.Image instance.

    Returns:
        A new RGB PIL image, rotated according to its EXIF orientation
        when present; otherwise just the RGB-converted image.
    """
    from PIL import ExifTags

    image = image.convert('RGB')

    # Resolve the numeric EXIF tag id for 'Orientation' by name instead of
    # hard-coding it. The original looped with `break` and read the leaked
    # loop variable, which would raise NameError if the name were ever
    # absent; next() with a None default is safe.
    orientation = next(
        (tag for tag, name in ExifTags.TAGS.items() if name == 'Orientation'),
        None,
    )

    exif = image.getexif()
    if orientation is None or orientation not in exif:
        # No orientation metadata: nothing to correct.
        return image

    # Map the orientation code to a counter-clockwise rotation in degrees.
    rotation = {3: 180, 6: 270, 8: 90}.get(exif[orientation])
    if rotation is not None:
        image = image.rotate(rotation, expand=True)
    return image
|
||||||
|
|
||||||
|
|
||||||
def prepare_generate_video():
    """Update the UI for a generation run.

    Returns a pair of Gradio component updates: a hidden Button and a
    visible Checkbox.
    """
    hidden_button = gr.Button(visible=False)
    shown_checkbox = gr.Checkbox(visible=True)
    return hidden_button, shown_checkbox
|
||||||
@ -1000,10 +1019,6 @@ def generate_video(
|
|||||||
yield f"Model loaded"
|
yield f"Model loaded"
|
||||||
reload_needed= False
|
reload_needed= False
|
||||||
|
|
||||||
from PIL import Image
|
|
||||||
import numpy as np
|
|
||||||
import tempfile
|
|
||||||
|
|
||||||
if wan_model == None:
|
if wan_model == None:
|
||||||
raise gr.Error("Unable to generate a Video while a new configuration is being applied.")
|
raise gr.Error("Unable to generate a Video while a new configuration is being applied.")
|
||||||
if attention_mode == "auto":
|
if attention_mode == "auto":
|
||||||
@ -1076,17 +1091,18 @@ def generate_video(
|
|||||||
image_to_end = None
|
image_to_end = None
|
||||||
if image_to_continue is not None:
|
if image_to_continue is not None:
|
||||||
if isinstance(image_to_continue, list):
|
if isinstance(image_to_continue, list):
|
||||||
image_to_continue = [ tup[0] for tup in image_to_continue ]
|
image_to_continue = [ convert_image(tup[0]) for tup in image_to_continue ]
|
||||||
else:
|
else:
|
||||||
image_to_continue = [image_to_continue]
|
image_to_continue = [convert_image(image_to_continue)]
|
||||||
if image_to_end != None:
|
if image_to_end != None:
|
||||||
if isinstance(image_to_end , list):
|
if isinstance(image_to_end , list):
|
||||||
image_to_end = [ tup[0] for tup in image_to_end ]
|
image_to_end = [ convert_image(tup[0]) for tup in image_to_end ]
|
||||||
else:
|
else:
|
||||||
image_to_end = [image_to_end ]
|
image_to_end = [convert_image(image_to_end) ]
|
||||||
if len(image_to_continue) != len(image_to_end):
|
if len(image_to_continue) != len(image_to_end):
|
||||||
gr.Info("The number of start and end images should be the same ")
|
gr.Info("The number of start and end images should be the same ")
|
||||||
return
|
return
|
||||||
|
|
||||||
if multi_images_gen_type == 0:
|
if multi_images_gen_type == 0:
|
||||||
new_prompts = []
|
new_prompts = []
|
||||||
new_image_to_continue = []
|
new_image_to_continue = []
|
||||||
@ -1279,8 +1295,8 @@ def generate_video(
|
|||||||
if image2video:
|
if image2video:
|
||||||
samples = wan_model.generate(
|
samples = wan_model.generate(
|
||||||
prompt,
|
prompt,
|
||||||
image_to_continue[no].convert('RGB'),
|
image_to_continue[no],
|
||||||
image_to_end[no].convert('RGB') if image_to_end != None else None,
|
image_to_end[no] if image_to_end != None else None,
|
||||||
frame_num=(video_length // 4)* 4 + 1,
|
frame_num=(video_length // 4)* 4 + 1,
|
||||||
max_area=MAX_AREA_CONFIGS[resolution],
|
max_area=MAX_AREA_CONFIGS[resolution],
|
||||||
shift=flow_shift,
|
shift=flow_shift,
|
||||||
|
|||||||
Loading…
Reference in New Issue
Block a user