Mirror of https://github.com/Wan-Video/Wan2.1.git, synced 2025-11-04 14:16:57 +00:00
Commit: eb811e0c52
Parent: f5dc6d0f4d

fixed bugs
@@ -150,16 +150,17 @@ LOGFORMAT = "[%(log_color)s%(levelname)-8s%(reset)s]: %(log_color)s%(message)s%(
 def setup_eval_logging(log_level: int = logging.INFO):
-    logging.root.setLevel(log_level)
     # formatter = ColoredFormatter(LOGFORMAT)
-    formatter = None
+    log = logging.getLogger(__name__)
+    if not log.handlers:
+        formatter = None # or your ColoredFormatter
     stream = logging.StreamHandler()
     stream.setLevel(log_level)
     stream.setFormatter(formatter)
-    log = logging.getLogger()
-    log.setLevel(log_level)
     log.addHandler(stream)
+    log.setLevel(log_level)
+    log.propagate = False # Prevent propagation to root logger
 
     return log
 
 _CLIP_SIZE = 384
 _CLIP_FPS = 8.0
 
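For background on the change above: guarding handler setup with "if not log.handlers" and switching off propagation is the standard way to make a logging setup function idempotent, so calling it more than once does not print every record several times. A minimal self-contained sketch of that pattern (generic Python logging, not this repository's ColoredFormatter wiring):

import logging

def setup_logger_once(name: str = "eval", log_level: int = logging.INFO) -> logging.Logger:
    # Configure the named logger only on the first call; repeated calls must not
    # stack duplicate StreamHandlers, or every record gets printed once per call.
    log = logging.getLogger(name)
    if not log.handlers:
        stream = logging.StreamHandler()
        stream.setLevel(log_level)
        log.addHandler(stream)
        log.setLevel(log_level)
        log.propagate = False  # keep the root logger from emitting the record again
    return log

a = setup_logger_once()
b = setup_logger_once()                 # second call adds no extra handler
assert a is b and len(a.handlers) == 1
a.info("printed exactly once")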
@@ -151,6 +151,7 @@ class WanI2V:
         audio_proj=None,
         audio_context_lens=None,
         model_filename = None,
+        offloadobj = None,
         **bbargs
     ):
         r"""
@@ -263,7 +264,7 @@ class WanI2V:
         clip_context = self.clip.visual([image_start[:, None, :, :]])
 
         from mmgp import offload
-        offload.last_offload_obj.unload_all()
+        offloadobj.unload_all()
         if any_end_frame:
             mean2 = 0
             enc= torch.concat([
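The two hunks above thread an explicit offload handle into the WanI2V call (the new offloadobj = None parameter) and use it in place of mmgp's module-level offload.last_offload_obj. The sketch below uses hypothetical stand-in classes rather than the real mmgp API; it illustrates why unloading through a handle the caller passes in is safer than unloading whichever object happened to be created last:

class FakeOffloader:
    # Hypothetical stand-in for an offload manager; not the mmgp API.
    def __init__(self, name: str):
        self.name = name
        self.loaded = True

    def unload_all(self) -> None:
        self.loaded = False

last_offload_obj = None  # stand-in for a module-level "last created" global

def create_offloader(name: str) -> FakeOffloader:
    global last_offload_obj
    obj = FakeOffloader(name)
    last_offload_obj = obj  # overwritten every time another offloader is created
    return obj

i2v = create_offloader("i2v")
post = create_offloader("postprocess")  # the global now points at the wrong object

last_offload_obj.unload_all()   # global style: releases "postprocess", not "i2v"
i2v.unload_all()                # handle style: releases exactly the object the caller owns
print(i2v.loaded, post.loaded)  # False False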
wgp.py
@@ -3553,11 +3553,11 @@ def edit_video(
             if repeat_no >= total_generation: break
             repeat_no +=1
             gen["repeat_no"] = repeat_no
-
+            suffix = "" if "_post" in video_source else "_post"
             if any_mmaudio:
                 send_cmd("progress", [0, get_latest_status(state,"MMAudio Soundtrack Generation")])
                 from postprocessing.mmaudio.mmaudio import video_to_audio
-                new_video_path = get_available_filename(save_path, video_source, "_post")
+                new_video_path = get_available_filename(save_path, video_source, suffix)
                 video_to_audio(video_path, prompt = MMAudio_prompt, negative_prompt = MMAudio_neg_prompt, seed = seed, num_steps = 25, cfg_strength = 4.5, duration= frames_count /output_fps, video_save_path = new_video_path , persistent_models = server_config.get("mmaudio_enabled", 0) == 2, verboseLevel = verbose_level)
                 configs["MMAudio_setting"] = MMAudio_setting
                 configs["MMAudio_prompt"] = MMAudio_prompt
@@ -3566,7 +3566,7 @@ def edit_video(
                 any_change = True
             elif tempAudioFileName != None:
                 # combine audio file and new video file
-                new_video_path = get_available_filename(save_path, video_source, "_post")
+                new_video_path = get_available_filename(save_path, video_source, suffix)
                 os.system('ffmpeg -v quiet -y -i "{}" -i "{}" -c copy "{}"'.format(video_path, tempAudioFileName, new_video_path))
             else:
                 new_video_path = video_path
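Both edit_video hunks above route the output name through the new suffix variable: if the source video already carries a _post marker (i.e. it is itself the output of an earlier edit pass), an empty suffix is used, so repeated edits do not pile up names like video_post_post.mp4. A small sketch of that rule, with a simplified stand-in for get_available_filename that ignores collision counters:

import os

def fake_available_filename(save_path: str, video_source: str, suffix: str) -> str:
    # Simplified stand-in: the real get_available_filename also avoids name collisions.
    stem, ext = os.path.splitext(os.path.basename(video_source))
    return os.path.join(save_path, f"{stem}{suffix}{ext}")

def output_name(save_path: str, video_source: str) -> str:
    # Same rule as the diff: only append "_post" if the source is not already a "_post" file.
    suffix = "" if "_post" in video_source else "_post"
    return fake_available_filename(save_path, video_source, suffix)

print(output_name("outputs", "clip.mp4"))       # outputs/clip_post.mp4
print(output_name("outputs", "clip_post.mp4"))  # outputs/clip_post.mp4, not clip_post_post.mp4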
@@ -4205,6 +4205,7 @@ def generate_video(
                 model_filename = model_filename,
                 model_type = base_model_type,
                 loras_slists = loras_slists,
+                offloadobj = offloadobj,
             )
         except Exception as e:
             if temp_filename!= None and os.path.isfile(temp_filename):
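This is the caller side of the offloadobj change: generate_video forwards the offload object it owns into the model call, where it presumably reaches the offloadobj parameter added to WanI2V above. Because that parameter defaults to None, older call sites that do not pass it still bind, but an unload step then has to cope with a missing handle. A hedged sketch of that optional-parameter threading (not the literal wgp.py code):

from types import SimpleNamespace

def model_generate(prompt, offloadobj=None, **kwargs):
    # Sketch of a generate() that accepts an optional offload handle.
    # ... denoising work would happen here ...
    if offloadobj is not None:      # tolerate callers that never pass the handle
        offloadobj.unload_all()
    return f"video for: {prompt}"

# Caller side, mirroring the generate_video hunk: pass the object this caller owns.
offloadobj = SimpleNamespace(unload_all=lambda: print("offloaded weights released"))
model_generate("a cat surfing a wave", offloadobj=offloadobj)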
@@ -7033,7 +7034,7 @@ def generate_configuration_tab(state, blocks, header, model_choice, prompt_enhan
                 )
 
                 save_path_choice = gr.Textbox(
-                    label="Output Folder for Generated Videos",
+                    label="Output Folder for Generated Videos (need to restart app to be taken into account)",
                     value=server_config.get("save_path", save_path)
                 )
 
 