fixed bugs

DeepBeepMeep 2025-07-03 02:20:44 +02:00
parent f5dc6d0f4d
commit eb811e0c52
3 changed files with 18 additions and 15 deletions


@@ -150,16 +150,17 @@ LOGFORMAT = "[%(log_color)s%(levelname)-8s%(reset)s]: %(log_color)s%(message)s%(
 def setup_eval_logging(log_level: int = logging.INFO):
-    logging.root.setLevel(log_level)
-    # formatter = ColoredFormatter(LOGFORMAT)
-    formatter = None
-    stream = logging.StreamHandler()
-    stream.setLevel(log_level)
-    stream.setFormatter(formatter)
-    log = logging.getLogger()
-    log.setLevel(log_level)
-    log.addHandler(stream)
+    log = logging.getLogger(__name__)
+    if not log.handlers:
+        formatter = None  # or your ColoredFormatter
+        stream = logging.StreamHandler()
+        stream.setLevel(log_level)
+        stream.setFormatter(formatter)
+        log.addHandler(stream)
+        log.setLevel(log_level)
+        log.propagate = False  # Prevent propagation to root logger
+    return log
 _CLIP_SIZE = 384
 _CLIP_FPS = 8.0
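
The rewritten setup_eval_logging configures a named logger instead of the root logger, attaches its StreamHandler only once, and disables propagation, so repeated calls no longer stack duplicate handlers or double-print records. A minimal sketch of that behavior, with the ColoredFormatter left out as in the patch:

import logging

def setup_eval_logging(log_level: int = logging.INFO):
    log = logging.getLogger(__name__)
    if not log.handlers:              # attach a handler only on the first call
        stream = logging.StreamHandler()
        stream.setLevel(log_level)
        log.addHandler(stream)
        log.setLevel(log_level)
        log.propagate = False         # keep records from echoing via the root logger
    return log

log = setup_eval_logging()
setup_eval_logging()                  # second call is a no-op
log.info("one handler, one line")     # emitted once, not twice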


@@ -151,6 +151,7 @@ class WanI2V:
         audio_proj=None,
         audio_context_lens=None,
         model_filename = None,
+        offloadobj = None,
         **bbargs
     ):
         r"""
@@ -263,7 +264,7 @@ class WanI2V:
             clip_context = self.clip.visual([image_start[:, None, :, :]])
             from mmgp import offload
-            offload.last_offload_obj.unload_all()
+            offloadobj.unload_all()
             if any_end_frame:
                 mean2 = 0
                 enc= torch.concat([
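
generate() now receives the offload manager explicitly as offloadobj instead of reaching for the module-level offload.last_offload_obj, which can point at a different pipeline once several models have been profiled. A rough sketch of that dependency-injection pattern; FakeOffloadObj and this generate() are illustrative stand-ins, and only unload_all() mirrors the call used in the patch:

class FakeOffloadObj:
    # stand-in for the object mmgp's offload setup returns; only unload_all()
    # mirrors the method used in the diff above
    def unload_all(self):
        print("unloading offloaded weights")

def generate(image_start=None, offloadobj=None, **bbargs):
    # ... CLIP-encode the start image here ...
    if offloadobj is not None:
        offloadobj.unload_all()   # free VRAM before the heavy denoising loop
    # ... run VAE encoding and the diffusion steps ...

generate(offloadobj=FakeOffloadObj())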

wgp.py

@@ -3553,11 +3553,11 @@ def edit_video(
         if repeat_no >= total_generation: break
         repeat_no +=1
         gen["repeat_no"] = repeat_no
+        suffix = "" if "_post" in video_source else "_post"
         if any_mmaudio:
             send_cmd("progress", [0, get_latest_status(state,"MMAudio Soundtrack Generation")])
             from postprocessing.mmaudio.mmaudio import video_to_audio
-            new_video_path = get_available_filename(save_path, video_source, "_post")
+            new_video_path = get_available_filename(save_path, video_source, suffix)
             video_to_audio(video_path, prompt = MMAudio_prompt, negative_prompt = MMAudio_neg_prompt, seed = seed, num_steps = 25, cfg_strength = 4.5, duration= frames_count /output_fps, video_save_path = new_video_path , persistent_models = server_config.get("mmaudio_enabled", 0) == 2, verboseLevel = verbose_level)
             configs["MMAudio_setting"] = MMAudio_setting
             configs["MMAudio_prompt"] = MMAudio_prompt
@@ -3566,7 +3566,7 @@ def edit_video(
             any_change = True
         elif tempAudioFileName != None:
             # combine audio file and new video file
-            new_video_path = get_available_filename(save_path, video_source, "_post")
+            new_video_path = get_available_filename(save_path, video_source, suffix)
             os.system('ffmpeg -v quiet -y -i "{}" -i "{}" -c copy "{}"'.format(video_path, tempAudioFileName, new_video_path))
         else:
             new_video_path = video_path
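
This fallback branch muxes the saved audio track back into the video with a stream copy (no re-encode). For reference, a subprocess-based sketch of the same ffmpeg invocation that sidesteps shell quoting of the paths; wgp.py itself keeps the os.system call shown above:

import subprocess

def mux_audio(video_path, audio_path, out_path):
    # -c copy remuxes the existing streams without re-encoding; -y overwrites out_path
    subprocess.run(
        ["ffmpeg", "-v", "quiet", "-y",
         "-i", video_path, "-i", audio_path,
         "-c", "copy", out_path],
        check=True,
    )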
@@ -4205,6 +4205,7 @@ def generate_video(
                 model_filename = model_filename,
                 model_type = base_model_type,
                 loras_slists = loras_slists,
+                offloadobj = offloadobj,
             )
         except Exception as e:
             if temp_filename!= None and os.path.isfile(temp_filename):
@@ -7033,7 +7034,7 @@ def generate_configuration_tab(state, blocks, header, model_choice, prompt_enhan
             )
             save_path_choice = gr.Textbox(
-                label="Output Folder for Generated Videos",
+                label="Output Folder for Generated Videos (need to restart app to be taken into account)",
                 value=server_config.get("save_path", save_path)
             )
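
The relabeled field makes explicit that the output folder is read once at startup, so edits only take effect after a restart. A minimal sketch of the settings textbox, with server_config and save_path as hypothetical placeholders for the values wgp.py loads:

import gradio as gr

server_config = {}      # hypothetical: settings persisted from a previous session
save_path = "outputs"   # hypothetical built-in default

with gr.Blocks() as demo:
    save_path_choice = gr.Textbox(
        label="Output Folder for Generated Videos (need to restart app to be taken into account)",
        value=server_config.get("save_path", save_path),  # saved value wins over the default
    )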