fix broken queue states (again)

Chris Malone 2025-03-25 15:59:42 +11:00
parent 3cd0dbf4dd
commit 3d9f4c4326

@@ -215,8 +215,8 @@ def update_queue_data():
     for item in queue:
         data.append([
             str(item['id']),
-            item['status'],
-            item['repeats'],
+            item.get('status', "Starting"),
+            item.get('repeats', "0/0"),
             item.get('progress', "0.0%"),
             item.get('steps', ''),
             item.get('time', '--'),
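
For context, the practical effect of the .get() fallbacks above: a freshly enqueued item that the worker has not populated yet now renders as a placeholder row instead of raising KeyError. A minimal sketch of that behaviour (the sample item is hypothetical, not part of the commit):

# Minimal sketch; the bare queue entry below is hypothetical.
item = {'id': 7}   # just enqueued; worker has not written status/repeats yet

row = [
    str(item['id']),
    item.get('status', "Starting"),   # previously item['status'] -> KeyError
    item.get('repeats', "0/0"),       # previously item['repeats'] -> KeyError
    item.get('progress', "0.0%"),
    item.get('steps', ''),
    item.get('time', '--'),
]
print(row)   # ['7', 'Starting', '0/0', '0.0%', '', '--']
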
@@ -998,7 +998,7 @@ def save_video(final_frames, output_path, fps=24):
     final_frames = (final_frames * 255).astype(np.uint8)
     ImageSequenceClip(list(final_frames), fps=fps).write_videofile(output_path, verbose= False, logger = None)
 
-def build_callback(state, pipe, num_inference_steps, status):
+def build_callback(taskid, state, pipe, num_inference_steps, repeats):
     start_time = time.time()
     def update_progress(step_idx, latents, read_state = False):
         with tracker_lock:
@@ -1011,12 +1011,12 @@ def build_callback(state, pipe, num_inference_steps, status):
             else:
                 phase = "Denoising"
             elapsed = time.time() - start_time
-            progress_tracker[task_id] = {
+            progress_tracker[taskid] = {
                 'current_step': step_idx,
                 'total_steps': num_inference_steps,
                 'start_time': start_time,
                 'last_update': time.time(),
-                'repeats': status,
+                'repeats': repeats,
                 'status': phase
             }
     return update_progress
@@ -1262,16 +1262,8 @@ def generate_video(
     trans.previous_residual_uncond = None
     trans.previous_residual_cond = None
     video_no = 0
-    status = f"{video_no}/{repeat_generation}"
-    with tracker_lock:
-        if task_id in progress_tracker:
-            progress_tracker[task_id]['status'] = "Encoding Prompt"
-            progress_tracker[task_id]['repeats'] = status
-            progress_tracker[task_id]['current_step'] = 0
-            progress_tracker[task_id]['total_steps'] = num_inference_steps
-            progress_tracker[task_id]['start_time'] = time.time()
-            progress_tracker[task_id]['last_update'] = time.time()
-    callback = build_callback(state, trans, num_inference_steps, status)
+    repeats = f"{video_no}/{repeat_generation}"
+    callback = build_callback(task_id, state, trans, num_inference_steps, repeats)
     offload.shared_state["callback"] = callback
     gc.collect()
     torch.cuda.empty_cache()
@@ -1279,8 +1271,14 @@ def generate_video(
     for i in range(repeat_generation):
         try:
             with tracker_lock:
-                if task_id in progress_tracker:
-                    progress_tracker[task_id]['repeats'] = video_no
+                progress_tracker[task_id] = {
+                    'current_step': 0,
+                    'total_steps': num_inference_steps,
+                    'start_time': time.time(),
+                    'last_update': time.time(),
+                    'repeats': f"0/{repeat_generation}",
+                    'status': "Encoding Prompt"
+                }
             video_no += 1
             if image2video:
                 samples = wan_model.generate(
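
Taken together, the change converges on one pattern: generate_video() resets the task's tracker entry once per repeat under tracker_lock, and the closure returned by build_callback() overwrites that same entry on every denoising step. Below is a standalone sketch of that pattern, assuming only the names visible in the diff (progress_tracker, tracker_lock, build_callback, task_id); the phase detection is simplified and the driver loop at the bottom is hypothetical, standing in for the real diffusion pipeline:

import threading
import time

# Shared state, as in the diff: per-task progress dict guarded by a lock.
progress_tracker = {}
tracker_lock = threading.Lock()

def build_callback(taskid, state, pipe, num_inference_steps, repeats):
    # Returns a closure the pipeline calls once per step; it overwrites the
    # task's tracker entry so the UI thread can poll it.
    start_time = time.time()
    def update_progress(step_idx, latents, read_state=False):
        phase = "Denoising"   # the real callback also reports other phases
        with tracker_lock:
            progress_tracker[taskid] = {
                'current_step': step_idx,
                'total_steps': num_inference_steps,
                'start_time': start_time,
                'last_update': time.time(),
                'repeats': repeats,
                'status': phase,
            }
    return update_progress

# Hypothetical driver standing in for one repeat inside generate_video():
task_id = "demo"
num_inference_steps = 4
callback = build_callback(task_id, None, None, num_inference_steps, "0/1")
with tracker_lock:
    progress_tracker[task_id] = {           # per-repeat reset, as in the diff
        'current_step': 0,
        'total_steps': num_inference_steps,
        'start_time': time.time(),
        'last_update': time.time(),
        'repeats': "0/1",
        'status': "Encoding Prompt",
    }
for step in range(num_inference_steps):
    callback(step, latents=None)            # pipeline would call this per step

with tracker_lock:
    print(progress_tracker[task_id]['status'],
          progress_tracker[task_id]['current_step'])   # Denoising 3
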