Skip to content

Commit 9c3cc60

Browse files
committed
Fixes and improvements
GUI: - Added a timer to keep track of how long "Auto Sub ReTimer" will take to complete all the "Phases." (Not implemented in "Whisper" and "Whisper ReTimer") Auto Sub ReTimer: Fase3: - Adjusted the progress bar. It will no longer "go crazy" during scene change detection. (Slightly faster because it no longer has to manage multiple progress bars for each video segment). Whisper ReTimer: - Updated "Fase3" as in the latest version 1.6 of "Auto Sub ReTimer." - Updated "Fase4" as in the latest version 1.6 of "Auto Sub ReTimer."
1 parent 41d3978 commit 9c3cc60

4 files changed

Lines changed: 228 additions & 64 deletions

File tree

Scripts/GUI/GUI.py

Lines changed: 36 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
from tkinter import messagebox
88
import re
99
import json
10+
import time
1011

1112
# Configurazione della GUI
1213
ctk.set_appearance_mode("dark")
@@ -19,6 +20,8 @@
1920
is_running = False
2021
funzione_selezionata = None
2122
paths = None
23+
start_time = None
24+
execution_time_label = None
2225

2326
# Finestra principale
2427
root = ctk.CTk()
@@ -452,6 +455,8 @@ def _update_display(self):
452455

453456
progress_label = ctk.CTkLabel(progress_frame, text="Completion: 0%", font=("Arial", 12))
454457
progress_label.pack(anchor="e")
458+
execution_time_label = ctk.CTkLabel(progress_frame, text="Time: 00:00:00", font=("Arial", 12))
459+
execution_time_label.pack(anchor="e")
455460

456461
# Pulsanti configurazione
457462
config_buttons_frame = ctk.CTkFrame(frame_center)
@@ -519,6 +524,15 @@ def _update_display(self):
519524
# --------------------------------------------------
520525
# GESTIONE ESECUZIONE
521526
# --------------------------------------------------
527+
# Timer
528+
def update_timer():
    """Refresh the elapsed-time label once per second.

    Reads the module-global ``start_time`` (epoch seconds, or None while no
    run is active) and writes an ``HH:MM:SS`` string into the global
    ``execution_time_label``. When ``start_time`` is None the label is left
    untouched, so the last measured time stays visible after a run ends.
    Re-schedules itself via ``root.after`` so it ticks for the GUI lifetime.
    """
    if start_time:
        elapsed = int(time.time() - start_time)
        hours, remainder = divmod(elapsed, 3600)
        minutes, seconds = divmod(remainder, 60)
        execution_time_label.configure(text=f"Time: {hours:02}:{minutes:02}:{seconds:02}")
    root.after(1000, update_timer)
535+
522536
def update_log():
523537
while not output_queue.empty():
524538
msg = output_queue.get()
@@ -566,6 +580,12 @@ def run_interactive_phase(phase_num, phase_path):
566580
if "cartelli" in output.lower() or "scegli" in output.lower():
567581
question = output.strip()
568582
options = ["Sì", "No"] if "cartelli" in output.lower() else ["Option 1", "Option 2"]
583+
584+
# Per Fase6, ferma il timer prima del popup
585+
if phase_num == 6:
586+
global start_time
587+
start_time = None
588+
569589
answer = input_handler.ask_question(question, options)
570590

571591
if answer is not None:
@@ -619,7 +639,7 @@ def run_normal_phase(phase_num, phase_path):
619639
return False
620640

621641
def esegui_auto_sub_retimer():
622-
global is_running
642+
global is_running, start_time
623643

624644
if is_running:
625645
return
@@ -644,21 +664,33 @@ def esegui_auto_sub_retimer():
644664
log_message("🚀 Starting Auto Sub ReTimer process\n")
645665

646666
for phase_num, phase_path, is_interactive in fasi:
647-
if is_interactive:
667+
# Per Fase0, avvia il timer solo dopo che la finestra di selezione file è chiusa
668+
if phase_num == 0:
648669
success = run_interactive_phase(phase_num, phase_path)
670+
if success:
671+
start_time = time.time() # Avvia timer solo dopo selezione file
649672
else:
650-
success = run_normal_phase(phase_num, phase_path)
673+
if is_interactive:
674+
success = run_interactive_phase(phase_num, phase_path)
675+
else:
676+
success = run_normal_phase(phase_num, phase_path)
651677

652678
if not success:
653679
log_message("❌ Process interrupted\n")
654680
break
681+
682+
# Ferma il timer dopo Fase5 invece che Fase6
683+
if phase_num == 5:
684+
start_time = None
685+
655686
else:
656687
log_message("🎉 Process completed successfully!\n")
657688

658689
progress_manager.complete_all()
659690

660691
finally:
661692
is_running = False
693+
start_time = None
662694
button_avvia.configure(state="normal")
663695
status_label.configure(text="Status: Completed")
664696

@@ -959,4 +991,5 @@ def seleziona_funzione(funzione):
959991
# --------------------------------------------------
960992
seleziona_funzione("Auto Sub ReTimer")
961993
root.after(100, update_log)
994+
root.after(1000, update_timer)
962995
root.mainloop()

Scripts/Migliora il Timing Dei Sub/Fase3.py

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
import pysrt
55
from concurrent.futures import ThreadPoolExecutor
66
from multiprocessing import cpu_count
7+
from concurrent.futures import as_completed
78

89
# Percorso della directory principale del progetto (relativa)
910
project_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
@@ -103,7 +104,7 @@ def process_segment(args):
103104
)
104105
scene_manager.add_detector(adaptive_detector)
105106

106-
scene_manager.detect_scenes(video_manager, end_time=end_time + 0.5, show_progress=True)
107+
scene_manager.detect_scenes(video_manager, end_time=end_time + 0.5)
107108

108109
segment_scenes = []
109110
for scene in scene_manager.get_scene_list():
@@ -139,12 +140,22 @@ def main():
139140

140141
# Rilevamento parallelo delle scene
141142
print("Analisi parallela delle scene in corso...")
143+
total_segments = len(segments)
144+
num_threads = min(cpu_count(), len(segments)) if segments else 1 # Mantieni la tua logica originale
142145

143-
# num_processes
144-
num_threads = min(cpu_count(), len(segments)) if segments else 1
145146
with ThreadPoolExecutor(max_workers=num_threads) as executor:
146147
futures = [executor.submit(process_segment, arg) for arg in process_args]
147-
results = [future.result() for future in futures]
148+
149+
for i, _ in enumerate(as_completed(futures), 1):
150+
progress = int((i / total_segments) * 50)
151+
print("\rAnalisi scene: [{}{}] {:>3}%".format(
152+
'=' * progress,
153+
' ' * (50 - progress),
154+
int((i / total_segments) * 100)),
155+
end='', flush=True)
156+
157+
print("\nAnalisi completata!")
158+
results = [future.result() for future in futures]
148159

149160
# Unisci i risultati
150161
all_scenes = []

Scripts/Whisper Miglioramento Timing/Fase3.py

Lines changed: 132 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,10 @@
11
import os
22
from scenedetect import open_video, SceneManager
3-
from scenedetect.detectors import AdaptiveDetector, ContentDetector
3+
from scenedetect.detectors import AdaptiveDetector
44
import pysrt
5+
from concurrent.futures import ThreadPoolExecutor
6+
from multiprocessing import cpu_count
7+
from concurrent.futures import as_completed
58

69
# Percorso della directory principale del progetto (relativa)
710
project_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
@@ -53,45 +56,131 @@ def apply_offset(timecode, offset):
5356
sub.end = apply_offset(sub.end, offset)
5457
subs.save(output_path, encoding='utf-8')
5558

56-
# Percorso del file video
57-
video_path = os.path.join(project_path, "ep.mkv")
58-
if not os.path.exists(video_path):
59-
raise FileNotFoundError("Il file video non è stato trovato.")
60-
61-
# Caricamento del video
62-
video_manager = open_video(video_path)
63-
64-
# SceneManager con AdaptiveDetector e ContentDetector
65-
scene_manager = SceneManager()
66-
adaptive_detector = AdaptiveDetector(adaptive_threshold=19)
67-
content_detector = ContentDetector(threshold=19)
68-
scene_manager.add_detector(adaptive_detector)
69-
scene_manager.add_detector(content_detector)
70-
71-
# Rileva le scene
72-
scene_manager.detect_scenes(video_manager)
73-
scene_list = scene_manager.get_scene_list()
74-
75-
# Esporta i risultati in formato SRT
76-
srt_output_path = os.path.join(project_path, "scene_timestamps.srt")
77-
export_srt(scene_list, output_path=srt_output_path)
78-
79-
# Calcola la discrepanza costante
80-
discrepancy = calculate_discrepancy(scene_list, srt_output_path)
81-
82-
# Offset possibili
83-
possible_offsets = [-0.011, -0.021, -0.031, -0.041]
84-
85-
# Trova l'offset più vicino
86-
best_offset = find_closest_offset(discrepancy, possible_offsets)
87-
88-
# Applica l'offset globale al file SRT
89-
adjusted_srt_output_path = os.path.join(project_path, "scene_timestamps_adjusted.srt")
90-
apply_global_offset_to_srt(srt_output_path, adjusted_srt_output_path, best_offset)
91-
92-
# Stampa i risultati
93-
print(f"Scene rilevate: {len(scene_list)}")
94-
for i, scene in enumerate(scene_list):
95-
print(f"Scena {i+1}: Inizio: {scene[0].get_timecode()}, Fine: {scene[1].get_timecode()}")
96-
print(f"File SRT con offset globale applicato creato con successo: scene_timestamps_adjusted.srt")
97-
print(f"Offset applicato: {best_offset:.3f} secondi")
59+
# Funzione per trovare i segmenti del video da analizzare basati sui sottotitoli
60+
# Find the video segments worth analysing, based on the subtitles
def get_segments_to_analyze(srt_path, min_gap=5.0, margin=2.0):
    """Derive the video spans that should be scanned for scene changes.

    Consecutive subtitles separated by less than ``min_gap`` seconds are
    grouped into one segment; a silence of at least ``min_gap`` seconds
    closes the current segment and opens a new one. Each segment is padded
    with ``margin`` seconds so scene cuts right at a subtitle edge are not
    missed; the very first start gets a double margin.

    Fix: the original also computed an "extended last end"
    (``subs[-1].end + margin * 2``) and then immediately overwrote it with
    ``subs[0].end``, making that assignment dead code — it is removed here
    without changing behavior.

    Args:
        srt_path: Path to the UTF-8 SRT file.
        min_gap: Minimum silence, in seconds, that splits two segments.
        margin: Padding, in seconds, added around each segment.

    Returns:
        List of ``(start_seconds, end_seconds)`` tuples; empty if the SRT
        contains no subtitles.
    """
    subs = pysrt.open(srt_path, encoding='utf-8')

    if not subs:
        return []

    segments = []

    # Extend the first start backwards (double margin) to catch edge scenes.
    current_start = max(0, subs[0].start.ordinal / 1000 - margin * 2)
    last_end = subs[0].end.ordinal / 1000

    for i in range(1, len(subs)):
        current_sub = subs[i]
        gap = (current_sub.start.ordinal / 1000) - (subs[i - 1].end.ordinal / 1000)

        if gap >= min_gap:
            # Close the running segment with a trailing margin...
            segments.append((current_start, subs[i - 1].end.ordinal / 1000 + margin))
            # ...and start a new one slightly before the next subtitle.
            current_start = current_sub.start.ordinal / 1000 - margin

        last_end = current_sub.end.ordinal / 1000

    # Append the final (or only) segment, padded at the end.
    segments.append((current_start, last_end + margin))

    return segments
90+
91+
# Funzione per processare un singolo segmento
92+
# Process a single video segment
def process_segment(args):
    """Detect scenes inside one ``(start, end)`` span of the video.

    ``args`` is a ``(segment, video_path, adaptive_threshold)`` tuple where
    ``segment`` is a ``(start_seconds, end_seconds)`` pair. Any failure is
    reported on stdout and converted into an empty result, so a single bad
    segment cannot abort the whole parallel run.

    Returns:
        The list of detected scenes overlapping the requested span.
    """
    (start_time, end_time), video_path, adaptive_threshold = args

    try:
        video = open_video(video_path)
        # Start slightly before the segment so a cut on the boundary is seen.
        video.seek(max(0, start_time - 0.5))

        manager = SceneManager()
        manager.add_detector(AdaptiveDetector(
            adaptive_threshold=adaptive_threshold,
            min_content_val=20,
        ))

        # Likewise run slightly past the end of the segment.
        manager.detect_scenes(video, end_time=end_time + 0.5)

        # Keep only the scenes that actually overlap the requested span.
        return [
            scene for scene in manager.get_scene_list()
            if scene[1].get_seconds() > start_time and scene[0].get_seconds() < end_time
        ]
    except Exception as e:
        print(f"Errore durante l'elaborazione del segmento {start_time}-{end_time}: {str(e)}")
        return []
120+
121+
def main():
122+
# Percorso del file video
123+
video_path = os.path.join(project_path, "ep.mkv")
124+
if not os.path.exists(video_path):
125+
raise FileNotFoundError("Il file video non è stato trovato.")
126+
127+
# Percorso del file SRT dei sottotitoli
128+
srt_path = os.path.join(project_path, "whisper_adjusted.srt")
129+
if not os.path.exists(srt_path):
130+
raise FileNotFoundError("Il file SRT dei sottotitoli non è stato trovato.")
131+
132+
# Ottieni i segmenti del video da analizzare
133+
segments = get_segments_to_analyze(srt_path, min_gap=5.0, margin=1.0)
134+
135+
# Parametri per i detector
136+
adaptive_threshold = 3
137+
138+
# Prepara gli argomenti per il pool
139+
process_args = [(segment, video_path, adaptive_threshold) for segment in segments]
140+
141+
# Rilevamento parallelo delle scene
142+
print("Analisi parallela delle scene in corso...")
143+
total_segments = len(segments)
144+
num_threads = min(cpu_count(), len(segments)) if segments else 1 # Mantieni la tua logica originale
145+
146+
with ThreadPoolExecutor(max_workers=num_threads) as executor:
147+
futures = [executor.submit(process_segment, arg) for arg in process_args]
148+
149+
for i, _ in enumerate(as_completed(futures), 1):
150+
progress = int((i / total_segments) * 50)
151+
print("\rAnalisi scene: [{}{}] {:>3}%".format(
152+
'=' * progress,
153+
' ' * (50 - progress),
154+
int((i / total_segments) * 100)),
155+
end='', flush=True)
156+
157+
print("\nAnalisi completata!")
158+
results = [future.result() for future in futures]
159+
160+
# Unisci i risultati
161+
all_scenes = []
162+
for segment_scenes in results:
163+
all_scenes.extend(segment_scenes)
164+
all_scenes.sort(key=lambda x: x[0].get_seconds())
165+
166+
# Esporta i risultati
167+
srt_output_path = os.path.join(project_path, "scene_timestamps.srt")
168+
export_srt(all_scenes, output_path=srt_output_path)
169+
170+
# Calcola e applica offset
171+
discrepancy = calculate_discrepancy(all_scenes, srt_output_path)
172+
possible_offsets = [-0.011, -0.021, -0.031, -0.041]
173+
best_offset = find_closest_offset(discrepancy, possible_offsets)
174+
adjusted_srt_output_path = os.path.join(project_path, "scene_timestamps_adjusted.srt")
175+
apply_global_offset_to_srt(srt_output_path, adjusted_srt_output_path, best_offset)
176+
177+
# Stampa risultati
178+
print(f"Scene rilevate: {len(all_scenes)}")
179+
for i, scene in enumerate(all_scenes):
180+
print(f"Scena {i+1}: Inizio: {scene[0].get_timecode()}, Fine: {scene[1].get_timecode()}")
181+
print(f"File SRT con offset globale applicato creato con successo: scene_timestamps_adjusted.srt")
182+
print(f"Offset applicato: {best_offset:.3f} secondi")
183+
print(f"Segmenti analizzati: {segments}")
184+
185+
if __name__ == '__main__':
186+
main()

0 commit comments

Comments (0)