import cv2
from ultralytics import YOLO
import argparse
from tqdm import tqdm
import torch
import os
import sys
import subprocess
import threading
import time
import queue
import json
import signal
import re # Added for ffprobe output parsing
import numpy as np
from collections import Counter
from recon_integration import (
    append_recon_index_row,
    build_recon_index_row,
    build_recon_messages,
    cleanup_temp_file,
    ensure_recon_log_structure,
    get_recon_flags,
    needs_normalization_fallback,
    normalize_video,
    run_recon,
    save_recon_json,
)
# Attempt to import psutil for CPU/Memory monitoring
try:
    import psutil
    PSUTIL_AVAILABLE = True
except ImportError:
    PSUTIL_AVAILABLE = False
    print("⚠️ [WARNING] psutil library not found. RAM usage limiting and CPU/Memory utilization display will not be available.")
# Attempt to import GPUtil for GPU monitoring
try:
    import GPUtil
    GPUTIL_AVAILABLE = True
except Exception:  # ImportError or any GPUtil initialization failure
    GPUTIL_AVAILABLE = False
# --- FFMPEG Check ---
FFMPEG_AVAILABLE = False
def check_ffmpeg():
    global FFMPEG_AVAILABLE
    try:
        subprocess.run(["ffmpeg", "-version"], capture_output=True, text=True, check=True)
        print("[INFO] ffmpeg found and seems to be working.")
        FFMPEG_AVAILABLE = True
    except Exception:
        print("⚠️ [WARNING] ffmpeg command not found or not working. Audio processing will be skipped.")
        FFMPEG_AVAILABLE = False
    return FFMPEG_AVAILABLE
def detect_nvenc():
    """Detect whether the NVENC hevc_nvenc encoder is available in FFmpeg."""
    global NVENC_AVAILABLE
    if not USE_NVENC:
        print("[INFO] NVENC disabled by config (USE_NVENC = False).")
        return False
    try:
        result = subprocess.run(
            ["ffmpeg", "-hide_banner", "-encoders"],
            capture_output=True, text=True, timeout=10
        )
        if "hevc_nvenc" in result.stdout:
            NVENC_AVAILABLE = True
            print("✅ [SUCCESS] NVENC H.265 encoder (hevc_nvenc) detected. Will use hardware encoding.")
            return True
        else:
            print("⚠️ [WARNING] hevc_nvenc not found in FFmpeg. Falling back to libx264.")
            return False
    except Exception as e:
        print(f"⚠️ [WARNING] Could not detect NVENC: {e}. Falling back to libx264.")
        return False
def get_video_rotation(video_path: str) -> int:
    """
    Uses ffprobe to read the rotation metadata of a video.
    Returns the rotation angle (0, 90, 180, or 270); returns 0 if no rotation is found or on error.
    """
    try:
        command = [
            "ffprobe",
            "-v", "error",
            "-select_streams", "v:0",
            "-show_entries", "stream_tags=rotate",
            "-of", "default=nokey=1:noprint_wrappers=1",
            video_path
        ]
        result = subprocess.run(command, capture_output=True, text=True, check=True, timeout=10)
        rotation_str = result.stdout.strip()
        if rotation_str and rotation_str.isdigit():
            rotation_angle = int(rotation_str)
            if rotation_angle in [0, 90, 180, 270]:
                print(f"[INFO] Detected video rotation: {rotation_angle} degrees.")
                return rotation_angle
        print("[INFO] No rotation metadata found or invalid rotation value, assuming 0 degrees.")
        return 0
    except subprocess.CalledProcessError as e:
        print(f"⚠️ [WARNING] ffprobe failed to get rotation for '{video_path}': {e.stderr.strip()}")
        return 0
    except FileNotFoundError:
        print("⚠️ [WARNING] ffprobe command not found. Cannot determine video rotation.")
        return 0
    except subprocess.TimeoutExpired:
        print(f"⚠️ [WARNING] ffprobe timed out while getting rotation for '{video_path}'.")
        return 0
    except Exception as e:
        print(f"⚠️ [WARNING] An unexpected error occurred while getting video rotation: {e}")
        return 0
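# Usage sketch (illustrative; the path is hypothetical). Rotation is read once per input
# so portrait clips can be handled before any frames are decoded:
#   rotation = get_video_rotation("input/clip.mp4")
#   if rotation in (90, 270):
#       pass  # displayed width/height are swapped; downstream sizing must account for it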
# --- Configuration & Setup ---
DEFAULT_MODEL_PATH = "yolov8m.pt"
DEFAULT_OUTPUT_VIDEO_PATH_MARKER = "auto"
DEFAULT_ALLOWED_CLASSES = [
    "person", "car", "truck", "bus", "motorcycle", "bicycle", "airplane", "bird", "cat", "dog",
    "train", "boat", "bench", "backpack", "umbrella", "handbag", "suitcase", "sports ball",
    "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl",
    "chair", "couch", "potted plant", "bed", "dining table",
    "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
    "microwave", "refrigerator", "book", "clock", "vase", "scissors"
]
DEFAULT_CONFIDENCE_THRESHOLD = 0.25
TEMP_VIDEO_BASENAME = "temp_video_processed_silent.mp4" # This will now be the silent video generated by ffmpeg
OUTPUT_SUBDIRECTORY = "output"
FRAME_QUEUE_SIZE = 30
UTILIZATION_UPDATE_INTERVAL = 1.0 # Interval for updating CPU/Mem/GPU stats in progress bar
# MAX_RAM_USAGE_PERCENT: RAM usage (percent of total) above which frame reading pauses.
# At 100 the pause effectively never triggers; lower this (e.g. 90) to guard against OOM.
MAX_RAM_USAGE_PERCENT = 100  # Percentage of total RAM that triggers a pause in frame reading
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# +++ User Configuration +++
ENABLE_PREVIEW_IN_SCRIPT = False # Set to True to show a live preview window during processing
USE_GPU_IN_SCRIPT = True # Set to True to attempt using GPU (CUDA) for processing if available
# TARGET_PROCESSING_WIDTH: This is the width used for INTERNAL object tracking.
# Set to a lower resolution (e.g., 1920 for 1080p equivalent) to reduce RAM usage and speed up tracking.
# The final output video will be upscaled back to its original resolution.
# Set to None to perform tracking at the original video resolution (higher RAM/slower).
TARGET_PROCESSING_WIDTH = 1920
# FFmpeg Quality Settings (Crucial for High Quality and Reasonable File Size)
# CRF (Constant Rate Factor):
#   0     = truly lossless (pixel-perfect, but very large files)
#   17-18 = visually lossless for H.264
#   19-23 = high quality with good compression
# Lower CRF means higher quality and a larger file size.
FFMPEG_CRF_VALUE = 24 # Production default aligned with Adaptive_Temporal benchmark profile.
FFMPEG_VIDEO_CODEC = "libx264" # Recommended for wide compatibility. Use "libx265" for HEVC (smaller files, requires x265 encoder).
FFMPEG_PRESET = "ultrafast" # Production default aligned with Adaptive_Temporal benchmark profile.
# --- NVENC H.265 Hardware Encoding ---
# When True, pipeline will use NVIDIA NVENC hevc_nvenc if available for 40-50% smaller files.
# Falls back silently to libx264 if NVENC is unavailable.
USE_NVENC = True
NVENC_AVAILABLE = False # Runtime flag, set by detect_nvenc() at startup
# --- Watermark Configuration ---
# ENABLE_WATERMARK: Master switch to turn watermark on/off
# Set to True to add "Processed by projectglyphmotion.studio" watermark to output videos
# Set to False to disable watermark completely
ENABLE_WATERMARK = False  # OFF by default - change to True to enable
# Watermark text - displayed in bottom-right corner
WATERMARK_TEXT = "Processed by projectglyphmotion.studio"
# Watermark appearance settings (dynamic - scales with video resolution)
# Font size is calculated as: video_height / WATERMARK_FONT_DIVISOR
# Higher divisor = smaller text, Lower divisor = larger text
WATERMARK_FONT_DIVISOR = 40 # Results in ~27px for 1080p, ~54px for 4K, ~18px for 720p
# Watermark opacity (0.0 = invisible, 1.0 = fully opaque)
# Recommended: 0.6-0.8 for visible but non-distracting
WATERMARK_OPACITY = 0.7
# Margin from edge (as fraction of video height)
# 0.02 = 2% margin from bottom-right corner
WATERMARK_MARGIN_FRACTION = 0.02
# --- ROI (Region of Interest) Configuration ---
# ROI allows users to select a specific area of the video for object tracking
# Objects outside the ROI will be ignored (not drawn/tracked)
# Default ROI settings (can be overridden by command-line arguments)
# When ENABLE_ROI is False, the entire frame is used for tracking
DEFAULT_ROI_ENABLED = False
# ROI overlay settings
# When enabled, draws a semi-transparent overlay outside the ROI area
DEFAULT_ROI_SHOW_OVERLAY = True
DEFAULT_ROI_OVERLAY_OPACITY = 30 # Percentage (0-100)
# ROI overlay color (BGR format for OpenCV)
ROI_OVERLAY_COLOR = (20, 20, 30) # Dark gray-blue, matches the website theme
# --- Adaptive HFDR Production Defaults (always-on by design) ---
ADAPTIVE_HFDR_ENABLED = True
ADAPTIVE_TARGET_FPS = 20.0
ADAPTIVE_INITIAL_ALPHA = 0.30
ADAPTIVE_HFDR_SIGMA = 2.0
ADAPTIVE_HFDR_CACHE_INTERVAL = 3
ADAPTIVE_TEMPORAL_THRESHOLD = 0.003
ADAPTIVE_TEMPORAL_KEYFRAME_INTERVAL = 30
ADAPTIVE_TEMPORAL_BLEND_ALPHA = 0.85 # Blend ratio (0=prev, 1=current) — keep mostly current
ADAPTIVE_MOTION_GATE_THRESHOLD = 0.025
ADAPTIVE_ROI_MARGIN_PX = 24
ADAPTIVE_UNIFIED_MOTION_THRESHOLD = 0.02
ADAPTIVE_UNIFIED_OBJECT_THRESHOLD = 3
ADAPTIVE_UNIFIED_HYSTERESIS = 5
ADAPTIVE_ROI_MIN_CACHE = 1
ADAPTIVE_ROI_MAX_CACHE = 8
ADAPTIVE_ROI_MOTION_HIGH = 0.025
ADAPTIVE_ROI_MOTION_LOW = 0.005
ADAPTIVE_FFMPEG_UNSHARP_ENABLED = True
ADAPTIVE_FFMPEG_UNSHARP_AMOUNT = 1.5
ADAPTIVE_FFMPEG_UNSHARP_MSIZE = 5
ADAPTIVE_PROFILE_NAME = "Adaptive_Temporal"
# --- Motion-Aware Adaptive FFmpeg Filtering Tiers ---
ADAPTIVE_FILTER_TIERS = {
    "low": {"threshold": 0.008, "la": 1.0, "label": "Static"},
    "medium": {"threshold": 0.025, "la": 1.5, "label": "Medium"},
    "high": {"threshold": 999.0, "la": 2.0, "label": "Dynamic"},
}
# --- Semantic Class-Based Priority ---
ADAPTIVE_SEMANTIC_MAP = {
    "person": 3, "car": 3, "truck": 3, "bus": 3,
    "motorcycle": 2, "bicycle": 2, "dog": 2, "cat": 2, "bird": 2,
    "backpack": 1, "umbrella": 1, "handbag": 1, "suitcase": 1,
}
ADAPTIVE_SEMANTIC_ALPHA_MAP = {3: 1.0, 2: 0.7, 1: 0.4, 0: 0.15}
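# How the two maps combine (illustrative): a detected "person" has semantic priority 3,
# so HFDR is applied at full strength (alpha scale 1.0); a "backpack" is priority 1
# (scale 0.4); classes not in the map default to priority 0 (scale 0.15).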
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
_termination_signal_received = False
def _handle_termination_signal(signum, _frame):
    global _termination_signal_received
    _termination_signal_received = True
    print(f"\n[INFO] Received termination signal ({signum}). Stopping processing...")
    raise KeyboardInterrupt
def get_watermark_filter():
    """
    Generates an FFmpeg drawtext filter for the watermark.
    Uses dynamic expressions so it scales with any video resolution/aspect ratio.
    Portrait videos get smaller text to avoid being too intrusive.
    Returns:
        str: FFmpeg filter string for the watermark, or an empty string if disabled.
    """
    if not ENABLE_WATERMARK:
        return ""
    # Escape special characters for FFmpeg drawtext (colons and backslashes)
    escaped_text = WATERMARK_TEXT.replace("\\", "\\\\").replace(":", "\\:")
    # Dynamic FFmpeg expressions:
    #   h = video height, w = video width, th = text height, tw = text width
    #   gt(h,w) = 1 if portrait (height > width), 0 if landscape
    # Font size scales with video dimensions for consistent appearance across resolutions.
    # Portrait videos use smaller text (1.5x larger divisor) to be less intrusive.
    portrait_divisor = WATERMARK_FONT_DIVISOR * 1.5  # Smaller text for portrait
    landscape_divisor = WATERMARK_FONT_DIVISOR  # Normal text for landscape
    # For portrait, use the width (the smaller dimension) as the reference; for landscape, the height.
    # This keeps the watermark proportional to the smaller dimension.
    watermark_filter = (
        f"drawtext="
        f"text='{escaped_text}':"
        # Dynamic font size: smaller for portrait, normal for landscape
        f"fontsize=if(gt(h\\,w)\\,min(w\\,h)/{portrait_divisor}\\,h/{landscape_divisor}):"
        f"fontcolor=white@{WATERMARK_OPACITY}:"  # White text with configured opacity
        f"x=w-tw-(h*{WATERMARK_MARGIN_FRACTION}):"  # Right-aligned with dynamic margin
        f"y=h-th-(h*{WATERMARK_MARGIN_FRACTION}):"  # Bottom-aligned with dynamic margin
        f"box=1:"  # Enable background box
        f"boxcolor=black@{WATERMARK_OPACITY * 0.5}:"  # Semi-transparent black background
        # Dynamic padding: smaller for portrait
        f"boxborderw=if(gt(h\\,w)\\,min(w\\,h)/{portrait_divisor}/4\\,h/{landscape_divisor}/4)"
    )
    print(f"[INFO] Watermark ENABLED: \"{WATERMARK_TEXT}\"")
    print(f"[INFO] Watermark sizing: Landscape={landscape_divisor}, Portrait={portrait_divisor} (smaller)")
    return watermark_filter
def get_system_utilization(device_to_use):
    """
    Retrieves current system utilization statistics (CPU, Memory, GPU).
    Uses torch.cuda for GPU monitoring when GPUtil is not available.
    """
    global GPUTIL_AVAILABLE
    util_stats = {}
    if PSUTIL_AVAILABLE:
        util_stats['cpu'] = psutil.cpu_percent()
        mem_info = psutil.virtual_memory()
        util_stats['mem_used_gb'] = mem_info.used / (1024**3)
        util_stats['mem_total_gb'] = mem_info.total / (1024**3)
        util_stats['mem'] = mem_info.percent
    if device_to_use == "cuda":
        gpu_stats_collected = False
        # Try GPUtil first (more accurate load reporting)
        if GPUTIL_AVAILABLE:
            try:
                gpus = GPUtil.getGPUs()
                if gpus:
                    gpu = gpus[0]
                    util_stats['gpu_load'] = gpu.load * 100
                    util_stats['gpu_mem_mb'] = gpu.memoryUsed  # Actual MB used
                    gpu_stats_collected = True
            except Exception:
                print("⚠️ [WARNING] GPUtil failed to get GPU stats.")
                GPUTIL_AVAILABLE = False
        # Fallback: use torch.cuda built-in monitoring (always works when CUDA is available)
        if not gpu_stats_collected and torch.cuda.is_available():
            try:
                # VRAM usage from PyTorch (reliable, no extra dependency)
                vram_used_mb = torch.cuda.memory_allocated(0) / (1024 * 1024)
                vram_reserved_mb = torch.cuda.memory_reserved(0) / (1024 * 1024)
                util_stats['gpu_mem_mb'] = round(vram_reserved_mb, 1)  # Reserved is more accurate for peak tracking
                # GPU utilization from torch (available in PyTorch 2.0+)
                try:
                    gpu_util = torch.cuda.utilization(0)  # Returns 0-100 int
                    util_stats['gpu_load'] = float(gpu_util)
                except Exception:
                    # torch.cuda.utilization not available in older PyTorch — estimate from activity
                    util_stats['gpu_load'] = 50.0 if vram_used_mb > 100 else 0.0
            except Exception:
                pass
    return util_stats
def format_utilization_string(stats):
    """
    Formats system utilization statistics into a human-readable string.
    """
    parts = []
    if 'cpu' in stats: parts.append(f"CPU:{stats['cpu']:.1f}%")
    if 'mem' in stats:
        parts.append(f"Mem:{stats['mem']:.1f}% ({stats.get('mem_used_gb',0):.1f}/{stats.get('mem_total_gb',0):.1f}GB)")
    if 'gpu_load' in stats: parts.append(f"GPU-L:{stats['gpu_load']:.1f}%")
    if 'gpu_mem_mb' in stats: parts.append(f"VRAM:{stats['gpu_mem_mb']:.0f}MB")
    return " | ".join(parts) if parts else "Stats N/A"
def frame_reader_thread_func(cap, frame_input_queue, stop_event, original_width, original_height, target_processing_width=None, pass_original=False):
    """
    Thread function that reads frames from the video, resizes them for processing, and puts them into a queue.
    Includes a RAM usage check that pauses reading while memory pressure is high.
    Args:
        cap (cv2.VideoCapture): OpenCV VideoCapture object.
        frame_input_queue (queue.Queue): Queue to put read/resized frames into.
        stop_event (threading.Event): Event to signal the thread to stop.
        original_width (int): Original width of the video as reported by OpenCV.
        original_height (int): Original height of the video as reported by OpenCV.
        target_processing_width (int, optional): Target width for internal processing.
            If None, the original resolution is used.
        pass_original (bool): If True, the original full-resolution frame is queued
            alongside the resized processing frame.
    """
    print("[INFO] Frame reader thread started.")
    count = 0
    # Determine the resolution for internal tracking.
    # Note: original_width/height here are the raw dimensions from cap.get(), not necessarily display dimensions.
    processing_width = original_width
    processing_height = original_height
    do_resize_for_processing = False
    if target_processing_width and target_processing_width > 0 and target_processing_width < original_width:
        aspect_ratio = original_height / original_width
        processing_width = target_processing_width
        # Calculate height to maintain aspect ratio, ensuring it's an even number for codecs
        processing_height = int(processing_width * aspect_ratio)
        if processing_height % 2 != 0:
            processing_height = processing_height - 1 if processing_height > 1 else 2  # Ensure even height
        do_resize_for_processing = True
        print(f"[INFO] Frame reader: Resizing frames from {original_width}x{original_height} to {processing_width}x{processing_height} for internal processing.")
    else:
        # If no target_processing_width is set, ensure original dimensions are even for processing compatibility
        if processing_width % 2 != 0: processing_width = processing_width - 1 if processing_width > 1 else 2
        if processing_height % 2 != 0: processing_height = processing_height - 1 if processing_height > 1 else 2
        if original_width != processing_width or original_height != processing_height:
            print(f"⚠️ [WARNING] Frame reader: Adjusted original resolution to {processing_width}x{processing_height} for processing codec compatibility (even dimensions).")
        print(f"[INFO] Frame reader: Processing at original resolution {processing_width}x{processing_height}.")
    while not stop_event.is_set() and cap.isOpened():
        if PSUTIL_AVAILABLE:
            current_ram_usage = psutil.virtual_memory().percent
            ram_check_loops = 0
            # Pause frame reading if RAM usage is too high
            while current_ram_usage > MAX_RAM_USAGE_PERCENT and not stop_event.is_set():
                if ram_check_loops % 5 == 0:  # Print a warning every ~5 seconds of pause
                    print(f"⚠️ [WARNING] [FRAME_READER] High RAM usage: {current_ram_usage:.1f}%. Pausing frame reading for 1s...")
                time.sleep(1.0)
                current_ram_usage = psutil.virtual_memory().percent
                ram_check_loops += 1
            if stop_event.is_set():
                print("[INFO] [FRAME_READER] Stop event received during RAM pause.")
                # Ensure the stop signal is a consistent 3-tuple
                if not frame_input_queue.full(): frame_input_queue.put((False, None, None))
                return
        # Only read a frame if there's space in the queue
        if frame_input_queue.qsize() < FRAME_QUEUE_SIZE:
            ret, frame = cap.read()
            if not ret:  # End of video or error reading frame
                print(f"[INFO] Frame reader: End of video or cannot read frame after {count} frames.")
                # Ensure the end signal is a consistent 3-tuple
                frame_input_queue.put((False, None, None))
                break
            # Resize the frame for internal processing if target_processing_width is set
            if do_resize_for_processing:
                try:
                    processed_frame = cv2.resize(frame, (processing_width, processing_height), interpolation=cv2.INTER_AREA)
                except Exception as e:
                    print(f"❌ [ERROR] [FRAME_READER] Failed to resize frame for processing: {e}. Using original frame.")
                    processed_frame = frame  # Fall back to the original if resize fails
            else:
                processed_frame = frame
            # Put the processed frame and (optionally) the original full-res frame into the queue
            original_to_pass = frame if pass_original else None
            frame_input_queue.put((True, processed_frame, original_to_pass))
            count += 1
        else:
            time.sleep(0.005)  # Small delay to prevent busy-waiting when the queue is full
    # Ensure the end signal is sent if the loop finishes naturally
    if not stop_event.is_set() and not frame_input_queue.full():
        frame_input_queue.put((False, None, None))
    print(f"[INFO] Frame reader thread finished after reading {count} frames.")
def frame_writer_thread_func(ffmpeg_stdin_pipe, frame_output_queue, stop_event):
    """
    Writes processed (and upscaled) frames from the queue directly to FFmpeg's stdin pipe.
    Args:
        ffmpeg_stdin_pipe (file object): stdin pipe of the FFmpeg process.
        frame_output_queue (queue.Queue): Queue to get (upscaled) frames from.
        stop_event (threading.Event): Event to signal the thread to stop.
    """
    print("[INFO] Frame writer thread started for FFmpeg pipe.")
    count = 0
    try:
        while not stop_event.is_set():
            try:
                # Get a frame from the queue with a timeout so stop_event can be checked
                ret, frame_to_write = frame_output_queue.get(timeout=0.1)
                if not ret:  # End signal received
                    print(f"[INFO] Frame writer: End signal received after writing {count} frames.")
                    break  # Exit loop
                if frame_to_write is not None:
                    # Convert the frame to raw bytes for FFmpeg (BGR is OpenCV's default)
                    ffmpeg_stdin_pipe.write(frame_to_write.tobytes())
                    count += 1
                frame_output_queue.task_done()
            except queue.Empty:
                # If the queue is empty, check stop_event and whether the main loop finished
                if stop_event.is_set() and frame_output_queue.empty():
                    print("[INFO] Frame writer: Stop event set and queue empty.")
                    break
                continue  # Keep waiting for frames
            except BrokenPipeError:
                print("❌ [ERROR] [FRAME_WRITER] Broken pipe to FFmpeg. FFmpeg likely exited unexpectedly.")
                break
            except Exception as e:
                print(f"❌ [ERROR] [FRAME_WRITER] Error writing frame to FFmpeg pipe: {e}")
                break
    finally:
        # Crucially, close the pipe to signal FFmpeg that no more frames are coming
        if ffmpeg_stdin_pipe:
            try:
                ffmpeg_stdin_pipe.close()
                print("[INFO] FFmpeg stdin pipe closed.")
            except Exception as e:
                print(f"⚠️ [WARNING] Error closing FFmpeg stdin pipe: {e}")
        # Mark remaining tasks as done if any are left (e.g. if an error occurred)
        while not frame_output_queue.empty():
            try:
                frame_output_queue.get_nowait()
                frame_output_queue.task_done()
            except queue.Empty:
                break  # Queue is now truly empty
    print(f"[INFO] Frame writer thread finished. Total frames written to FFmpeg: {count}")
def parse_arguments():
    """Parses command-line arguments for the script."""
    parser = argparse.ArgumentParser(description="Object Tracking: YOLOv8, Threaded I/O, GPU/CPU, Progress, ffmpeg Audio, Utilization")
    parser.add_argument("--model", type=str, default=DEFAULT_MODEL_PATH, help="Path to YOLOv8 model.")
    parser.add_argument("--output_video", type=str, default=DEFAULT_OUTPUT_VIDEO_PATH_MARKER, help="Path for final video. Default: 'auto' (derived from input name, saved in 'output/' subdir).")
    parser.add_argument("--allowed_classes", nargs="+", default=DEFAULT_ALLOWED_CLASSES, help="Classes to track.")
    parser.add_argument("--confidence_threshold", type=float, default=DEFAULT_CONFIDENCE_THRESHOLD, help="Minimum detection confidence.")
    parser.add_argument("--input_video", type=str, required=True, help="Path to the input video file.")
    parser.add_argument("--codec", type=str, default=FFMPEG_VIDEO_CODEC, help="FFmpeg video codec (e.g. libx264, libx265).")
    parser.add_argument("--preset", type=str, default=FFMPEG_PRESET, help="FFmpeg preset (ultrafast, fast, medium, etc.).")
    parser.add_argument("--crf", type=int, default=FFMPEG_CRF_VALUE, help="FFmpeg CRF value (lower = higher quality).")
    # ROI (Region of Interest) arguments
    parser.add_argument("--roi_enabled", type=str, default="false", help="Enable ROI filtering (true/false).")
    parser.add_argument("--roi_x", type=float, default=0.0, help="ROI X position (0-1 normalized).")
    parser.add_argument("--roi_y", type=float, default=0.0, help="ROI Y position (0-1 normalized).")
    parser.add_argument("--roi_width", type=float, default=1.0, help="ROI width (0-1 normalized).")
    parser.add_argument("--roi_height", type=float, default=1.0, help="ROI height (0-1 normalized).")
    parser.add_argument("--roi_show_overlay", type=str, default="true", help="Show semi-transparent overlay outside ROI (true/false).")
    parser.add_argument("--roi_overlay_opacity", type=int, default=DEFAULT_ROI_OVERLAY_OPACITY, help="ROI overlay opacity (0-100).")
    parser.add_argument("--gdrive_auto_transcoded", type=str, default="false", help="Whether the uploaded output was auto-transcoded on Google Drive (true/false).")
    return parser.parse_args()
def is_box_in_roi(box_coords, roi_coords, frame_width, frame_height):
    """
    Check whether a bounding box center lies within the ROI.
    Args:
        box_coords: Tuple of (x1, y1, x2, y2) - bounding box pixel coordinates
        roi_coords: Dict with 'x', 'y', 'width', 'height' - normalized ROI (0-1)
        frame_width: Width of the frame in pixels
        frame_height: Height of the frame in pixels
    Returns:
        bool: True if the box center is within the ROI, False otherwise
    """
    if roi_coords is None:
        return True  # No ROI means all boxes are valid
    x1, y1, x2, y2 = box_coords
    # Calculate the box center
    box_center_x = (x1 + x2) / 2
    box_center_y = (y1 + y2) / 2
    # Convert the ROI from normalized to pixel coordinates
    roi_x1 = roi_coords['x'] * frame_width
    roi_y1 = roi_coords['y'] * frame_height
    roi_x2 = (roi_coords['x'] + roi_coords['width']) * frame_width
    roi_y2 = (roi_coords['y'] + roi_coords['height']) * frame_height
    # Check if the box center is within the ROI
    return (roi_x1 <= box_center_x <= roi_x2) and (roi_y1 <= box_center_y <= roi_y2)
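# Worked example (illustrative, not executed): for a 1920x1080 frame with a centered ROI
#   roi = {"x": 0.25, "y": 0.25, "width": 0.5, "height": 0.5}  # pixels x:[480,1440], y:[270,810]
#   is_box_in_roi((600, 300, 700, 400), roi, 1920, 1080)  # center (650, 350) -> True
#   is_box_in_roi((0, 0, 100, 100), roi, 1920, 1080)      # center (50, 50)   -> False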
def draw_roi_overlay(frame, roi_coords, opacity):
    """
    Draw a semi-transparent overlay outside the ROI area.
    Args:
        frame: The frame to draw on (modified in place)
        roi_coords: Dict with 'x', 'y', 'width', 'height' - normalized ROI (0-1)
        opacity: Overlay opacity (0-100)
    Returns:
        The modified frame with the ROI overlay
    """
    if roi_coords is None or opacity <= 0:
        return frame
    h, w = frame.shape[:2]
    # Convert the normalized ROI to pixel coordinates
    roi_x1 = int(roi_coords['x'] * w)
    roi_y1 = int(roi_coords['y'] * h)
    roi_x2 = int((roi_coords['x'] + roi_coords['width']) * w)
    roi_y2 = int((roi_coords['y'] + roi_coords['height']) * h)
    # Clamp values to the frame bounds
    roi_x1 = max(0, min(roi_x1, w))
    roi_y1 = max(0, min(roi_y1, h))
    roi_x2 = max(0, min(roi_x2, w))
    roi_y2 = max(0, min(roi_y2, h))
    # Create the overlay
    overlay = frame.copy()
    alpha = opacity / 100.0
    # Fill the areas outside the ROI with a dark overlay
    # Top region
    if roi_y1 > 0:
        cv2.rectangle(overlay, (0, 0), (w, roi_y1), ROI_OVERLAY_COLOR, -1)
    # Bottom region
    if roi_y2 < h:
        cv2.rectangle(overlay, (0, roi_y2), (w, h), ROI_OVERLAY_COLOR, -1)
    # Left region (between top and bottom)
    if roi_x1 > 0:
        cv2.rectangle(overlay, (0, roi_y1), (roi_x1, roi_y2), ROI_OVERLAY_COLOR, -1)
    # Right region (between top and bottom)
    if roi_x2 < w:
        cv2.rectangle(overlay, (roi_x2, roi_y1), (w, roi_y2), ROI_OVERLAY_COLOR, -1)
    # Draw the ROI border (orange to match the theme)
    border_color = (54, 161, 253)  # BGR for #FDA136 (orange)
    cv2.rectangle(overlay, (roi_x1, roi_y1), (roi_x2, roi_y2), border_color, 2)
    # Blend the overlay with the original frame
    cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)
    return frame
class PipelineController:
    """
    Feedback-driven controller (PID-like in spirit) for adaptive parameter tuning.
    Monitors real-time pipeline signals (FPS, motion, queue depth, tracking
    confidence) and continuously adjusts filter strength and HFDR alpha.
    """
    def __init__(self, target_fps=20.0, initial_filter_strength=2.0, initial_alpha=0.3):
        self.target_fps = target_fps
        self.filter_strength = initial_filter_strength
        self.hfdr_alpha = initial_alpha
        self._fps_window = []
        self._adjustment_count = 0
        self._window_size = 10
    def update(self, current_fps, motion_score, queue_depth, tracking_confidence):
        self._fps_window.append(current_fps)
        if len(self._fps_window) > self._window_size:
            self._fps_window.pop(0)
        avg_fps = sum(self._fps_window) / len(self._fps_window)
        fps_error = avg_fps - self.target_fps
        if fps_error < -3:
            self.filter_strength = max(0.5, self.filter_strength - 0.3)
            self.hfdr_alpha = max(0.1, self.hfdr_alpha - 0.05)
            self._adjustment_count += 1
        elif fps_error < -1:
            self.filter_strength = max(0.8, self.filter_strength - 0.1)
            self.hfdr_alpha = max(0.15, self.hfdr_alpha - 0.02)
            self._adjustment_count += 1
        elif fps_error > 5:
            self.filter_strength = min(3.0, self.filter_strength + 0.2)
            self.hfdr_alpha = min(0.5, self.hfdr_alpha + 0.03)
            self._adjustment_count += 1
        elif fps_error > 2:
            self.filter_strength = min(2.5, self.filter_strength + 0.1)
            self.hfdr_alpha = min(0.4, self.hfdr_alpha + 0.01)
            self._adjustment_count += 1
        # Motion override: high-motion scenes need full filter strength
        if motion_score > 0.05:
            self.filter_strength = max(self.filter_strength, 1.5)
        # Queue pressure: if the output queue is backing up, reduce work
        queue_ratio = queue_depth / max(1, FRAME_QUEUE_SIZE)
        if queue_ratio > 0.8:
            self.filter_strength = max(0.5, self.filter_strength - 0.2)
            self.hfdr_alpha = max(0.1, self.hfdr_alpha - 0.05)
            self._adjustment_count += 1
        # Tracking confidence: if the tracker is struggling, boost detail
        if tracking_confidence < 0.4 and tracking_confidence > 0:
            self.hfdr_alpha = min(0.5, self.hfdr_alpha + 0.1)
            self._adjustment_count += 1
        return self.filter_strength, self.hfdr_alpha
    @property
    def adjustment_count(self):
        return self._adjustment_count
    @property
    def avg_fps(self):
        return sum(self._fps_window) / max(1, len(self._fps_window))
    def compute_roi_refresh_interval(self, motion_score):
        if motion_score >= ADAPTIVE_ROI_MOTION_HIGH:
            return ADAPTIVE_ROI_MIN_CACHE
        elif motion_score <= ADAPTIVE_ROI_MOTION_LOW:
            return ADAPTIVE_ROI_MAX_CACHE
        else:
            ratio = (motion_score - ADAPTIVE_ROI_MOTION_LOW) / max(0.001, (ADAPTIVE_ROI_MOTION_HIGH - ADAPTIVE_ROI_MOTION_LOW))
            interval = ADAPTIVE_ROI_MAX_CACHE - ratio * (ADAPTIVE_ROI_MAX_CACHE - ADAPTIVE_ROI_MIN_CACHE)
            return max(ADAPTIVE_ROI_MIN_CACHE, int(round(interval)))
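# Usage sketch of the per-frame feedback loop (values hypothetical):
#   controller = PipelineController(target_fps=ADAPTIVE_TARGET_FPS,
#                                   initial_alpha=ADAPTIVE_INITIAL_ALPHA)
#   strength, alpha = controller.update(current_fps=17.5, motion_score=0.01,
#                                       queue_depth=12, tracking_confidence=0.6)
#   # avg FPS (17.5) is ~2.5 below the target of 20, so filter_strength and
#   # hfdr_alpha are both nudged down to recover throughput.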
class UnifiedModeController:
    """Switches the pipeline between modes A and B with hysteresis to prevent rapid flapping."""
    def __init__(self, motion_threshold=0.02, object_threshold=3, hysteresis=5):
        self.motion_threshold = motion_threshold
        self.object_threshold = object_threshold
        self.hysteresis = hysteresis
        self.current_mode = "A"
        self._hold_counter = 0
        self._mode_switches = 0
    def decide_mode(self, motion_score, object_count, avg_confidence=0.5):
        wants_mode_b = (motion_score > self.motion_threshold or
                        object_count >= self.object_threshold or
                        avg_confidence < 0.4)  # Low confidence = need more detail
        desired_mode = "B" if wants_mode_b else "A"
        if desired_mode != self.current_mode:
            self._hold_counter += 1
            if self._hold_counter >= self.hysteresis:
                self.current_mode = desired_mode
                self._hold_counter = 0
                self._mode_switches += 1
        else:
            self._hold_counter = 0
        return self.current_mode
    @property
    def mode_switches(self):
        return self._mode_switches
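# Usage sketch (values hypothetical): mode B is requested under high motion, crowded
# scenes, or low tracker confidence, but only commits after `hysteresis` consecutive
# frames agree:
#   mode_controller = UnifiedModeController(
#       motion_threshold=ADAPTIVE_UNIFIED_MOTION_THRESHOLD,
#       object_threshold=ADAPTIVE_UNIFIED_OBJECT_THRESHOLD,
#       hysteresis=ADAPTIVE_UNIFIED_HYSTERESIS)
#   mode = mode_controller.decide_mode(motion_score=0.03, object_count=5)
#   # wants "B" (0.03 > 0.02 and 5 >= 3), but returns "A" until 5 consecutive frames agree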
def should_reuse_frame(current_gray, reference_gray, threshold=0.003):
    """Compare grayscale frames; returns (skip_hfdr, diff_ratio, should_blend)."""
    if reference_gray is None:
        return False, 1.0, False
    diff = cv2.absdiff(current_gray, reference_gray)
    diff_ratio = float(np.mean(diff)) / 255.0
    skip_hfdr = diff_ratio < threshold * 0.3  # Ultra-static only (<0.1% change)
    should_blend = diff_ratio < threshold and not skip_hfdr  # Slight change → blend
    return skip_hfdr, diff_ratio, should_blend
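# Illustrative decision bands at the default ADAPTIVE_TEMPORAL_THRESHOLD = 0.003:
#   diff_ratio < 0.0009 (ultra-static)  -> skip_hfdr=True   (reuse the cached result outright)
#   0.0009 <= diff_ratio < 0.003        -> should_blend=True (blend previous and current)
#   diff_ratio >= 0.003                 -> process the frame fresh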
def extract_hf_detail_only(frame, alpha=0.3, sigma=2.0):
    """Extract a high-frequency detail layer: alpha*(frame - blurred), biased around 128 to fit uint8."""
    blurred = cv2.GaussianBlur(frame, (0, 0), sigmaX=sigma, sigmaY=sigma)
    return cv2.addWeighted(frame, alpha, blurred, -alpha, 128.0)
def apply_hf_detail(frame, hf_detail):
    """Add a previously extracted detail layer back onto a frame (removing the 128 bias)."""
    return cv2.addWeighted(frame, 1.0, hf_detail, 1.0, -128.0)
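# A minimal round trip of the two helpers above (frame shape illustrative):
#   hf = extract_hf_detail_only(frame, alpha=ADAPTIVE_INITIAL_ALPHA, sigma=ADAPTIVE_HFDR_SIGMA)
#   enhanced = apply_hf_detail(frame, hf)
# The net effect is enhanced = frame + alpha*(frame - blurred): the biased detail layer
# is added back with the 128 offset removed, sharpening high-frequency regions.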
def build_roi_mask(frame_shape, detections, margin=24, priorities=None):
    """
    Create a uint8 mask from YOLO bounding boxes.
    If a priorities dict is given, returns a per-pixel priority map instead of a binary mask.
    """
    h, w = frame_shape[:2]
    mask = np.zeros((h, w), dtype=np.uint8)
    for det in detections:
        x1, y1, x2, y2 = int(det[0]), int(det[1]), int(det[2]), int(det[3])
        class_name = det[4] if len(det) > 4 else ""
        x1 = max(0, x1 - margin)
        y1 = max(0, y1 - margin)
        x2 = min(w, x2 + margin)
        y2 = min(h, y2 + margin)
        if priorities is not None:
            priority = priorities.get(class_name, 0)
            region = mask[y1:y2, x1:x2]
            mask[y1:y2, x1:x2] = np.maximum(region, priority)
        else:
            mask[y1:y2, x1:x2] = 255
    return mask
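# Illustrative call (detections are hypothetical (x1, y1, x2, y2, class_name) rows):
#   dets = [(100, 100, 300, 400, "person"), (500, 200, 700, 350, "backpack")]
#   binary = build_roi_mask(frame.shape, dets, margin=ADAPTIVE_ROI_MARGIN_PX)
#   prio = build_roi_mask(frame.shape, dets, margin=ADAPTIVE_ROI_MARGIN_PX,
#                         priorities=ADAPTIVE_SEMANTIC_MAP)  # person=3, backpack=1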
def apply_roi_hfdr(frame, hf_detail, roi_mask, alpha_multipliers=None, priority_mask=None):
    """
    Apply HFDR only inside ROI regions. Supports semantic mode with per-class alpha scaling.
    """
    result = frame.copy()
    if priority_mask is not None and alpha_multipliers is not None:
        for priority_level, alpha_scale in alpha_multipliers.items():
            if alpha_scale <= 0.05:
                continue
            level_mask = (priority_mask == priority_level)
            if not np.any(level_mask):
                continue
            blended = cv2.addWeighted(frame, 1.0, hf_detail, alpha_scale, -128.0 * alpha_scale)
            result[level_mask] = blended[level_mask]
    else:
        roi_bool = roi_mask > 0
        if np.any(roi_bool):
            blended = apply_hf_detail(frame, hf_detail)
            result[roi_bool] = blended[roi_bool]
    return result
def get_adaptive_filter_tier(motion_score):
    """Map a spatial-complexity score to a 3-tier FFmpeg filter strength."""
    tiers = ADAPTIVE_FILTER_TIERS
    if motion_score < tiers["low"]["threshold"]:
        return tiers["low"]["la"], tiers["low"]["label"]
    elif motion_score < tiers["medium"]["threshold"]:
        return tiers["medium"]["la"], tiers["medium"]["label"]
    else:
        return tiers["high"]["la"], tiers["high"]["label"]
def get_semantic_priority(class_name, priority_map=None):
    """Get the semantic priority level for a detected object class."""
    if priority_map is None:
        priority_map = ADAPTIVE_SEMANTIC_MAP
    return priority_map.get(class_name, 0)
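# Illustrative mapping with the default ADAPTIVE_FILTER_TIERS:
#   get_adaptive_filter_tier(0.002) -> (1.0, "Static")
#   get_adaptive_filter_tier(0.015) -> (1.5, "Medium")
#   get_adaptive_filter_tier(0.060) -> (2.0, "Dynamic")
# And for semantic priority: get_semantic_priority("person") -> 3, unknown classes -> 0.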
def start_ffmpeg_video_encoder(output_path, width, height, fps, vf_filter_string=""):
    """
    Starts an FFmpeg subprocess that encodes raw video frames from stdin.
    Uses NVENC H.265 if available, otherwise falls back to libx264.
    """
    ffmpeg_cmd = [
        "ffmpeg",
        "-y",
        "-hide_banner",
        "-loglevel", "error",
        "-stats",
        "-f", "rawvideo",
        "-pix_fmt", "bgr24",
        "-s", f"{width}x{height}",
        "-r", str(fps),
        "-i", "-",
    ]
    if vf_filter_string:
        ffmpeg_cmd.extend(["-vf", vf_filter_string])
    # Choose encoder: NVENC H.265 or libx264 fallback
    if NVENC_AVAILABLE:
        ffmpeg_cmd.extend([
            "-c:v", "hevc_nvenc",
            "-preset", "p4",
            "-rc", "vbr",
            "-cq", str(FFMPEG_CRF_VALUE),
            "-b:v", "0",
            "-pix_fmt", "yuv420p",
            "-metadata", "major_brand=mp42",
            "-metadata", "compatible_brands=isomiso2avc1mp41",
            output_path
        ])
    else:
        ffmpeg_cmd.extend([
            "-c:v", FFMPEG_VIDEO_CODEC,
            "-preset", FFMPEG_PRESET,
            "-crf", str(FFMPEG_CRF_VALUE),
            "-pix_fmt", "yuv420p",
            "-metadata", "major_brand=mp42",
            "-metadata", "compatible_brands=isomiso2avc1mp41",
            output_path
        ])
    encoder_name = "hevc_nvenc" if NVENC_AVAILABLE else FFMPEG_VIDEO_CODEC
    print(f"[INFO] Starting FFmpeg encoder ({encoder_name}). Command: {' '.join(ffmpeg_cmd)}")
    process = subprocess.Popen(ffmpeg_cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
    return process
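# Usage sketch (paths/values hypothetical): frames are piped as raw BGR bytes, and the
# stdin pipe must be closed to flush the encoder:
#   proc = start_ffmpeg_video_encoder("output/temp.mp4", 1920, 1080, 30.0,
#                                     vf_filter_string=get_watermark_filter())
#   proc.stdin.write(frame.tobytes())  # repeat per frame; frame is (1080, 1920, 3) uint8 BGR
#   proc.stdin.close()
#   proc.wait()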
def process_audio_ffmpeg(video_source_path, temp_silent_video_path, final_output_video_path):
    """
    Copies the audio from the original video and merges it with the processed silent video.
    Args:
        video_source_path (str): Path to the original input video (for audio).
        temp_silent_video_path (str): Path to the processed video (without audio, but correctly oriented).
        final_output_video_path (str): Desired path for the final video with audio.
    Returns:
        bool: True if the audio merge was successful, False otherwise.
    """
    if not FFMPEG_AVAILABLE:
        print("⚠️ [WARNING] [AUDIO_MERGE] ffmpeg not available. Skipping audio merge.")
        # Without ffmpeg, the silent video is the direct output from tracking; rename it.
        if os.path.exists(temp_silent_video_path) and not os.path.exists(final_output_video_path):
            try:
                os.rename(temp_silent_video_path, final_output_video_path)
                print(f"✅ [SUCCESS] Processed silent video saved as final output: '{final_output_video_path}'.")
            except OSError as e:
                print(f"❌ [ERROR] Could not rename temp silent file: {e}.")
        return False
    if not os.path.exists(temp_silent_video_path):
        print(f"❌ [ERROR] [AUDIO_MERGE] Temporary silent video '{temp_silent_video_path}' not found. Cannot merge audio.")
        return False
    print("\n[AUDIO_MERGE] Attempting to extract and merge audio using ffmpeg.")
    if os.path.exists(final_output_video_path):
        print(f"⚠️ [WARNING] Output file {final_output_video_path} exists. Overwriting for audio merge.")
    # Use a temporary name for the audio-merged file
    final_output_video_temp = final_output_video_path + ".temp_audio_merged.mp4"
    # Command to copy the video stream and the audio stream (if present):
    #   -map 0:v:0   use only the video stream from the first input (temp_silent_video_path)
    #   -map 1:a:0?  use the first audio stream from the second input (video_source_path); '?' makes it optional
    #   -shortest    limit output duration to the shortest input stream
    #   -c:v copy    copy the video stream without re-encoding (it was just encoded by the FFmpeg pipe)
    #   -c:a aac     re-encode audio to AAC for wide compatibility
    # No -vf filter is needed here: temp_silent_video_path is already correctly oriented.
    ffmpeg_audio_merge_cmd = [
        "ffmpeg", "-y", "-i", temp_silent_video_path, "-i", video_source_path,
        "-map", "0:v:0", "-map", "1:a:0?",  # Map video from first input, audio from second
        "-c:v", "copy",
        "-c:a", "aac",
        "-strict", "experimental",  # Needed for some AAC encoders
        "-b:a", "192k",  # Audio bitrate, adjust as needed
        "-metadata", "major_brand=mp42",
        "-metadata", "compatible_brands=isomiso2avc1mp41",
        "-shortest",
        final_output_video_temp
    ]
    try:
        print(f"[AUDIO_MERGE] Executing audio merge: {' '.join(ffmpeg_audio_merge_cmd)}")
        # Capture output but don't print it unless there's an error
        subprocess.run(ffmpeg_audio_merge_cmd, check=True, capture_output=True, text=True)
        # check=True raises CalledProcessError on non-zero exit codes,
        # so reaching this point means the merge command succeeded.
        if os.path.exists(final_output_video_temp):
            # Move the temporary file to the final destination
            if os.path.exists(final_output_video_path):
                os.remove(final_output_video_path)
            os.rename(final_output_video_temp, final_output_video_path)
            print(f"✅ [SUCCESS] Audio merged and final video saved: '{final_output_video_path}'")
            return True
        else:
            print(f"❌ [ERROR] [AUDIO_MERGE] FFmpeg did not create expected output file '{final_output_video_temp}'.")
            return False
    except subprocess.CalledProcessError as e:
        print(f"❌ [ERROR] [AUDIO_MERGE] FFmpeg audio merge failed with exit code {e.returncode}.")
        print(f"STDOUT:\n{e.stdout}")
        print(f"STDERR:\n{e.stderr}")
    except Exception as e_ffmpeg:
        print(f"❌ [ERROR] [AUDIO_MERGE] An unexpected error occurred during FFmpeg audio merge: {e_ffmpeg}")
    return False
def main():
    global FFMPEG_VIDEO_CODEC, FFMPEG_PRESET, FFMPEG_CRF_VALUE
    start_time_total = time.time()
    signal.signal(signal.SIGTERM, _handle_termination_signal)
    signal.signal(signal.SIGINT, _handle_termination_signal)
    project_root = os.path.dirname(os.path.abspath(__file__))
    recon_report = None
    recon_flags = {}
    fallback_mode_used = False
    recon_normalized_temp_path = None
    # Define the absolute path for the temporary silent video output
    temp_silent_video_abs_path = os.path.abspath(os.path.join(OUTPUT_SUBDIRECTORY, TEMP_VIDEO_BASENAME))
    # Initialize all critical variables up front to prevent UnboundLocalError
    processing_loop_active = True
    frame_count = 0
    last_printed_percentage = -1  # Not used with tqdm
    original_width = 0
    original_height = 0
    fps = 0.0
    total_frames = 0
    model = None  # Assigned later in a try/except
    telemetry = {
        "mode_a_frames": 0,
        "mode_b_frames": 0,
        "hfdr_fresh_count": 0,
        "hfdr_cached_count": 0,
        "hfdr_gated_count": 0,
        "ffmpeg_unsharp_amount": ADAPTIVE_FFMPEG_UNSHARP_AMOUNT if ADAPTIVE_FFMPEG_UNSHARP_ENABLED else 0.0,
    }
    # --- Stats tracking for .stats.json (matches ot_benchmark.py format) ---
    tracking_stats = {
        "total_detections": 0,
        "sum_confidence": 0.0,
        "unique_ids": set(),
        "id_appearances": {},
        "motion_vectors": [],
        "frame_latencies": [],
        "objects_per_frame": [],
        "spatial_complexities": [],
    }
    prev_centers_for_motion = {}  # track_id -> (cx, cy) for motion vector calculation
    peak_cpu = 0.0
    peak_ram_mb = 0.0
    peak_gpu_util = 0.0
    peak_vram_mb = 0.0
    # --- PyTorch CPU Threading Configuration ---
    # Attempt to optimize PyTorch's CPU operations by setting the number of threads.
    try:
        cpu_cores = os.cpu_count()
        if cpu_cores:
            num_threads_to_set = max(1, cpu_cores // 2)  # Use half of the logical cores for balance
            torch.set_num_threads(num_threads_to_set)
            print(f"[INFO] Suggested {num_threads_to_set} threads for PyTorch CPU operations.")
        else:
            print("[INFO] Could not determine CPU core count for PyTorch thread setting.")