-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathgreeter.py
More file actions
1598 lines (1336 loc) · 66.2 KB
/
greeter.py
File metadata and controls
1598 lines (1336 loc) · 66.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import torch
from ultralytics import YOLO
import cv2
import numpy as np
from pathlib import Path
import logging
import sqlite3
from datetime import datetime
from transformers import pipeline
import time
import os
import threading
from deepface import DeepFace
import traceback
from queue import Queue, Full
import aiohttp
import json
from threading import Thread
import tensorflow as tf
import json
import aiohttp
import traceback
import asyncio
from urllib.parse import urlencode
import base64
# Route 'greeter' records through a DEBUG-capable stream handler while the
# logger itself stays at INFO so other loggers are not flooded.
debug_handler = logging.StreamHandler()
debug_handler.setLevel(logging.DEBUG)
webhook_logger = logging.getLogger('greeter')
webhook_logger.setLevel(logging.INFO)
webhook_logger.addHandler(debug_handler)
class GreeterAgent:
def __init__(self, config):
    """Initialize the GreeterAgent.

    Args:
        config: dict-like configuration; the optional "rtsp_stream" key is
            the camera URL consumed by try_rtsp_stream().

    Side effects: loads the YOLO detector, opens a camera source, creates
    the known/unknown face directories, and starts the frame-grabber,
    face-analysis, webhook, and detection worker threads.

    Fixes over the previous revision: the webhook/detection queues and
    worker threads were created and started twice (leaking a duplicate set
    of live threads); several attributes were assigned multiple times with
    conflicting values (the final effective values are kept); freeze
    detection state is now initialized before the frame-grabber thread
    that reads it is started.
    """
    # --- Logging ---------------------------------------------------------
    self.logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.DEBUG)
    # Silence YOLO's logger
    logging.getLogger('ultralytics').setLevel(logging.WARNING)
    self.logger.info("Initializing GreeterAgent")

    self.config = config
    self.logger.debug(f"Received config: {self.config}")

    # --- Capture state ---------------------------------------------------
    self.cap = None
    self.frame_interval = 1.0 / 30
    self.last_frame_time = 0
    self.frame_count = 0  # set unconditionally (previously lost if YOLO load failed)

    # --- AI model --------------------------------------------------------
    try:
        self.logger.info("Loading YOLO model...")
        self.detector = YOLO('yolov8n.pt', verbose=False)
        # Check for available hardware acceleration
        if torch.backends.mps.is_available():
            self.device = 'mps'  # Use Metal Performance Shaders on Mac
            self.logger.info("YOLO using Apple Metal (MPS)")
        elif torch.cuda.is_available():
            self.device = 'cuda'  # Use CUDA if available
            self.logger.info("YOLO using CUDA GPU")
        else:
            self.device = 'cpu'  # Fallback to CPU
            self.logger.info("YOLO using CPU")
        self.detector.to(self.device)
        self.logger.info(f"AI model loaded successfully on {self.device}")
        # Classes of interest (COCO dataset ids)
        self.target_classes = {
            0: 'person',
            16: 'dog',
            17: 'cat',
            62: 'chair',         # Common false positive for packages
            63: 'couch',         # Common false positive for packages
            64: 'potted plant',
            67: 'dining table',
            73: 'book',          # Can help with package detection
            84: 'book bag',
            85: 'suitcase',
            86: 'handbag',
            87: 'tie',
        }
    except Exception as e:
        self.logger.error(f"Failed to load AI model: {e}")
        self.detector = None

    # --- Camera ----------------------------------------------------------
    if not self.initialize_camera():
        self.logger.error("Failed to initialize any camera source")
    self.max_reconnect_attempts = 5
    self.reconnect_delay = 2  # seconds between reconnection attempts
    self.last_reconnect_attempt = 0

    # --- FPS tracking ----------------------------------------------------
    self.fps = 0
    self.fps_frames = 0
    self.fps_start = time.time()
    self.fps_update_interval = 1.0  # Update FPS every second

    # --- Detection smoothing (final effective values; previously assigned
    # several times with conflicting values) -----------------------------
    self.last_detections = []        # Store recent detections
    self.detection_history = 10      # Number of frames to keep track of
    self.confidence_threshold = 0.75
    self.iou_threshold = 0.3
    self.smoothing_weight = 0.3
    self.last_detection_state = {}

    # --- Buffer management / performance monitoring ----------------------
    self.max_frame_delay = 1.0       # Maximum allowed frame delay (seconds)
    self.frame_skip = 1              # Process every frame initially
    self.frame_buffer_size = 3
    self.processing_times = []
    self.max_processing_times = 30   # Track last 30 processing times

    # --- Frame sync ------------------------------------------------------
    self.max_latency = 1.0           # Maximum acceptable latency in seconds
    self.sync_check_interval = 1.0   # How often to check sync
    self.last_sync_check = time.time()
    self.frame_timestamp = time.time()

    # --- Frame freeze detection (must exist BEFORE the grabber thread
    # starts, since _frame_grabber -> _is_frame_frozen reads these) -------
    self.last_frame_content = None
    self.last_frame_change_time = time.time()
    self.max_freeze_duration = 3.0        # Seconds before a frame counts as frozen
    self.frame_similarity_threshold = 1.00  # Threshold for considering frames identical

    # --- Frame management / grabber thread -------------------------------
    self.latest_frame = None
    self.latest_processed_frame = None
    self.frame_lock = threading.Lock()
    self.running = True
    self.last_detection_time = 0
    self.detection_interval = 0.033  # Run detection at ~30fps
    self.frame_thread = threading.Thread(target=self._frame_grabber, daemon=True)
    self.frame_thread.start()

    # --- Face analysis settings ------------------------------------------
    self.face_analysis_enabled = True
    self.last_face_result = None
    self.last_face_time = 0
    self.face_cache_duration = 5.0   # Base duration
    self.face_max_duration = 30.0    # Max time to show a name without re-verification
    self.last_face_save_time = 0
    self.face_save_cooldown = 10.0
    self.last_emotion_state = None
    self.last_sentiment_time = 0
    self.sentiment_cooldown = 2.0
    self.sentiment_threshold = 0.80
    self.face_tracking_enabled = True
    self.consecutive_matches = 0     # Consecutive matches for the same person

    # Face image directories
    self.unknown_faces_dir = Path("unknown_faces")
    self.known_faces_dir = Path("known_faces")
    self.unknown_faces_dir.mkdir(exist_ok=True)
    self.known_faces_dir.mkdir(exist_ok=True)

    # Multi-face tracking state
    self.face_tracks = {}
    self.face_tracks_lock = threading.Lock()
    self.track_id_counter = 0
    self.track_timeout = 5.0          # Seconds before a track expires
    self.max_tracking_distance = 100  # Pixels - max distance for track association

    # Face analysis worker
    self.logger.info("Initializing face analysis queue...")
    self.face_analysis_queue = Queue(maxsize=10)
    self.logger.info("Starting face analysis thread...")
    self.face_analysis_thread = Thread(
        target=self._face_analysis_worker,
        name="FaceAnalysisWorker",
        daemon=True
    )
    self.face_analysis_thread.start()
    self.logger.info("Face analysis thread started")

    # Actions / emotion caches
    self.action_config = {}
    self.emotion_update_queue = Queue(maxsize=100)
    self.last_emotion_updates = {}

    # --- Webhook / detection workers (created and started exactly ONCE;
    # the previous revision spawned a duplicate set of live threads) ------
    self.session = None               # lazily-created aiohttp session
    self.last_webhook_time = 0
    self.webhook_cooldown = 8.0       # seconds between webhooks
    self.webhook_queue = Queue(maxsize=100)
    self.detection_queue = Queue(maxsize=100)
    self.webhook_thread = Thread(target=self._webhook_worker, daemon=True)
    self.detection_thread = Thread(target=self._detection_worker, daemon=True)
    self.webhook_thread.start()
    self.detection_thread.start()
    self.logger.info("Worker threads started")

    # --- Background processing -------------------------------------------
    self.processing_task = None
    self.should_process = True
async def initialize_aiohttp_session(self):
"""Initialize aiohttp session"""
if self.session is None:
self.session = aiohttp.ClientSession()
async def cleanup(self):
"""Cleanup resources including background task"""
self.should_process = False
if self.processing_task:
self.processing_task.cancel()
try:
await self.processing_task
except asyncio.CancelledError:
pass
await super().cleanup() # Call existing cleanup
async def send_webhook_notification(self, notification_type: str, data: dict):
    """Send a webhook notification based on the action configuration.

    Looks up *notification_type* in the 'actions.config' JSON file, fills
    ${name} placeholders in the configured body's 'text' field with values
    from *data*, and POSTs the result as form-encoded data to the
    configured URL. Sends are throttled by self.webhook_cooldown; all
    failures are logged and swallowed, never raised to the caller.
    """
    try:
        self.logger.debug(f"[WEBHOOK] TRACE: Called with type '{notification_type}' and data: {data}")
        current_time = time.time()
        # Check webhook cooldown. Note the timestamp is refreshed below even
        # when a later config lookup bails out without sending.
        if current_time - self.last_webhook_time < self.webhook_cooldown:
            self.logger.debug("[WEBHOOK] Skipped due to cooldown")
            return
        self.last_webhook_time = current_time
        # Load action configuration (re-read from disk on every call).
        if not Path('actions.config').exists():
            self.logger.warning("[WEBHOOK] No actions.config file found")
            return
        with open('actions.config', 'r') as f:
            config = json.loads(f.read())
        self.logger.debug(f"[WEBHOOK] Loaded config: {config}")
        if notification_type not in config:
            self.logger.warning(f"[WEBHOOK] No configuration found for {notification_type}")
            self.logger.warning(f"[WEBHOOK] Available types: {list(config.keys())}")
            return
        webhook_config = config[notification_type]
        self.logger.debug(f"[WEBHOOK] Using config: {webhook_config}")
        # Get URL and method.
        # NOTE(review): 'method' is read but never used -- the request below
        # is always a POST. Confirm whether non-POST webhooks are needed.
        method = webhook_config.get('method', 'POST')
        url = webhook_config.get('url', '').strip()
        if not url:
            self.logger.warning("[WEBHOOK] No URL configured")
            return
        # Ensure the shared aiohttp session exists (lazy creation).
        if self.session is None:
            self.logger.info("[WEBHOOK] Initializing new aiohttp session")
            self.session = aiohttp.ClientSession()
        # Parse and process the body template.
        if webhook_config.get('body'):
            try:
                # Parse the body template string into a dict.
                body_template = json.loads(webhook_config['body'])
                self.logger.debug(f"[WEBHOOK] Body template: {body_template}")
                # Replace ${name} placeholders in the text field with values
                # from *data*.
                if 'text' in body_template:
                    text = body_template['text']
                    for var_name, var_value in data.items():
                        placeholder = f"${{{var_name}}}"
                        if placeholder in text:
                            text = text.replace(placeholder, str(var_value))
                    body_template['text'] = text
                # Convert to form data.
                form_data = {
                    'text': body_template.get('text', ''),
                    'color': body_template.get('color', '#00FF00'),
                    'repeat': str(body_template.get('repeat', 0))  # Convert to string
                }
                self.logger.info(f"[WEBHOOK] Attempting to send to {url} with form data: {form_data}")
                # Create and run the request coroutine.
                async def send_request():
                    # Perform the POST; network errors are logged, not raised.
                    try:
                        async with self.session.post(
                            url=url,
                            data=form_data,
                            headers={'Content-Type': 'application/x-www-form-urlencoded'},
                            timeout=aiohttp.ClientTimeout(total=5)
                        ) as response:
                            response_text = await response.text()
                            self.logger.info(f"[WEBHOOK] Response status: {response.status}")
                            self.logger.info(f"[WEBHOOK] Response text: {response_text}")
                            if response.status >= 400:
                                self.logger.error(f"[WEBHOOK] Failed: {response.status} - {response_text}")
                            else:
                                self.logger.info(f"[WEBHOOK] Success: {response_text}")
                    except Exception as e:
                        self.logger.error(f"[WEBHOOK] Request failed: {str(e)}")
                        self.logger.error(traceback.format_exc())
                # Run the request (awaited inline, not scheduled as a task).
                self.logger.debug("[WEBHOOK] Creating task")
                await send_request()
                self.logger.debug("[WEBHOOK] Task completed")
            except json.JSONDecodeError as e:
                self.logger.error(f"[WEBHOOK] Invalid JSON in body template: {str(e)}")
            except Exception as e:
                self.logger.error(f"[WEBHOOK] Processing failed: {str(e)}")
                self.logger.error(traceback.format_exc())
    except Exception as e:
        self.logger.error(f"[WEBHOOK] Error: {str(e)}")
        self.logger.error(traceback.format_exc())
def start_face_analysis_thread(self):
    """Create the face-analysis queue and launch its daemon worker thread."""
    self.face_analysis_queue = Queue(maxsize=10)
    worker = Thread(
        target=self._face_analysis_worker,
        name="FaceAnalysisWorker",
        daemon=True,
    )
    self.face_analysis_thread = worker
    try:
        worker.start()
    except Exception as e:
        self.logger.error(f"Failed to start face analysis thread: {e}")
    else:
        self.logger.info("Face analysis thread started")
def _frame_grabber(self):
    """Background thread to continuously grab frames with auto-reconnect and freeze detection.

    Runs until self.running is cleared. Publishes each decoded frame to
    self.latest_frame (under self.frame_lock) for get_frame() to consume.
    After max_errors consecutive failures the capture is torn down so the
    next loop iteration triggers attempt_reconnection().
    """
    consecutive_errors = 0
    max_errors = 5
    # NOTE(review): error_reset_time/last_error_time are recorded but never
    # compared -- the error window is never actually reset by elapsed time.
    error_reset_time = 10
    last_error_time = time.time()
    while self.running:
        try:
            if not self.cap or not self.cap.isOpened():
                self.logger.warning("Camera connection lost, attempting reconnection...")
                if self.attempt_reconnection():
                    consecutive_errors = 0
                    continue
                time.sleep(1)
                continue
            # Grab the next frame (grab() alone is cheap; decode happens in retrieve()).
            ret = self.cap.grab()
            if not ret:
                consecutive_errors += 1
                last_error_time = time.time()
                self.logger.warning(f"Frame grab failed (errors: {consecutive_errors}/{max_errors})")
                if consecutive_errors >= max_errors:
                    self.logger.error("Too many consecutive errors, forcing reconnection...")
                    self.cap.release()
                    self.cap = None
                    continue
                time.sleep(0.1)
                continue
            # Retrieve (decode) and check for a frozen frame.
            ret, frame = self.cap.retrieve()
            if ret:
                frame = np.asarray(frame, dtype=np.uint8)
                # A frozen frame means the RTSP stream has stalled: reconnect.
                if self._is_frame_frozen(frame):
                    self.logger.warning("Detected frozen frame, forcing RTSP reconnection...")
                    self.cap.release()
                    self.cap = None
                    continue
                # Publish the frame for get_frame() under the shared lock.
                with self.frame_lock:
                    self.latest_frame = frame
                    self.frame_timestamp = time.time()
                consecutive_errors = 0
            else:
                consecutive_errors += 1
                last_error_time = time.time()
        except Exception as e:
            self.logger.error(f"Frame grabber error: {str(e)}")
            consecutive_errors += 1
            last_error_time = time.time()
            if consecutive_errors >= max_errors:
                self.logger.error("Too many consecutive errors, forcing reconnection...")
                if self.cap:
                    self.cap.release()
                    self.cap = None
                time.sleep(1)
        # Yield briefly so other threads can run.
        time.sleep(0.001)
def _is_frame_frozen(self, current_frame):
"""Check if the frame is frozen by comparing with previous frame"""
try:
current_time = time.time()
# Initialize if this is the first frame
if self.last_frame_content is None:
self.last_frame_content = current_frame.copy()
self.last_frame_change_time = current_time
return False
# Calculate frame similarity
try:
# Downscale frames for faster comparison
small_current = cv2.resize(current_frame, (32, 32))
small_last = cv2.resize(self.last_frame_content, (32, 32))
# Calculate mean squared error
mse = np.mean((small_current - small_last) ** 2)
similarity = 1 - (mse / 255**2) # Normalize to 0-1 range
# Check if frames are too similar for too long
if similarity > self.frame_similarity_threshold:
if current_time - self.last_frame_change_time > self.max_freeze_duration:
self.logger.warning(f"Frame frozen for {current_time - self.last_frame_change_time:.1f} seconds")
return True
else:
# Frame has changed, update reference
self.last_frame_content = current_frame.copy()
self.last_frame_change_time = current_time
return False
except Exception as e:
self.logger.error(f"Frame comparison error: {str(e)}")
return False
except Exception as e:
self.logger.error(f"Frame freeze detection error: {str(e)}")
return False
def initialize_camera(self):
    """Open a camera source: RTSP first, then fallback sources.

    Returns True only when a source opens AND a test frame can be read.
    """
    self.logger.info("Starting camera initialization")
    if self.cap:
        self.logger.info("Releasing existing camera")
        self.cap.release()
    if not self.try_rtsp_stream():
        # RTSP unavailable -- try the fallback sources instead.
        return self.fallback_to_test_source()
    # Verify the RTSP connection by reading one test frame.
    ok, _ = self.cap.read()
    if not ok:
        self.logger.error("Camera initialized but failed to read test frame")
        return False
    self.logger.info("Camera initialization successful")
    return True
def try_rtsp_stream(self):
    """Attempt to connect to the configured RTSP stream.

    Returns True on success, False (with logging) on any failure.
    """
    try:
        stream_url = self.config.get("rtsp_stream")
        if not stream_url:
            self.logger.error("No RTSP stream URL in config")
            return False
        # Release any existing capture before reconnecting.
        if self.cap is not None:
            self.cap.release()
            self.cap = None
        # FFMPEG tuning for stability and low latency: TCP transport,
        # 5 s socket timeout, no extra buffering or packet reordering.
        ffmpeg_options = [
            'rtsp_transport;tcp',
            'rtsp_flags;prefer_tcp',
            'stimeout;5000000',
            'fflags;nobuffer',
            'flags;low_delay',
            'reorder_queue_size;0',
            'max_delay;500000',
        ]
        os.environ['OPENCV_FFMPEG_CAPTURE_OPTIONS'] = '|'.join(ffmpeg_options)
        self.logger.info(f"Connecting to RTSP stream: {stream_url}")
        self.cap = cv2.VideoCapture(stream_url, cv2.CAP_FFMPEG)
        if not self.cap.isOpened():
            self.logger.error("Failed to open RTSP stream")
            return False
        # Keep the internal buffer minimal for low latency.
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        # Fresh connection: reset the error counter.
        self.consecutive_errors = 0
        return True
    except Exception as e:
        self.logger.exception(f"RTSP initialization failed: {str(e)}")
        return False
def fallback_to_test_source(self):
    """Fall back to the local webcam (device 0) when RTSP is unavailable.

    Returns True when the webcam opens and yields a test frame, else False.
    """
    self.logger.info("Attempting fallback sources")
    try:
        self.logger.info("Trying webcam (device 0)")
        self.cap = cv2.VideoCapture(0)
        if self.cap.isOpened():
            # Optimize camera settings for low latency.
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1024)
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 768)
            self.cap.set(cv2.CAP_PROP_FPS, 30)
            self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # Minimize buffer
            self.cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
            self.cap.set(cv2.CAP_PROP_CONVERT_RGB, 0)  # Skip color conversion if possible
            ret, frame = self.cap.read()
            if ret:
                # Fixed: the log previously claimed 640x480 although the
                # capture is configured for 1024x768 above.
                self.logger.info("Successfully initialized webcam at 1024x768")
                return True
            else:
                self.logger.error("Could not read frame from webcam")
        else:
            self.logger.error("Could not open webcam")
    except Exception as e:
        self.logger.exception(f"Webcam initialization failed: {str(e)}")
    self.logger.error("All camera initialization attempts failed")
    return False
# get_frame() is the main entry point called by the web layer: it returns
# (success, frame) where frame is the latest processed image as a numpy array.
def get_frame(self):
    """Get the latest frame with all processing and prepare for web display.

    Returns:
        (success, frame): success is False when no frame is available or
        processing failed; frame is a BGR numpy array (possibly annotated
        with smoothed detection boxes and identity/emotion labels) or None.
    """
    try:
        current_time = time.time()
        webhook_data = None  # Initialize webhook_data at the start
        # NOTE(review): webhook_data is never used in this method -- confirm
        # whether the webhook hookup was removed intentionally.
        # Update FPS calculation.
        self.fps_frames += 1
        if current_time - self.fps_start >= self.fps_update_interval:
            self.fps = self.fps_frames / (current_time - self.fps_start)
            self.fps_frames = 0
            self.fps_start = current_time
        # Get the most recent frame published by the grabber thread.
        with self.frame_lock:
            if self.latest_frame is None:
                self.logger.warning("No frame available")
                return False, None
            frame = self.latest_frame.copy()  # Make copy to avoid conflicts
        # Queue detection instead of running it directly (at most every 500 ms).
        should_run_detection = False
        if not hasattr(self, 'last_detection_time') or \
           current_time - self.last_detection_time >= 0.5:  # 500ms = 2 FPS
            should_run_detection = True
            self.last_detection_time = current_time
            try:
                self.detection_queue.put_nowait({
                    'frame': frame.copy(),
                    'timestamp': current_time
                })
            except Full:
                self.logger.debug("Detection queue full, skipping frame")
        # Use the last known results (written by the detection worker) if any.
        results = None
        with self.frame_lock:
            if hasattr(self, 'last_results'):
                results = self.last_results
        if results is None:
            return True, frame  # Return unprocessed frame if no results
        # Process detections with smoothing.
        for box in results.boxes:
            try:
                x1, y1, x2, y2 = box.xyxy[0].tolist()[:4]
                confidence = float(box.conf[0])
                class_id = int(box.cls[0])
                # Initialize track_id before using it.
                track_id = None
                # Process person detections only (COCO class 0 == person).
                if confidence >= self.confidence_threshold and class_id == 0:
                    try:
                        track_id = str(self._get_track_id((x1, y1, x2, y2)))
                    except Exception as e:
                        self.logger.error(f"Tracking failed: {str(e)}")
                        continue  # Skip this detection if tracking fails
                    # Extract the person crop (used as the face region).
                    try:
                        face_region = frame[int(y1):int(y2), int(x1):int(x2)].copy()
                    except Exception as e:
                        self.logger.error(f"Face extraction failed: {str(e)}")
                        continue
                    # Queue face analysis if the crop is non-empty; throttle
                    # per-track analysis to at most once every 2 seconds.
                    if face_region.size > 0:
                        should_analyze = True
                        if track_id in self.face_tracks:
                            last_analysis = self.face_tracks[track_id].get('last_analysis_time', 0)
                            if current_time - last_analysis < 2.0:
                                should_analyze = False
                        if should_analyze:
                            try:
                                self.face_analysis_queue.put_nowait({
                                    'faces': [{
                                        'face': face_region,
                                        'bbox': (x1, y1, x2, y2)
                                    }],
                                    'track_id': track_id,
                                    'timestamp': current_time
                                })
                                if track_id in self.face_tracks:
                                    self.face_tracks[track_id]['last_analysis_time'] = current_time
                            except Full:
                                self.logger.debug("Face analysis queue is full, skipping frame")
                    # Draw the bounding box with position smoothing.
                    try:
                        if not hasattr(self, 'box_positions'):
                            self.box_positions = {}
                        # Only draw if we have a valid track_id.
                        if track_id:
                            # Smooth box positions.
                            if track_id not in self.box_positions:
                                self.box_positions[track_id] = {
                                    'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
                                    'last_update': current_time
                                }
                            else:
                                # Only update positions every 300ms.
                                if current_time - self.box_positions[track_id]['last_update'] >= 0.3:
                                    # Smooth movement (exponential moving average).
                                    alpha = 0.3
                                    pos = self.box_positions[track_id]
                                    pos['x1'] = pos['x1'] * (1-alpha) + x1 * alpha
                                    pos['y1'] = pos['y1'] * (1-alpha) + y1 * alpha
                                    pos['x2'] = pos['x2'] * (1-alpha) + x2 * alpha
                                    pos['y2'] = pos['y2'] * (1-alpha) + y2 * alpha
                                    pos['last_update'] = current_time
                            # Draw using the smoothed positions.
                            smooth_pos = self.box_positions[track_id]
                            cv2.rectangle(frame,
                                          (int(smooth_pos['x1']), int(smooth_pos['y1'])),
                                          (int(smooth_pos['x2']), int(smooth_pos['y2'])),
                                          (0, 255, 0),
                                          2)
                            # Build the label with identity and emotion.
                            label = f"Person ({confidence:.2f})"
                            if track_id in self.face_tracks:
                                track = self.face_tracks[track_id]
                                label_parts = []
                                # Add identity if available.
                                identity = track.get('identity')
                                if identity and identity != 'unknown':
                                    label_parts.append(f"Name: {identity}")
                                # Add emotion when confident enough
                                # (emotion_confidence is a percentage).
                                emotion = track.get('emotion')
                                emotion_conf = track.get('emotion_confidence')
                                if emotion and emotion_conf and emotion_conf > 40:
                                    label_parts.append(f"Emotion: {emotion} ({emotion_conf:.1f}%)")
                                if label_parts:
                                    label += " | " + " | ".join(label_parts)
                            cv2.putText(frame,
                                        label,
                                        (int(smooth_pos['x1']), int(smooth_pos['y1']) - 10),
                                        cv2.FONT_HERSHEY_SIMPLEX,
                                        0.6,
                                        (0, 255, 0),
                                        2)
                    except Exception as e:
                        self.logger.error(f"Drawing error: {str(e)}")
                        continue
            except Exception as e:
                self.logger.error(f"Box processing error: {str(e)}")
                continue
        return True, frame
    except Exception as e:
        self.logger.error(f"Frame processing error: {str(e)}")
        if 'frame' in locals():
            return False, frame
        return False, None
def _drop_frames_keep_detections(self):
    """Drop buffered frames to catch up with the live stream.

    Returns:
        (True, frame) with the newest retrieved frame once enough frames
        have been dropped, or (False, None) if nothing could be retrieved.

    Removed the unused `start_time` / `current_time` locals from the
    previous revision.
    """
    frames_dropped = 0
    last_frame = None
    self.logger.info("Starting frame drop sequence...")
    # Drop frames until we catch up (hard cap keeps this bounded).
    while frames_dropped < 300:
        if not self.cap.grab():
            self.logger.error("Failed to grab frame during dropping")
            break
        frames_dropped += 1
        # Retrieve (decode) only every third grabbed frame; grab() alone is cheap.
        if frames_dropped % 3 == 0:
            ret, frame = self.cap.retrieve()
            if not ret:
                continue
            last_frame = frame
            # Once a minimum number of frames has been dropped, return the newest.
            if frames_dropped > 30:
                self.logger.info(f"Dropped {frames_dropped} frames")
                return True, last_frame
    self.logger.warning(f"Frame dropping limit reached after {frames_dropped} frames")
    if last_frame is not None:
        return True, last_frame
    return False, None
def _clear_buffer(self):
    """Drain a few stale frames from the capture buffer."""
    if not self.cap or not self.cap.isOpened():
        return
    limit = 5  # never drain more than this many frames per call
    cleared = 0
    while cleared < limit and self.cap.grab():
        cleared += 1
        # Stop early once the stream has caught up.
        if time.time() - self.last_frame_time < self.max_frame_delay:
            break
    if cleared > 0:
        self.logger.debug(f"Cleared {cleared} frames from buffer")
def attempt_reconnection(self):
"""Attempt to reconnect to the camera with backoff"""
current_time = time.time()
# Enforce delay between reconnection attempts
if current_time - self.last_reconnect_attempt < self.reconnect_delay:
return False
self.last_reconnect_attempt = current_time
self.logger.info("Attempting camera reconnection...")
# Release existing connection if any
if self.cap:
self.cap.release()
self.cap = None
# Try to reinitialize
for attempt in range(self.max_reconnect_attempts):
self.logger.info(f"Reconnection attempt {attempt + 1}/{self.max_reconnect_attempts}")
if self.initialize_camera():
self.logger.info("Successfully reconnected to camera")
return True
# Wait before next attempt (exponential backoff)
time.sleep(min(self.reconnect_delay * (2 ** attempt), 30))
self.logger.error("Failed to reconnect after maximum attempts")
return False
def setup_logging(self):
"""Setup logging with DEBUG level"""
logging.basicConfig(
level=logging.DEBUG, # Changed from INFO to DEBUG
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
self.logger = logging.getLogger("greeter")
self.logger.setLevel(logging.DEBUG) # Explicitly set logger level to DEBUG
def setup_database(self):
self.conn = sqlite3.connect('greeter.db')
self.create_tables()
def create_tables(self):
cursor = self.conn.cursor()
cursor.execute('''
CREATE TABLE IF NOT EXISTS interactions (
id INTEGER PRIMARY KEY AUTOINCREMENT,
timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
person_name TEXT,
category TEXT,
sentiment TEXT,
action TEXT,
image_path TEXT
)
''')
self.conn.commit()
def _get_track_id(self, bbox):
"""Get or create track ID for detection"""
current_time = time.time()
# Clean up old tracks
self.face_tracks = {
track_id: track for track_id, track in self.face_tracks.items()
if current_time - track['last_seen'] < self.track_timeout
}
# Try to match with existing track
for track_id, track in self.face_tracks.items():
if self._calculate_iou(bbox, track['bbox']) > self.iou_threshold:
# Update existing track
track['bbox'] = bbox
track['last_seen'] = current_time
return track_id
# Create new track
new_track_id = str(int(time.time() * 1000))
self.face_tracks[new_track_id] = {
'bbox': bbox,
'last_seen': current_time,
'last_processed': 0, # Add this field
'name': None,
'emotion': None,
'emotion_confidence': 0,
'consecutive_matches': 0
}
return new_track_id
def _calculate_iou(self, box1, box2):
"""Calculate Intersection over Union between two boxes"""
# Convert boxes to [x1, y1, x2, y2] format
box1 = [float(x) for x in box1]
box2 = [float(x) for x in box2]
# Calculate intersection coordinates
x1 = max(box1[0], box2[0])
y1 = max(box1[1], box2[1])
x2 = min(box1[2], box2[2])
y2 = min(box1[3], box2[3])
# Calculate intersection area
intersection = max(0, x2 - x1) * max(0, y2 - y1)
# Calculate union area
box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])
union = box1_area + box2_area - intersection
# Return IoU
return intersection / union if union > 0 else 0
def _update_face_tracks(self, face_locations, current_time):
    """Update face tracking with new detections.

    Args:
        face_locations: list of dicts with 'bbox' [x1, y1, x2, y2] and
            'face_img' (the cropped face image).
        current_time: timestamp used for expiry and matching bookkeeping.

    NOTE(review): this method keys new tracks by an integer counter and
    relies on every track having a 'center' entry, whereas _get_track_id()
    creates string-keyed tracks without 'center' -- mixing the two code
    paths would raise KeyError below. Confirm only one path is in use.
    """
    # Remove expired tracks.
    self.face_tracks = {
        track_id: track for track_id, track in self.face_tracks.items()
        if current_time - track['last_seen'] < self.track_timeout
    }
    # Match new detections to existing tracks (greedy nearest-center).
    unmatched_detections = []
    matched_track_ids = set()
    for face_loc in face_locations:
        bbox = face_loc['bbox']
        center = ((bbox[0] + bbox[2]) // 2, (bbox[1] + bbox[3]) // 2)
        # Find the closest unmatched track within max_tracking_distance.
        best_track_id = None
        min_distance = float('inf')
        for track_id, track in self.face_tracks.items():
            if track_id in matched_track_ids:
                continue
            track_center = track['center']
            distance = ((center[0] - track_center[0]) ** 2 +
                        (center[1] - track_center[1]) ** 2) ** 0.5
            if distance < min_distance and distance < self.max_tracking_distance:
                min_distance = distance
                best_track_id = track_id
        if best_track_id is not None:
            # Update the matched track in place.
            self.face_tracks[best_track_id].update({
                'bbox': bbox,
                'center': center,
                'last_seen': current_time
            })
            matched_track_ids.add(best_track_id)
        else:
            # Defer creation until all detections have had a chance to match.
            unmatched_detections.append({
                'bbox': bbox,
                'center': center,
                'face_img': face_loc['face_img']
            })
    # Create new tracks for unmatched detections.
    for detection in unmatched_detections:
        track_id = self.track_id_counter
        self.track_id_counter += 1
        self.face_tracks[track_id] = {
            'bbox': detection['bbox'],
            'center': detection['center'],
            'last_seen': current_time,
            'name': None,
            'confidence': 0,
            'consecutive_matches': 0
        }
def handle_pet(self, region, pet_type, confidence):
    """Handle pet detections.

    Currently a stub: pet detections (dog/cat from target_classes) are
    recognized upstream but no action is taken here yet.
    """
    pass