1- from combined_detection_utils import *
1+ from combined_detection_utils import add_bounding_box
22import numpy as np
33from scipy .spatial .transform import Rotation as R
44import rospy
@@ -50,25 +50,24 @@ def pc2_to_numpy_with_intensity(pc2_msg, want_rgb=False):
5050 return pts [mask ]
5151
5252
53- class PointPillarsNode () :
53+ class PointPillarsNode :
5454 """
5555 Detects Pedestrians with PointPillars and publishes the results in vehicle frame.
5656 """
5757
58- def __init__ (
59- self ,
60- ):
61- self .latest_image = None
62- self .latest_lidar = None
63- self .bridge = CvBridge ()
58+ def __init__ (self ):
59+ self .latest_image = None
60+ self .latest_lidar = None
61+ self .bridge = CvBridge ()
6462 self .camera_name = 'front'
65- self .camera_front = (self .camera_name == 'front' )
63+ self .camera_front = (self .camera_name == 'front' )
6664 self .score_threshold = 0.4
6765 self .debug = True
6866 self .initialize ()
6967
7068 def initialize (self ):
71- # --- Determine the correct RGB topic for this camera ---
69+ """Initialize the PointPillars node with model and ROS connections."""
70+ # Determine the correct RGB topic for this camera
7271 rgb_topic_map = {
7372 'front' : '/oak/rgb/image_raw' ,
7473 'front_right' : '/camera_fr/arena_camera_node/image_raw' ,
@@ -81,52 +80,61 @@ def initialize(self):
8180
8281 # Initialize PointPillars node
8382 rospy .init_node ('pointpillars_box_publisher' )
83+
8484 # Create bounding box publisher
8585 self .pub = rospy .Publisher ('/pointpillars_boxes' , BoundingBoxArray , queue_size = 1 )
8686 rospy .loginfo ("PointPillars node initialized and waiting for messages." )
8787
8888 # Initialize PointPillars
8989 device = torch .device ('cuda' if torch .cuda .is_available () else 'cpu' )
9090 self .pointpillars = PointPillars (
91- nclasses = 3 ,
92- voxel_size = [0.16 , 0.16 , 4 ],
93- point_cloud_range = [0 , - 39.68 , - 3 , 69.12 , 39.68 , 1 ],
94- max_num_points = 32 ,
95- max_voxels = (16000 , 40000 )
96- )
91+ nclasses = 3 ,
92+ voxel_size = [0.16 , 0.16 , 4 ],
93+ point_cloud_range = [0 , - 39.68 , - 3 , 69.12 , 39.68 , 1 ],
94+ max_num_points = 32 ,
95+ max_voxels = (16000 , 40000 )
96+ )
9797 self .pointpillars .to (device )
9898
99+ # Load model weights
99100 model_path = 'epoch_160.pth'
100- checkpoint = torch .load (model_path ) #, map_location='cuda' if torch.cuda.is_available() else 'cpu')
101+ checkpoint = torch .load (model_path )
101102 self .pointpillars .load_state_dict (checkpoint )
102-
103103 self .pointpillars .eval ()
104104 rospy .loginfo ("PointPillars model loaded successfully" )
105105
106- self .T_l2v = np .array ([[0.99939639 , 0.02547917 , 0.023615 , 1.1 ],
107- [- 0.02530848 , 0.99965156 , - 0.00749882 , 0.03773583 ],
108- [- 0.02379784 , 0.00689664 , 0.999693 , 1.95320223 ],
109- [0. , 0. , 0. , 1. ]])
106+ # Transformation matrix from LiDAR to vehicle frame
107+ self .T_l2v = np .array ([
108+ [0.99939639 , 0.02547917 , 0.023615 , 1.1 ],
109+ [- 0.02530848 , 0.99965156 , - 0.00749882 , 0.03773583 ],
110+ [- 0.02379784 , 0.00689664 , 0.999693 , 1.95320223 ],
111+ [0. , 0. , 0. , 1. ]
112+ ])
110113
111114 # Subscribe to the RGB and LiDAR streams
112115 self .rgb_sub = Subscriber (rgb_topic , Image )
113116 self .lidar_sub = Subscriber ('/ouster/points' , PointCloud2 )
114- self .sync = ApproximateTimeSynchronizer ([
115- self .rgb_sub , self .lidar_sub
116- ], queue_size = 10 , slop = 0.1 )
117+ self .sync = ApproximateTimeSynchronizer (
118+ [self .rgb_sub , self .lidar_sub ],
119+ queue_size = 10 ,
120+ slop = 0.1
121+ )
117122 self .sync .registerCallback (self .synchronized_callback )
118123
119124 def synchronized_callback (self , image_msg , lidar_msg ):
125+ """Process synchronized RGB and LiDAR messages to detect pedestrians."""
120126 rospy .loginfo ("Received synchronized RGB and LiDAR messages" )
127+
128+ # Convert LiDAR message to numpy array
121129 self .latest_lidar = pc2_to_numpy_with_intensity (lidar_msg , want_rgb = False )
122130
123131 downsample = False
124-
125132 if downsample :
126133 lidar_down = downsample_points (self .latest_lidar , voxel_size = 0.1 )
127134 else :
128135 lidar_down = self .latest_lidar .copy ()
129136
137+ # Create empty list of bounding boxes to fill
130138 boxes = BoundingBoxArray ()
131139 boxes .header .frame_id = 'currentVehicleFrame'
132140 boxes .header .stamp = lidar_msg .header .stamp
@@ -167,24 +175,27 @@ def synchronized_callback(self, image_msg, lidar_msg):
167175 R_vehicle = self .T_l2v [:3 , :3 ] @ R_lidar
168176 vehicle_yaw , vehicle_pitch , vehicle_roll = R .from_matrix (R_vehicle ).as_euler ('zyx' , degrees = False )
169177
170- print ("printing" )
171- print (z_vehicle )
172- print (h )
178+ # print("printing")
179+ # print(z_vehicle)
180+ # print(h)
181+ # Ensure the box doesn't go below ground level
173182 if (z_vehicle - h / 2 ) < 0.0 :
174183 z_vehicle = h / 2
175184
176- boxes = add_bounding_box (boxes = boxes ,
185+ # Add the bounding box
186+ boxes = add_bounding_box (
187+ boxes = boxes ,
177188 frame_id = 'currentVehicleFrame' ,
178189 stamp = lidar_msg .header .stamp ,
179190 x = x_vehicle ,
180191 y = y_vehicle ,
181192 z = z_vehicle ,
182- l = l , # length
183- w = w , # width
184- h = h , # height
193+ l = l , # length
194+ w = w , # width
195+ h = h , # height
185196 yaw = vehicle_yaw ,
186197 conf_score = score ,
187- label = label # person/pedestrian class
198+ label = label # person/pedestrian class
188199 )
189200
190201 rospy .loginfo (f"Pedestrian detected at ({ x :.2f} , { y :.2f} , { z :.2f} ) with score { score :.2f} " )
@@ -193,9 +204,10 @@ def synchronized_callback(self, image_msg, lidar_msg):
193204 rospy .loginfo (f"Publishing { len (boxes .boxes )} pedestrian bounding boxes" )
194205 self .pub .publish (boxes )
195206
207+
if __name__ == '__main__':
    # Start the detector node and block until ROS shuts down.  A Ctrl-C /
    # node shutdown surfaces as ROSInterruptException and is a clean exit.
    try:
        node = PointPillarsNode()
        rospy.spin()
    except rospy.ROSInterruptException:
        pass