Skip to content

Commit 2545b85

Browse files
committed
cleaned up code and fixed obj_centers issue in agent creation
1 parent 32f0025 commit 2545b85

1 file changed

Lines changed: 2 additions & 21 deletions

File tree

GEMstack/onboard/perception/pedestrian_detection.py

Lines changed: 2 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -107,19 +107,10 @@ def __init__(self, vehicle_interface : GEMInterface) -> None:
107107
self.t_vehicle_to_world = None
108108
self.t_vehicle_to_start = None
109109

110-
# Rospy Subscribers and synchronizers
111-
# self.rgb_rosbag = message_filters.Subscriber('/oak/rgb/image_raw', Image)
112-
# self.top_lidar_rosbag = message_filters.Subscriber('/ouster/points', PointCloud2)
113-
# self.sync = message_filters.ApproximateTimeSynchronizer([self.rgb_rosbag, self.top_lidar_rosbag], queue_size=10, slop=0.1)
114-
# self.sync.registerCallback(self.ouster_oak_callback)
115-
116110
# GEMStack Subscribers and synchronizers
117111
# LIDAR Camera fusion
118112
self.vehicle_interface.subscribe_sensor('sensor_fusion_Lidar_Camera',self.ouster_oak_callback)
119113

120-
# LIDAR Camera GNSS fusion
121-
# self.vehicle_interface.subscribe_sensor('sensor_fusion_Lidar_Camera_GNSS',self.ouster_oak_callback)
122-
123114
# TF listener to get transformation from LiDAR to Camera
124115
self.tf_listener = tf.TransformListener()
125116

@@ -142,13 +133,6 @@ def init_debug(self,) -> None:
142133
self.pub_bboxes_markers = rospy.Publisher("/markers/bboxes", MarkerArray, queue_size=10)
143134
self.pub_image = rospy.Publisher("/camera/image_detection", Image, queue_size=1)
144135

145-
146-
# Test code to check gnss , can be removed
147-
# Debugging
148-
def gnss_test(self, cv_image: cv2.Mat, lidar_points: np.ndarray, vehicle_state: GNSSReading):
149-
print(f"vehicle global state: {vehicle_state}")
150-
151-
152136
def update(self, vehicle : VehicleState) -> Dict[str,AgentState]:
153137

154138
# Edge cases
@@ -168,7 +152,7 @@ def update(self, vehicle : VehicleState) -> Dict[str,AgentState]:
168152
return self.current_agents
169153

170154

171-
(f"Global state : {vehicle}")
155+
# (f"Global state : {vehicle}")
172156

173157
# Convert pose to start state. Need to use previous_vehicle state as pedestrian info is delayed
174158
vehicle_start_pose = vehicle.pose.to_frame(ObjectFrameEnum.START,self.previous_vehicle_state.pose,self.start_pose_abs)
@@ -432,7 +416,7 @@ def create_agent_states(self, obj_centers: List[np.ndarray], obj_dims: List[np.n
432416
# [-l/2,l/2] x [-w/2,w/2] x [-h/2,h/2]
433417
# (l, w, h)
434418
# TODO: confirm (z -> l, x -> w, y -> h)
435-
dimensions=(obj_dims[idx][2], obj_dims[idx][0], obj_centers[idx][1]), # obj_dims[idx][0], obj_dims[idx][1], obj_centers[idx][2] + obj_dims[idx][2]
419+
dimensions=(obj_dims[idx][0], obj_dims[idx][1], obj_centers[idx][2] + obj_dims[idx][2]), # obj_dims[idx][0], obj_dims[idx][1], obj_centers[idx][2] + obj_dims[idx][2]
436420
outline=None,
437421
type=AgentEnum.PEDESTRIAN,
438422
activity=AgentActivityEnum.UNDETERMINED, # Temporary
@@ -516,15 +500,12 @@ def ouster_oak_callback(self, cv_image: cv2.Mat, lidar_points: np.ndarray):
516500

517501
# Downsample xyz point clouds
518502
downsampled_points = downsample_points(lidar_points)
519-
# print("len downsampled_points", len(downsampled_points))
520503

521504
# Transform LiDAR points into the camera coordinate frame.
522505
lidar_in_camera = transform_lidar_points(downsampled_points, self.R_lidar_to_oak, self.t_lidar_to_oak)
523-
# print("len lidar_in_camera", len(lidar_in_camera))
524506

525507
# Project the transformed points into the image plane.
526508
projected_pts = project_points(lidar_in_camera, self.K, downsampled_points)
527-
# print("projected_pts", len(projected_pts))
528509

529510
# Process bboxes
530511
boxes = track_result[0].boxes

0 commit comments

Comments (0)