Skip to content

Commit e386be3

Browse files
committed
Added some comments to explain complicated sections of code and moved data saving functionality to separate function
1 parent 696937c commit e386be3

1 file changed

Lines changed: 59 additions & 42 deletions

File tree

GEMstack/onboard/perception/cone_detection.py

Lines changed: 59 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -157,61 +157,29 @@ def undistort_image(self, image, K, D):
157157
return undistorted, newK
158158

159159
def update(self, vehicle: VehicleState) -> Dict[str, AgentState]:
160-
161-
start = time.time()
162160
downsample = False
161+
# Gate guards against data not being present for both sensors:
163162
if self.latest_image is None or self.latest_lidar is None:
164163
return {}
165164
lastest_image = self.latest_image.copy()
166-
# Ensure data/ exists and build timestamp
165+
166+
# Set up current time variables
167+
start = time.time()
168+
current_time = self.vehicle_interface.time()
169+
167170
if downsample:
168171
lidar_down = downsample_points(self.latest_lidar, voxel_size=0.1)
169172
else:
170173
lidar_down = self.latest_lidar.copy()
171-
current_time = self.vehicle_interface.time()
174+
172175
if self.start_time is None:
173176
self.start_time = current_time
174177
time_elapsed = current_time - self.start_time
175178

179+
# Ensure data/ exists and build timestamp
176180
if self.save_data:
177-
os.makedirs("data", exist_ok=True)
178-
tstamp = int(self.vehicle_interface.time() * 1000)
179-
# 1) Dump raw image
180-
cv2.imwrite(f"data/{tstamp}_image.png", lastest_image)
181-
# 2) Dump raw LiDAR
182-
np.savez(f"data/{tstamp}_lidar.npz", lidar=self.latest_lidar)
183-
# 3) Write BEFORE_TRANSFORM
184-
with open(f"data/{tstamp}_vehstate.txt", "w") as f:
185-
vp = vehicle.pose
186-
f.write(
187-
f"BEFORE_TRANSFORM "
188-
f"x={vp.x:.3f}, y={vp.y:.3f}, z={vp.z:.3f}, "
189-
f"yaw={vp.yaw:.2f}, pitch={vp.pitch:.2f}, roll={vp.roll:.2f}\n"
190-
)
191-
# Compute vehicle_start_pose in either START or CURRENT
192-
if self.use_start_frame:
193-
if self.start_pose_abs is None:
194-
self.start_pose_abs = vehicle.pose
195-
vehicle_start_pose = vehicle.pose.to_frame(
196-
ObjectFrameEnum.START,
197-
vehicle.pose,
198-
self.start_pose_abs
199-
)
200-
mode = "START"
201-
else:
202-
vehicle_start_pose = vehicle.pose
203-
mode = "CURRENT"
204-
with open(f"data/{tstamp}_vehstate.txt", "a") as f:
205-
f.write(
206-
f"AFTER_TRANSFORM "
207-
f"x={vehicle_start_pose.x:.3f}, "
208-
f"y={vehicle_start_pose.y:.3f}, "
209-
f"z={vehicle_start_pose.z:.3f}, "
210-
f"yaw={vehicle_start_pose.yaw:.2f}, "
211-
f"pitch={vehicle_start_pose.pitch:.2f}, "
212-
f"roll={vehicle_start_pose.roll:.2f}, "
213-
f"frame={mode}\n"
214-
)
181+
self.save_sensor_data(vehicle=vehicle, latest_image=latest_image)
182+
215183
if self.camera_front == False:
216184
start = time.time()
217185
undistorted_img, current_K = self.undistort_image(lastest_image, self.K, self.D)
@@ -254,6 +222,8 @@ def update(self, vehicle: VehicleState) -> Dict[str, AgentState]:
254222
cx, cy, w, h = box
255223
combined_boxes.append((cx, cy, w, h, AgentActivityEnum.STANDING))
256224

225+
# Visualize the received images in 2D with their corresponding labels
226+
# It draws rectangles and labels on the images:
257227
if getattr(self, 'visualize_2d', False):
258228
for (cx, cy, w, h, activity) in combined_boxes:
259229
left = int(cx - w / 2)
@@ -278,8 +248,15 @@ def update(self, vehicle: VehicleState) -> Dict[str, AgentState]:
278248
cv2.imshow("Detection - Cone 2D", undistorted_img)
279249

280250
start = time.time()
251+
# Transform the lidar points from lidar frame of reference to camera EXTRINSIC frame of reference.
252+
# Then project the pixels onto the lidar points to "paint them" (essentially determine which points are associated with detected objects)
281253
pts_cam = transform_points_l2c(lidar_down, self.T_l2c)
282254
projected_pts = project_points(pts_cam, self.current_K, lidar_down)
255+
# What is returned:
256+
# projected_pts[:, 0]: u-coordinate in the image (horizontal pixel position)
257+
# projected_pts[:, 1]: v-coordinate in the image (vertical pixel position)
258+
# projected_pts[:, 2:5]: original X, Y, Z coordinates in the LiDAR frame
259+
283260
end = time.time()
284261
# print('-------processing time1---', end -start)
285262

@@ -456,6 +433,46 @@ def update(self, vehicle: VehicleState) -> Dict[str, AgentState]:
456433
# print('-------processing time', end -start)
457434
return self.tracked_agents
458435

436+
def save_sensor_data(self, vehicle: VehicleState, latest_image) -> None:
    """Persist one synchronized sensor snapshot (image, LiDAR, vehicle state) under data/.

    Files share a millisecond timestamp prefix so a snapshot can be re-associated later:
      {tstamp}_image.png     - raw camera frame
      {tstamp}_lidar.npz     - raw LiDAR point cloud (key 'lidar')
      {tstamp}_vehstate.txt  - vehicle pose before and after the optional START-frame transform

    Args:
        vehicle: current VehicleState; its pose is logged before and after the transform.
        latest_image: image array (as produced by self.latest_image.copy() in update())
            to be written with cv2.imwrite.
    """
    os.makedirs("data", exist_ok=True)
    # Millisecond timestamp used as the common key for all files of this snapshot.
    tstamp = int(self.vehicle_interface.time() * 1000)
    # 1) Dump raw image
    # FIX: body previously referenced `lastest_image` (typo carried over from update()),
    # which raised NameError since the parameter is named `latest_image`.
    cv2.imwrite(f"data/{tstamp}_image.png", latest_image)
    # 2) Dump raw LiDAR
    np.savez(f"data/{tstamp}_lidar.npz", lidar=self.latest_lidar)
    # 3) Write BEFORE_TRANSFORM pose (the untransformed pose reported by the vehicle)
    with open(f"data/{tstamp}_vehstate.txt", "w") as f:
        vp = vehicle.pose
        f.write(
            f"BEFORE_TRANSFORM "
            f"x={vp.x:.3f}, y={vp.y:.3f}, z={vp.z:.3f}, "
            f"yaw={vp.yaw:.2f}, pitch={vp.pitch:.2f}, roll={vp.roll:.2f}\n"
        )
    # Compute vehicle_start_pose in either START or CURRENT
    if self.use_start_frame:
        # Latch the first pose ever seen as the origin of the START frame.
        if self.start_pose_abs is None:
            self.start_pose_abs = vehicle.pose
        vehicle_start_pose = vehicle.pose.to_frame(
            ObjectFrameEnum.START,
            vehicle.pose,
            self.start_pose_abs
        )
        mode = "START"
    else:
        vehicle_start_pose = vehicle.pose
        mode = "CURRENT"
    # Append the (possibly) transformed pose so both views live in one file per snapshot.
    with open(f"data/{tstamp}_vehstate.txt", "a") as f:
        f.write(
            f"AFTER_TRANSFORM "
            f"x={vehicle_start_pose.x:.3f}, "
            f"y={vehicle_start_pose.y:.3f}, "
            f"z={vehicle_start_pose.z:.3f}, "
            f"yaw={vehicle_start_pose.yaw:.2f}, "
            f"pitch={vehicle_start_pose.pitch:.2f}, "
            f"roll={vehicle_start_pose.roll:.2f}, "
            f"frame={mode}\n"
        )
475+
459476
# ----- Fake Cone Detector 2D (for Testing Purposes) -----
460477

461478

0 commit comments

Comments
 (0)