Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
d993b06
cleaned. ready to merge
May 1, 2025
d404c3d
cleanup and move constants
KenC1014 May 1, 2025
28dc5a7
delete useless functions
leizhenyu-lzy May 1, 2025
6288dd0
integrating new perception changes
May 7, 2025
4d004b0
Update parking_utils.py
leizhenyu-lzy May 7, 2025
a23e3f4
all new updates from perception added and tested
May 7, 2025
96772ed
Merge branch 'parking_perception_integration_merge' of https://github…
May 7, 2025
25cdc43
fixing component naming + moving MissionPlan to mission.py
May 8, 2025
71ae7ef
1. read transform from yaml config 2. read vehicle geometry from confi…
KenC1014 May 8, 2025
7b50576
include newest changes from perception team
KenC1014 May 8, 2025
0964246
update variable name from agent to obstacle
KenC1014 May 9, 2025
2ce4310
adding latest from perception
May 10, 2025
311ba73
fixing bad import
May 10, 2025
1f3cfbb
fix for klampt sim bug
May 12, 2025
ce83e3f
Merge branch 's2025' into parking_perception_integration_merge
krishauser May 12, 2025
077ec81
fixing obstacle
May 12, 2025
2323add
Merge branch 'parking_perception_integration_merge' of https://github…
May 12, 2025
64a9dc8
making sure everything works
May 12, 2025
18ed1fd
Update cone_detection.py
Ren990420 May 12, 2025
defe862
Update __init__.py
Ren990420 May 12, 2025
f39e80a
Update __init__.py
Ren990420 May 12, 2025
4fbc624
Create cone_detection.yaml
Ren990420 May 12, 2025
dbdc37b
update naming to match perception
May 12, 2025
de9799a
Merge branch 'parking_perception_integration_merge' of https://github…
May 12, 2025
35b11f9
Merge branch 's2025' into parking_perception_integration_merge
krishauser May 12, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
70 changes: 35 additions & 35 deletions GEMstack/knowledge/calibration/cameras.yaml
Original file line number Diff line number Diff line change
@@ -1,35 +1,35 @@
cameras:
front:
K:
- [684.83331299, 0.0, 573.37109375]
- [0.0, 684.60968018, 363.70092773]
- [0.0, 0.0, 1.0]
D: [0.0, 0.0, 0.0, 0.0, 0.0]
T_l2c:
- [ 0.0289748006, -0.999580136, 0.0000368439, -0.0307300513]
- [-0.0094993062, -0.0003122155, -0.999954834, -0.386689354 ]
- [ 0.999534999, 0.0289731321, -0.0095043721, -0.671425124 ]
- [ 0.0, 0.0, 0.0, 1.0 ]
T_l2v:
- [ 0.99939639, 0.02547917, 0.023615, 1.1 ]
- [ -0.02530848, 0.99965156, -0.00749882, 0.03773583 ]
- [ -0.02379784, 0.00689664, 0.999693, 1.95320223 ]
- [ 0.0, 0.0, 0.0, 1.0 ]

front_right:
K:
- [1176.25545, 0.0, 966.432645]
- [0.0, 1175.14569, 608.580326]
- [0.0, 0.0, 1.0 ]
D: [-0.270136325, 0.164393255, -0.00160720782, -0.0000741246708, -0.0619939758]
T_l2c:
- [-0.71836368, -0.69527204, -0.02346088, 0.05718003]
- [-0.09720448, 0.13371206, -0.98624154, -0.15983010]
- [ 0.68884317, -0.70619960, -0.16363744, -1.04767285]
- [ 0.0, 0.0, 0.0, 1.0 ]
T_l2v:
- [0.99939639, 0.02547917, 0.023615, 1.1]
- [-0.02530848, 0.99965156, -0.00749882, 0.03773583]
- [-0.02379784, 0.00689664, 0.999693, 1.95320223]
- [0.0, 0.0, 0.0, 1.0 ]

cameras:
front:
K:
- [684.83331299, 0.0, 573.37109375]
- [0.0, 684.60968018, 363.70092773]
- [0.0, 0.0, 1.0]
D: [0.0, 0.0, 0.0, 0.0, 0.0]
T_l2c:
- [ 0.0289748006, -0.999580136, 0.0000368439, -0.0307300513]
- [-0.0094993062, -0.0003122155, -0.999954834, -0.386689354 ]
- [ 0.999534999, 0.0289731321, -0.0095043721, -0.671425124 ]
- [ 0.0, 0.0, 0.0, 1.0 ]
T_l2v:
- [ 0.99939639, 0.02547917, 0.023615, 1.1 ]
- [ -0.02530848, 0.99965156, -0.00749882, 0.03773583 ]
- [ -0.02379784, 0.00689664, 0.999693, 1.95320223 ]
- [ 0.0, 0.0, 0.0, 1.0 ]
front_right:
K:
- [1176.25545, 0.0, 966.432645]
- [0.0, 1175.14569, 608.580326]
- [0.0, 0.0, 1.0 ]
D: [-0.270136325, 0.164393255, -0.00160720782, -0.0000741246708, -0.0619939758]
T_l2c:
- [-0.71836368, -0.69527204, -0.02346088, 0.05718003]
- [-0.09720448, 0.13371206, -0.98624154, -0.15983010]
- [ 0.68884317, -0.70619960, -0.16363744, -1.04767285]
- [ 0.0, 0.0, 0.0, 1.0 ]
T_l2v:
- [0.99939639, 0.02547917, 0.023615, 1.1]
- [-0.02530848, 0.99965156, -0.00749882, 0.03773583]
- [-0.02379784, 0.00689664, 0.999693, 1.95320223]
- [0.0, 0.0, 0.0, 1.0 ]
11 changes: 10 additions & 1 deletion GEMstack/knowledge/defaults/computation_graph.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,9 @@ components:
- lane_detection:
inputs: [vehicle, roadgraph]
outputs: vehicle_lane
- parking_detection:
inputs: obstacles
outputs: [goal, obstacles]
- sign_detection:
inputs: [vehicle, roadgraph]
outputs: roadgraph.signs
Expand Down Expand Up @@ -49,6 +52,12 @@ components:
- driving_logic:
inputs:
outputs: intent
- _mission_planner: # one way
inputs: all
outputs: mission_plan
- _route_planner: # one way
inputs: all
outputs: route
- motion_planning:
inputs: all
outputs: trajectory
Expand All @@ -57,4 +66,4 @@ components:
outputs:
- signaling:
inputs: [intent]
outputs:
outputs:
Binary file added GEMstack/knowledge/detection/cone.pt
Binary file not shown.
205 changes: 205 additions & 0 deletions GEMstack/onboard/perception/parking_detection.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,205 @@
import rospy
import yaml
import numpy as np
from sensor_msgs.msg import PointCloud2
from typing import Dict
from ..component import Component
from ...state import ObjectPose, ObjectFrameEnum, Obstacle, ObstacleMaterialEnum, ObstacleStateEnum
from ..interface.gem import GEMInterface
from .utils.constants import *
from .utils.detection_utils import *
from .utils.parking_utils import *
from .utils.visualization_utils import *


class ParkingSpotsDetector3D(Component):
    """Detect a cone-delimited parking spot and the obstacles bounding it.

    Consumes cone ``Obstacle`` objects produced upstream (state input
    ``obstacles``) and, when exactly ``NUM_CONES_PER_PARKING_SPOT`` cones are
    present, computes a goal parking pose plus the parking obstacles around
    the spot (state outputs ``goal`` and ``obstacles``).  Optionally publishes
    point clouds and markers in the vehicle frame for RViz debugging.

    NOTE(review): ``MarkerArray`` and the ``create_*``/``delete_markers``
    helpers are presumably provided by the ``utils`` wildcard imports —
    confirm against ``visualization_utils``.
    """

    def __init__(
        self,
        vehicle_interface: GEMInterface,
        camera_name: str,
        camera_calib_file: str,
        visualize_3d: bool = True
    ):
        """
        Args:
            vehicle_interface: used for timestamps (``time()``) on outputs.
            camera_name: key under ``cameras:`` in the calibration YAML.
            camera_calib_file: path to the camera/lidar calibration YAML,
                which must provide a 4x4 ``T_l2v`` (lidar-to-vehicle) matrix
                for ``camera_name``.
            visualize_3d: when True, publish RViz visualization topics.
        """
        # Latest detection results, cached so the asynchronous lidar
        # callback can visualize what update() last computed.
        self.vehicle_interface = vehicle_interface
        self.cone_pts_3D = []
        self.ordered_cone_ground_centers_2D = []
        self.goal_parking_spot = None
        self.parking_obstacles_pose = []
        self.parking_obstacles_dim = []
        self.ground_threshold = GROUND_THRESHOLD
        self.visualization = visualize_3d

        # Load camera lidar to vehicle transform from the supplied YAML
        with open(camera_calib_file, 'r') as f:
            calib = yaml.safe_load(f)
        # Validate the config before indexing into it: the previous
        # ``self.T_l2v is None`` check could never fire because np.array()
        # never returns None -- a missing entry raised KeyError instead of
        # reaching the intended error log.
        cam_cfg = (calib or {}).get('cameras', {}).get(camera_name)
        if cam_cfg is None or cam_cfg.get('T_l2v') is None:
            rospy.logerr("Camera calibration information missing. Please load the correct config.")
            self.T_l2v = None
        else:
            self.T_l2v = np.array(cam_cfg['T_l2v'])

        # Subscribers (note: we need top lidar only for visualization)
        self.lidar_sub = rospy.Subscriber("/ouster/points", PointCloud2, self.callback, queue_size=1)
        # self.lidar_sub = self.vehicle_interface.subscribe_sensor("top_lidar", self.callback)

        # Publishers (all topics are for visualization purposes)
        self.pub_lidar_top_vehicle_pc2 = rospy.Publisher("lidar_top_vehicle/point_cloud", PointCloud2, queue_size=10)
        self.pub_vehicle_marker = rospy.Publisher("vehicle/marker", MarkerArray, queue_size=10)
        self.pub_cones_centers_pc2 = rospy.Publisher("cones_detection/centers/point_cloud", PointCloud2, queue_size=10)
        self.pub_parking_spot_marker = rospy.Publisher("parking_spot_detection/marker", MarkerArray, queue_size=10)
        self.pub_polygon_marker = rospy.Publisher("polygon_detection/marker", MarkerArray, queue_size=10)
        self.pub_obstacles_marker = rospy.Publisher("obstacle_detection/marker", MarkerArray, queue_size=10)

    def callback(self, top_lidar_msg):
        """Lidar subscriber callback: redraw the latest cached detections.

        The lidar message itself is only used as a visual backdrop;
        detection happens in update() from upstream cone obstacles.
        """
        # Top lidar points in ouster frame
        lidar_ouster_frame = pc2_to_numpy(top_lidar_msg)

        # Visualize everything in vehicle frame
        if self.visualization:
            self.visualize(
                self.cone_pts_3D,
                self.ordered_cone_ground_centers_2D,
                self.goal_parking_spot,
                self.parking_obstacles_pose,
                self.parking_obstacles_dim,
                lidar_ouster_frame
            )

    def spin(self):
        """Block and process ROS callbacks until shutdown."""
        rospy.spin()

    def rate(self) -> float:
        """Component update rate in Hz."""
        return 10.0  # Hz

    def state_inputs(self) -> list:
        """State channels consumed by update()."""
        return ['obstacles']

    def state_outputs(self) -> list:
        """State channels produced by update()."""
        return ['goal', 'obstacles']

    def visualize(self,
                  cone_pts_3D,
                  ordered_cone_ground_centers_2D,
                  goal_parking_spot,
                  parking_obstacles_pose,
                  parking_obstacles_dim,
                  lidar_ouster_frame):
        """Publish all visualization topics (vehicle frame).

        Draws the ground-filtered lidar cloud, a vehicle marker, the cone
        polygon, the detected parking spot, obstacle boxes, and the 3D cone
        centers projected onto the ground plane.
        """
        # Transform top lidar pointclouds to vehicle frame for visualization
        latest_lidar_vehicle = transform_points(lidar_ouster_frame, self.T_l2v)
        latest_lidar_vehicle = filter_ground_points(latest_lidar_vehicle, self.ground_threshold)
        ros_lidar_top_vehicle_pc2 = create_point_cloud(latest_lidar_vehicle, LIDAR_PC_COLOR, VEHICLE_FRAME)
        self.pub_lidar_top_vehicle_pc2.publish(ros_lidar_top_vehicle_pc2)

        # Create vehicle marker
        ros_vehicle_marker = create_markers([VEHICLE_FRAME_ORIGIN],
                                            [VEHICLE_MARKER_DIM],
                                            VEHICLE_MARKER_COLOR,
                                            "markers", VEHICLE_FRAME)
        self.pub_vehicle_marker.publish(ros_vehicle_marker)

        # Delete previous markers so stale detections do not linger in RViz
        ros_delete_polygon_marker = delete_markers("polygon", MAX_POLYGON_MARKERS)
        self.pub_polygon_marker.publish(ros_delete_polygon_marker)
        ros_delete_parking_spot_markers = delete_markers("parking_spot", MAX_PARKING_SPOT_MARKERS)
        self.pub_parking_spot_marker.publish(ros_delete_parking_spot_markers)
        ros_delete_obstacles_markers = delete_markers("obstacles", MAX_OBSTACLE_MARKERS)
        self.pub_obstacles_marker.publish(ros_delete_obstacles_markers)

        # Draw polygon first
        if len(ordered_cone_ground_centers_2D) > 0:
            ros_polygon_marker = create_polygon_marker(ordered_cone_ground_centers_2D, ref_frame=VEHICLE_FRAME)
            self.pub_polygon_marker.publish(ros_polygon_marker)

        # Create parking spot marker
        if goal_parking_spot:
            ros_parking_spot_marker = create_parking_spot_marker(goal_parking_spot, ref_frame=VEHICLE_FRAME)
            self.pub_parking_spot_marker.publish(ros_parking_spot_marker)

        # Create parking obstacles marker
        if parking_obstacles_pose and parking_obstacles_dim:
            ros_obstacles_marker = create_markers(parking_obstacles_pose,
                                                  parking_obstacles_dim,
                                                  OBSTACLE_MARKER_COLOR,
                                                  "obstacles", VEHICLE_FRAME)
            self.pub_obstacles_marker.publish(ros_obstacles_marker)

        # Draw 3D cone centers (flattened to the ground plane)
        if len(cone_pts_3D) > 0:
            cone_ground_centers = np.array(cone_pts_3D)
            cone_ground_centers[:, 2] = 0.0
            cone_ground_centers = [tuple(point) for point in cone_ground_centers]
            ros_cones_centers_pc2 = create_point_cloud(cone_ground_centers, color=CONE_CENTER_PC_COLOR)
            self.pub_cones_centers_pc2.publish(ros_cones_centers_pc2)

    def update(self, cone_obstacles: Dict[str, Obstacle]):
        """Detect the parking spot from cone obstacles.

        Args:
            cone_obstacles: mapping of obstacle id to cone ``Obstacle``.

        Returns:
            ``[goal_pose, parking_obstacles]`` where ``goal_pose`` is an
            ``ObjectPose`` in the CURRENT frame and ``parking_obstacles``
            maps ids to barrier ``Obstacle`` objects, or ``None`` when no
            parking spot is found.
        """
        # Initial variables
        goal_parking_spot = None
        parking_obstacles_pose = []
        parking_obstacles_dim = []
        ordered_cone_ground_centers_2D = []

        # Populate cone points (projected to the ground plane, z = 0)
        cone_pts_3D = []
        for cone in cone_obstacles.values():
            cone_pt_3D = (cone.pose.x, cone.pose.y, 0.0)
            cone_pts_3D.append(cone_pt_3D)

        # Detect parking spot only when exactly the expected number of
        # cones is visible (one spot bounded by NUM_CONES_PER_PARKING_SPOT)
        if len(cone_pts_3D) == NUM_CONES_PER_PARKING_SPOT:
            goal_parking_spot, parking_obstacles_pose, parking_obstacles_dim, ordered_cone_ground_centers_2D = detect_parking_spot(cone_pts_3D)

        # Update local variables for visualization
        self.cone_pts_3D = cone_pts_3D
        self.ordered_cone_ground_centers_2D = ordered_cone_ground_centers_2D
        self.goal_parking_spot = goal_parking_spot
        self.parking_obstacles_pose = parking_obstacles_pose
        self.parking_obstacles_dim = parking_obstacles_dim

        # Return if no goal parking spot is found
        if not goal_parking_spot:
            return None

        # Constructing parking obstacles
        current_time = self.vehicle_interface.time()
        obstacle_id = 0
        parking_obstacles = {}
        for o_pose, o_dim in zip(parking_obstacles_pose, parking_obstacles_dim):
            x, y, z, yaw = o_pose
            obstacle_pose = ObjectPose(
                t=current_time,
                x=x,
                y=y,
                z=z,
                yaw=yaw,
                pitch=0.0,
                roll=0.0,
                frame=ObjectFrameEnum.CURRENT
            )
            new_obstacle = Obstacle(
                pose=obstacle_pose,
                dimensions=o_dim,
                outline=None,
                material=ObstacleMaterialEnum.BARRIER,
                collidable=True, state=ObstacleStateEnum.STANDING
            )
            parking_obstacles[f"parking_obstacle_{obstacle_id}"] = new_obstacle
            obstacle_id += 1

        # Constructing goal pose (on the ground plane, z = 0)
        x, y, yaw = goal_parking_spot
        goal_pose = ObjectPose(
            t=current_time,
            x=x,
            y=y,
            z=0.0,
            yaw=yaw,
            pitch=0.0,
            roll=0.0,
            frame=ObjectFrameEnum.CURRENT
        )

        new_state = [goal_pose, parking_obstacles]
        return new_state
34 changes: 34 additions & 0 deletions GEMstack/onboard/perception/utils/constants.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
import yaml

def ignore_relative_path(loader, node):
    """YAML constructor for the custom ``!relative_path`` tag.

    Instead of resolving the tag, simply return the underlying scalar so
    config files that use it can still be parsed with the safe loader.
    """
    return loader.construct_scalar(node)
# Register the custom tag handler so configs containing ``!relative_path``
# can be parsed by yaml.safe_load below.
yaml.SafeLoader.add_constructor('!relative_path', ignore_relative_path)

# Load vehicle geometry.  Fall back to the hard-coded GEM e4 defaults when
# the config is absent or unreadable (the path below is relative to the
# current working directory, so this can legitimately fail when run from
# outside the repository root) -- previously a missing file crashed import.
vehicle_geometry = None
try:
    with open('./GEMstack/knowledge/vehicle/gem_e4_geometry.yaml', 'r') as f:
        vehicle_geometry = yaml.safe_load(f)
except (OSError, yaml.YAMLError):
    vehicle_geometry = None

# Vehicle geometry constants (defaults used when the config is unavailable)
GEM_E4_LENGTH = 3.2
GEM_E4_WIDTH = 1.7
if vehicle_geometry:
    GEM_E4_LENGTH = vehicle_geometry['length']
    GEM_E4_WIDTH = vehicle_geometry['width']

# Parking constants
GROUND_THRESHOLD = -0.15          # z cutoff for ground-point filtering
NUM_CONES_PER_PARKING_SPOT = 4    # cones expected to bound one spot
VEHICLE_FRAME = "vehicle"         # ROS frame id used for visualization

# Visualization constants
VEHICLE_FRAME_ORIGIN = [0.0, 0.0, 0.0, 0.0]
VEHICLE_MARKER_DIM = [0.8, 0.5, 0.3]
VEHICLE_MARKER_COLOR = (0.0, 0.0, 1.0, 1)     # RGBA, opaque blue
OBSTACLE_MARKER_COLOR = (1.0, 0.0, 0.0, 0.4)  # RGBA, translucent red
LIDAR_PC_COLOR = (255, 0, 0)
CONE_CENTER_PC_COLOR = (255, 0, 255)
MAX_POLYGON_MARKERS = 1
MAX_PARKING_SPOT_MARKERS = 1
MAX_OBSTACLE_MARKERS = 5
25 changes: 25 additions & 0 deletions GEMstack/onboard/perception/utils/detection_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
import ros_numpy
import numpy as np

def transform_points(points, transform):
    """Apply a 4x4 homogeneous transform to an (N, 3) array of points.

    Args:
        points: (N, 3) array of xyz coordinates.
        transform: (4, 4) homogeneous transformation matrix.

    Returns:
        (N, 3) array of transformed xyz coordinates.
    """
    homogeneous = np.hstack((points, np.ones((points.shape[0], 1))))
    # (T @ P^T)^T == P @ T^T; drop the homogeneous coordinate afterwards.
    return (homogeneous @ transform.T)[:, :3]


def filter_ground_points(lidar_points, ground_threshold = 0):
    """Discard points at or below the ground plane.

    Args:
        lidar_points: (N, 3) array of xyz coordinates.
        ground_threshold: z value (strictly) above which points are kept.

    Returns:
        (M, 3) array containing only the above-ground points.
    """
    above_ground = lidar_points[:, 2] > ground_threshold
    return lidar_points[above_ground]


def pc2_to_numpy(pc2_msg, want_rgb=False):
    """Convert a ROS PointCloud2 message to a filtered (N, 3) numpy array.

    Args:
        pc2_msg: sensor_msgs/PointCloud2 message.
        want_rgb: unused; kept for interface compatibility.

    Returns:
        (N, 3) array of xyz points with x > 0 and z in (-2.7, -1.5).
        NOTE(review): the z window presumably selects points near the ground
        in the sensor frame -- confirm against the lidar mounting height.
    """
    # Convert the ROS message to a numpy structured array
    cloud = ros_numpy.point_cloud2.pointcloud2_to_array(pc2_msg)
    # Stack x,y,z fields to a (N,3) array
    xyz = np.stack((np.array(cloud['x']).ravel(),
                    np.array(cloud['y']).ravel(),
                    np.array(cloud['z']).ravel()), axis=1)
    # Keep forward-facing points within the configured z band
    keep = (xyz[:, 0] > 0) & (xyz[:, 2] < -1.5) & (xyz[:, 2] > -2.7)
    return xyz[keep]
Loading
Loading