Skip to content

Commit 430bee1

Browse files
Merge pull request #143 from krishauser/EstCal_A
State estimation HW 1
2 parents c3f0449 + 16cd934 commit 430bee1

11 files changed

Lines changed: 258 additions & 282 deletions

File tree

GEMstack/.DS_Store

2 KB
Binary file not shown.

GEMstack/knowledge/calibration/gem_e4.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
calibration_date: "2024-03-05" # Date of calibration YYYY-MM-DD
1+
calibration_date: "2025-02-16" # Date of calibration YYYY-MM-DD
22
reference: rear_axle_center # rear axle center
33
rear_axle_height: 0.33 # height of rear axle center above flat ground
44
gnss_location: [1.10,0,1.62] # meters, taken from https://github.com/hangcui1201/POLARIS_GEM_e2_Real/blob/main/vehicle_drivers/gem_gnss_control/scripts/gem_gnss_tracker_stanley_rtk.py. Note conflict with pure pursuit location?
Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
11
reference: rear_axle_center # rear axle center
2-
rotation: [[0,0,1],[-1,0,0],[0,-1,0]] # rotation matrix mapping z to forward, x to left, y to down, guesstimated
3-
center_position: [1.78,0,1.58] # meters, center camera, guesstimated
2+
rotation: [[-0.025131, -0.0304479, 0.99922038],
3+
[-0.99892897, 0.0396095, -0.0239167],
4+
[-0.0388504, -0.99875123, -0.03141071]] # rotation matrix mapping z to forward, x to left, y to down, guesstimated
5+
center_position: [1.78251567,0.18649424,1.5399846] # meters, center camera, guesstimated
Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
11
reference: rear_axle_center # rear axle center
2-
position: [1.10,0,2.03] # meters, calibrated by Hang's watchful eye
3-
rotation: [[1,0,0],[0,1,0],[0,0,1]] #rotation matrix mapping lidar frame to vehicle frame
2+
position: [1.1, 0.03773044170906172, 1.9525244316515322]
3+
rotation: [[ 0.99941328, 0.02547416, 0.02289458],
4+
[-0.02530855, 0.99965159, -0.00749488],
5+
[-0.02307753, 0.00691106, 0.99970979]]
Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
1+
# GEMstack Offboard Calibration
2+
3+
This repository contains tools for offline calibration of LiDAR and camera sensors to the vehicle coordinate system on the GEM E4 platform. The calibration pipeline consists of three stages:
4+
5+
1. **LiDAR-to-Vehicle**
6+
2. **Camera-to-Vehicle**
7+
3. **LiDAR-to-Camera**
8+
9+
---
10+
11+
12+
## Calibration Pipeline
13+
14+
### 1. LiDAR-to-Vehicle Calibration (`lidar_to_vehicle.py`)
15+
**Method**:
16+
- **Ground Plane Detection**:
17+
1. Crop LiDAR points near ground (Z ∈ [-3, -2])
18+
2. Use RANSAC to fit a plane to ground points
19+
3. Compute vehicle height (`tz`) using plane equation and axle height offset (0.2794m)
20+
4. Derive pitch (`rx`) and roll (`ry`) from plane normal vector
21+
22+
- **Frontal Object Alignment**:
23+
1. Crop LiDAR data to frontal area (X ∈ [0,20], Y ∈ [-1,1])
24+
2. Fit line to object points for yaw (`rz`) estimation
25+
3. Compute translation (`tx`, `ty`) using vehicle dimensions
26+
27+
**Usage**:
28+
29+
python3 lidar_to_vehicle.py # Edit LiDAR data paths in script
30+
31+
32+
### 2. CAMERA-to-Vehicle Calibration (`camera_to_vehicle_manual.py`)
33+
**Method**:
34+
1. Capture camera intrinsics using camera_info.py (ROS node)
35+
2. Manually select 4 matching points in RGB image and LiDAR cloud
36+
3. Solve PnP problem to compute camera extrinsics
37+
38+
**Usage**:
39+
1. Get camera intrinsics:
40+
rosrun offboard/calibration/camera_info.py # Prints intrinsic matrix
41+
2. Update camera_in in script with intrinsics
42+
3. Update data paths in script
43+
4. Run calibration:
44+
python3 camera_to_vehicle_manual.py
45+
46+
47+
### 3. LIDAR-to-CAMERA Calibration (`lidar_to_camera.py`)
48+
**Method**:
49+
1. Invert Camera-to-Vehicle matrix
50+
2. Multiply with LiDAR-to-Vehicle matrix
51+
52+
**Usage**:
53+
54+
python3 lidar_to_camera.py # Ensure T_lidar_vehicle and T_camera_vehicle matrices are updated
55+
56+
57+
### Visualization Tools
58+
59+
**3D Alignment Check**:
60+
1. Use vis() function in scripts to view calibrated LiDAR/camera clouds
61+
2. Toggle VIS = True in lidar_to_vehicle.py for ground plane/object visualization
62+
3. Use test_transforms.py to visualize lidar point cloud on top of png image. Helps verify accuracy of lidar->camera.
63+
64+
**Projection Validation**:
65+
1. RGB image overlaid with transformed LiDAR points (Z-buffered)
66+
2. Frontal view comparison of camera and LiDAR data
67+
68+
69+
70+
71+
72+
73+
### Assumptions and Notes
74+
75+
1. The sensor data should be time-aligned.
76+
2. The flat surface should be detectable in the Lidar scan
77+
3. Magic Numbers:
78+
Axle height (0.2794m) from physical measurements
79+
Frontal crop areas based on vehicle dimensions
80+
4. Validation: Use pyvista visualizations to verify alignment
81+
82+
83+
84+
85+
86+

GEMstack/offboard/calibration/lidar_to_camera_manual.py renamed to GEMstack/offboard/calibration/camera_to_vehicle_manual.py

Lines changed: 24 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -3,18 +3,22 @@
33
from scipy.spatial.transform import Rotation as R
44
import matplotlib.pyplot as plt
55
import numpy as np
6+
from visualizer import visualizer
67

7-
rgb_path = '/mount/wp/GEMstack/data/color21.png'
8-
depth_path = '/mount/wp/GEMstack/data/depth21.tif'
9-
lidar_path = '/mount/wp/GEMstack/data/lidar21.npz'
8+
N = 8 #how many point pairs you want to select
9+
10+
# Update Depending on Where Data Stored
11+
rgb_path = './data/color32.png'
12+
depth_path = './data/depth32.tif'
13+
lidar_path = './data/lidar32.npz'
1014

1115
img = cv2.imread(rgb_path, cv2.IMREAD_UNCHANGED)
1216

1317
lidar_points = np.load(lidar_path)['arr_0']
1418
lidar_points = lidar_points[~np.all(lidar_points== 0, axis=1)] # remove (0,0,0)'s
1519

1620
rx,ry,rz = 0.006898647163954201, 0.023800082245145304, -0.025318355743942974
17-
tx,ty,tz = -1.1, 0.037735827433173136, 1.953202227766785
21+
tx,ty,tz = 1.1, 0.037735827433173136, 1.953202227766785
1822
rot = R.from_euler('xyz',[rx,ry,rz]).as_matrix()
1923
lidar_ex = np.hstack([rot,[[tx],[ty],[tz]]])
2024
lidar_ex = np.vstack([lidar_ex,[0,0,0,1]])
@@ -25,6 +29,7 @@
2529
[ 0. , 0. , 1. ]
2630
], dtype=np.float32)
2731

32+
2833
#%%
2934
# blurred = cv2.GaussianBlur(img, (5, 5), 0)
3035
# edges = cv2.Canny(blurred, threshold1=50, threshold2=150)
@@ -33,43 +38,11 @@
3338

3439
#%%
3540
import pyvista as pv
36-
def vis(title='', ratio=1):
41+
def vis(title='', ratio=1,notebook=False):
3742
print(title)
3843
pv.set_jupyter_backend('client')
39-
plotter = pv.Plotter(notebook=True)
40-
plotter.show_axes()
41-
class foo:
42-
def set_cam(self,pos=(-20*ratio,0,20*ratio),foc=(0,0,0)):
43-
plotter.camera.position = pos
44-
plotter.camera.focal_point = foc
45-
return self
46-
def add_pc(self,pc,ratio=ratio,**kargs):
47-
plotter.add_mesh(
48-
pv.PolyData(pc*ratio),
49-
render_points_as_spheres=True,
50-
point_size=2,
51-
**kargs)
52-
return self
53-
def add_line(self,p1,p2,ratio=ratio,**kargs):
54-
plotter.add_mesh(
55-
pv.Line(p1*ratio,p2*ratio),
56-
**kargs,
57-
line_width=1)
58-
return self
59-
def add_box(self,bound,trans,ratio=ratio):
60-
l,w,h = map(lambda x:x*ratio,bound)
61-
box = pv.Box(bounds=(-l/2,l/2,-w/2,w/2,-h/2,h/2))
62-
box = box.translate(list(map(lambda x:x*ratio,trans)))
63-
plotter.add_mesh(box, color='yellow')
64-
return self
65-
def show(self):
66-
plotter.show()
67-
return self
68-
def close(self):
69-
plotter.close()
70-
return None
7144

72-
return foo().set_cam()
45+
return visualizer().set_cam()
7346
def crop(pc,ix=None,iy=None,iz=None):
7447
# crop a subrectangle in a pointcloud
7548
# usage: crop(pc, ix = (x_min,x_max), ...)
@@ -85,11 +58,11 @@ def crop(pc,ix=None,iy=None,iz=None):
8558

8659

8760
lidar_post = np.pad(lidar_points,((0,0),(0,1)),constant_values=1) @ lidar_ex.T[:,:3]
88-
lidar_post = crop(lidar_post,ix=(0,8),iy=(-5,5))
89-
vis().add_pc(lidar_post).show()
61+
lidar_post = crop(lidar_post,ix=(0,10),iy=(-5,5))
62+
# vis(notebook=False).add_pc(lidar_post).show()
9063

9164
#%%
92-
def pick_4_img(img):
65+
def pick_n_img(img,n=4):
9366
corners = [] # Reset the corners list
9467
def click_event(event, x, y, flags, param):
9568
if event == cv2.EVENT_LBUTTONDOWN:
@@ -101,23 +74,23 @@ def click_event(event, x, y, flags, param):
10174
cv2.setMouseCallback('Image', click_event, img)
10275

10376
while True:
104-
if len(corners) == 4:
77+
if len(corners) == n:
10578
break
10679
if cv2.waitKey(1) & 0xFF == ord('q'):
10780
return None
10881

10982
cv2.destroyAllWindows()
11083

11184
return corners
112-
cpoints = np.array(pick_4_img(img)).astype(float)
85+
cpoints = np.array(pick_n_img(img,N)).astype(float)
11386
print(cpoints)
11487

11588
#%%
116-
def pick_4_pc(point_cloud):
89+
def pick_n_pc(point_cloud,n=4):
11790
points = []
11891
def cb(pt,*args):
11992
points.append(pt)
120-
while len(points)!=4:
93+
while len(points)!=n:
12194
points = []
12295
cloud = pv.PolyData(point_cloud)
12396
plotter = pv.Plotter(notebook=False)
@@ -128,7 +101,7 @@ def cb(pt,*args):
128101
plotter.show()
129102
return points
130103

131-
lpoints = np.array(pick_4_pc(lidar_post))
104+
lpoints = np.array(pick_n_pc(lidar_post,N))
132105
print(lpoints)
133106
# %%
134107
success, rvec, tvec = cv2.solvePnP(lpoints, cpoints, camera_in, None)
@@ -182,10 +155,9 @@ def depth_to_points(depth_img: np.ndarray, intrinsics: np.ndarray):
182155
depth_img = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED)
183156
camera_points = depth_to_points(depth_img, camera_in)
184157

185-
v=vis()
186-
v.add_pc(np.pad(lidar_points,((0,0),(0,1)),constant_values=1)@lidar_ex.T@T.T[:,:3],color='blue')
187-
v.add_pc(camera_points,color='red')
188-
v.show()
189-
190158
#%%
191-
print(np.vstack([(lidar_ex.T@T.T[:,:3]).T,[0,0,0,1]]))
159+
v2c = T
160+
print('vehicle->camera:',v2c)
161+
c2v = np.linalg.inv(v2c)
162+
print('camera->vehicle:',c2v)
163+

0 commit comments

Comments
 (0)