diff --git a/.DS_Store b/.DS_Store deleted file mode 100644 index 043038f0b..000000000 Binary files a/.DS_Store and /dev/null differ diff --git a/GEMstack/.DS_Store b/GEMstack/.DS_Store index 1db5bc5b2..59bdd74a6 100644 Binary files a/GEMstack/.DS_Store and b/GEMstack/.DS_Store differ diff --git a/GEMstack/knowledge/calibration/gem_e4.yaml b/GEMstack/knowledge/calibration/gem_e4.yaml index da971c253..d061ff018 100644 --- a/GEMstack/knowledge/calibration/gem_e4.yaml +++ b/GEMstack/knowledge/calibration/gem_e4.yaml @@ -1,4 +1,4 @@ -calibration_date: "2024-03-05" # Date of calibration YYYY-MM-DD +calibration_date: "2025-02-16" # Date of calibration YYYY-MM-DD reference: rear_axle_center # rear axle center rear_axle_height: 0.33 # height of rear axle center above flat ground gnss_location: [1.10,0.1.62] # meters, taken from https://github.com/hangcui1201/POLARIS_GEM_e2_Real/blob/main/vehicle_drivers/gem_gnss_control/scripts/gem_gnss_tracker_stanley_rtk.py. Note conflict with pure pursuit location? 
diff --git a/GEMstack/knowledge/calibration/gem_e4_oak.yaml b/GEMstack/knowledge/calibration/gem_e4_oak.yaml index cb9a6c0d0..4f0d0cd53 100644 --- a/GEMstack/knowledge/calibration/gem_e4_oak.yaml +++ b/GEMstack/knowledge/calibration/gem_e4_oak.yaml @@ -1,3 +1,5 @@ reference: rear_axle_center # rear axle center -rotation: [[0,0,1],[-1,0,0],[0,-1,0]] # rotation matrix mapping z to forward, x to left, y to down, guesstimated -center_position: [1.78,0,1.58] # meters, center camera, guesstimated +rotation: [[-0.025131, -0.0304479, 0.99922038], + [-0.99892897, 0.0396095, -0.0239167], + [-0.0388504, -0.99875123, -0.03141071]] # rotation matrix mapping z to forward, x to left, y to down, guesstimated +center_position: [1.78251567,0.18649424,1.5399846] # meters, center camera, guesstimated \ No newline at end of file diff --git a/GEMstack/knowledge/calibration/gem_e4_ouster.yaml b/GEMstack/knowledge/calibration/gem_e4_ouster.yaml index 5987373a6..4ab6b42e0 100644 --- a/GEMstack/knowledge/calibration/gem_e4_ouster.yaml +++ b/GEMstack/knowledge/calibration/gem_e4_ouster.yaml @@ -1,3 +1,5 @@ reference: rear_axle_center # rear axle center -position: [1.10,0,2.03] # meters, calibrated by Hang's watchful eye -rotation: [[1,0,0],[0,1,0],[0,0,1]] #rotation matrix mapping lidar frame to vehicle frame \ No newline at end of file +position: [1.1, 0.03773044170906172, 1.9525244316515322] +rotation: [[ 0.99941328, 0.02547416, 0.02289458], + [-0.02530855, 0.99965159, -0.00749488], + [-0.02307753, 0.00691106, 0.99970979]] \ No newline at end of file diff --git a/GEMstack/offboard/calibration/README.md b/GEMstack/offboard/calibration/README.md new file mode 100644 index 000000000..a7e3644b5 --- /dev/null +++ b/GEMstack/offboard/calibration/README.md @@ -0,0 +1,86 @@ +# GEMstack Offboard Calibration + +This repository contains tools for offline calibration of LiDAR and camera sensors to the vehicle coordinate system on the GEM E4 platform. 
The calibration pipeline consists of three stages: + +1. **LiDAR-to-Vehicle** +2. **Camera-to-Vehicle** +3. **LiDAR-to-Camera** + +--- + + +## Calibration Pipeline + +### 1. LiDAR-to-Vehicle Calibration (`lidar_to_vehicle.py`) +**Method**: +- **Ground Plane Detection**: + 1. Crop LiDAR points near ground (Z ∈ [-3, -2]) + 2. Use RANSAC to fit a plane to ground points + 3. Compute vehicle height (`tz`) using plane equation and axle height offset (0.2794m) + 4. Derive pitch (`rx`) and roll (`ry`) from plane normal vector + +- **Frontal Object Alignment**: + 1. Crop LiDAR data to frontal area (X ∈ [0,20], Y ∈ [-1,1]) + 2. Fit line to object points for yaw (`rz`) estimation + 3. Compute translation (`tx`, `ty`) using vehicle dimensions + +**Usage**: + +python3 lidar_to_vehicle.py # Edit LiDAR data paths in script + + +### 2. CAMERA-to-Vehicle Calibration (`camera_to_vehicle_manual.py`) +**Method**: + 1. Capture camera intrinsics using camera_info.py (ROS node) + 2. Manually select N matching point pairs (default N = 8, set at the top of the script) in RGB image and LiDAR cloud + 3. Solve PnP problem to compute camera extrinsics + +**Usage**: + 1. Get camera intrinsics: + rosrun offboard/calibration/camera_info.py # Prints intrinsic matrix + 2. Update camera_in in script with intrinsics + 3. Update data paths in script + 4. Run calibration: + python3 camera_to_vehicle_manual.py + + +### 3. LIDAR-to-CAMERA Calibration (`lidar_to_camera.py`) +**Method**: + 1. Invert Camera-to-Vehicle matrix + 2. Multiply with LiDAR-to-Vehicle matrix + +**Usage**: + +python3 lidar_to_camera.py # Ensure T_lidar_vehicle and T_camera_vehicle matrices are updated + + +### Visualization Tools + +**3D Alignment Check**: + 1. Use vis() function in scripts to view calibrated LiDAR/camera clouds + 2. Toggle VIS = True in lidar_to_vehicle.py for ground plane/object visualization + 3. Use test_transforms.py to visualize the LiDAR point cloud on top of the PNG image. Helps verify accuracy of the LiDAR->camera transform. + +**Projection Validation**: + 1. 
RGB image overlaid with transformed LiDAR points (Z-buffered) + 2. Frontal view comparison of camera and LiDAR data + + + + + + +### Assumptions and Notes + +1. The sensor data should be time-aligned. +2. The flat surface should be detectable in the LiDAR scan. +3. Magic Numbers: + Axle height (0.2794m) from physical measurements + Frontal crop areas based on vehicle dimensions +4. Validation: Use pyvista visualizations to verify alignment + + + + + + diff --git a/GEMstack/offboard/calibration/lidar_to_camera_manual.py b/GEMstack/offboard/calibration/camera_to_vehicle_manual.py similarity index 66% rename from GEMstack/offboard/calibration/lidar_to_camera_manual.py rename to GEMstack/offboard/calibration/camera_to_vehicle_manual.py index 4d3cc9719..b88b22288 100644 --- a/GEMstack/offboard/calibration/lidar_to_camera_manual.py +++ b/GEMstack/offboard/calibration/camera_to_vehicle_manual.py @@ -3,10 +3,14 @@ from scipy.spatial.transform import Rotation as R import matplotlib.pyplot as plt import numpy as np +from visualizer import visualizer -rgb_path = '/mount/wp/GEMstack/data/color21.png' -depth_path = '/mount/wp/GEMstack/data/depth21.tif' -lidar_path = '/mount/wp/GEMstack/data/lidar21.npz' +N = 8 #how many point pairs you want to select + +# Update Depending on Where Data Stored +rgb_path = './data/color32.png' +depth_path = './data/depth32.tif' +lidar_path = './data/lidar32.npz' img = cv2.imread(rgb_path, cv2.IMREAD_UNCHANGED) @@ -14,7 +18,7 @@ lidar_points = lidar_points[~np.all(lidar_points== 0, axis=1)] # remove (0,0,0)'s rx,ry,rz = 0.006898647163954201, 0.023800082245145304, -0.025318355743942974 -tx,ty,tz = -1.1, 0.037735827433173136, 1.953202227766785 +tx,ty,tz = 1.1, 0.037735827433173136, 1.953202227766785 rot = R.from_euler('xyz',[rx,ry,rz]).as_matrix() lidar_ex = np.hstack([rot,[[tx],[ty],[tz]]]) lidar_ex = np.vstack([lidar_ex,[0,0,0,1]]) @@ -25,6 +29,7 @@ [ 0. , 0. , 1. 
] ], dtype=np.float32) + #%% # blurred = cv2.GaussianBlur(img, (5, 5), 0) # edges = cv2.Canny(blurred, threshold1=50, threshold2=150) @@ -33,43 +38,11 @@ #%% import pyvista as pv -def vis(title='', ratio=1): +def vis(title='', ratio=1,notebook=False): print(title) pv.set_jupyter_backend('client') - plotter = pv.Plotter(notebook=True) - plotter.show_axes() - class foo: - def set_cam(self,pos=(-20*ratio,0,20*ratio),foc=(0,0,0)): - plotter.camera.position = pos - plotter.camera.focal_point = foc - return self - def add_pc(self,pc,ratio=ratio,**kargs): - plotter.add_mesh( - pv.PolyData(pc*ratio), - render_points_as_spheres=True, - point_size=2, - **kargs) - return self - def add_line(self,p1,p2,ratio=ratio,**kargs): - plotter.add_mesh( - pv.Line(p1*ratio,p2*ratio), - **kargs, - line_width=1) - return self - def add_box(self,bound,trans,ratio=ratio): - l,w,h = map(lambda x:x*ratio,bound) - box = pv.Box(bounds=(-l/2,l/2,-w/2,w/2,-h/2,h/2)) - box = box.translate(list(map(lambda x:x*ratio,trans))) - plotter.add_mesh(box, color='yellow') - return self - def show(self): - plotter.show() - return self - def close(self): - plotter.close() - return None - return foo().set_cam() + return visualizer().set_cam() def crop(pc,ix=None,iy=None,iz=None): # crop a subrectangle in a pointcloud # usage: crop(pc, ix = (x_min,x_max), ...) 
@@ -85,11 +58,11 @@ def crop(pc,ix=None,iy=None,iz=None): lidar_post = np.pad(lidar_points,((0,0),(0,1)),constant_values=1) @ lidar_ex.T[:,:3] -lidar_post = crop(lidar_post,ix=(0,8),iy=(-5,5)) -vis().add_pc(lidar_post).show() +lidar_post = crop(lidar_post,ix=(0,10),iy=(-5,5)) +# vis(notebook=False).add_pc(lidar_post).show() #%% -def pick_4_img(img): +def pick_n_img(img,n=4): corners = [] # Reset the corners list def click_event(event, x, y, flags, param): if event == cv2.EVENT_LBUTTONDOWN: @@ -101,7 +74,7 @@ def click_event(event, x, y, flags, param): cv2.setMouseCallback('Image', click_event, img) while True: - if len(corners) == 4: + if len(corners) == n: break if cv2.waitKey(1) & 0xFF == ord('q'): return None @@ -109,15 +82,15 @@ def click_event(event, x, y, flags, param): cv2.destroyAllWindows() return corners -cpoints = np.array(pick_4_img(img)).astype(float) +cpoints = np.array(pick_n_img(img,N)).astype(float) print(cpoints) #%% -def pick_4_pc(point_cloud): +def pick_n_pc(point_cloud,n=4): points = [] def cb(pt,*args): points.append(pt) - while len(points)!=4: + while len(points)!=n: points = [] cloud = pv.PolyData(point_cloud) plotter = pv.Plotter(notebook=False) @@ -128,7 +101,7 @@ def cb(pt,*args): plotter.show() return points -lpoints = np.array(pick_4_pc(lidar_post)) +lpoints = np.array(pick_n_pc(lidar_post,N)) print(lpoints) # %% success, rvec, tvec = cv2.solvePnP(lpoints, cpoints, camera_in, None) @@ -182,10 +155,9 @@ def depth_to_points(depth_img: np.ndarray, intrinsics: np.ndarray): depth_img = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED) camera_points = depth_to_points(depth_img, camera_in) -v=vis() -v.add_pc(np.pad(lidar_points,((0,0),(0,1)),constant_values=1)@lidar_ex.T@T.T[:,:3],color='blue') -v.add_pc(camera_points,color='red') -v.show() - #%% -print(np.vstack([(lidar_ex.T@T.T[:,:3]).T,[0,0,0,1]])) \ No newline at end of file +v2c = T +print('vehicle->camera:',v2c) +c2v = np.linalg.inv(v2c) +print('camera->vehicle:',c2v) + diff --git 
a/GEMstack/offboard/calibration/icp.py b/GEMstack/offboard/calibration/icp.py deleted file mode 100644 index 376c285a2..000000000 --- a/GEMstack/offboard/calibration/icp.py +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/env python3 - -import os -import glob -import numpy as np -import cv2 -import open3d as o3d - -# --- Adjust these to match your intrinsics and data details --- -INTRINSICS = np.array([ - [684.83331299, 0. , 573.37109375], - [ 0. , 684.60968018, 363.70092773], - [ 0. , 0. , 1. ] -], dtype=np.float32) - -# Depth scale: how to go from raw .tif values to actual "Z" in meters. -# In your capture code, you used: dimage = (dimage/4000*0xffff) -# Possibly each pixel is stored with range 0..65535. Adjust as needed. -DEPTH_SCALE = 4000.0 # Example factor if your depth was in mm or a certain scale. - -def depth_to_points(depth_img: np.ndarray, intrinsics: np.ndarray): - """ - Convert a single-channel depth image into an Nx3 array of 3D points - in the camera coordinate system. - - depth_img: 2D array of type uint16 or float with depth values - - intrinsics: 3x3 camera matrix - """ - fx = intrinsics[0,0] - fy = intrinsics[1,1] - cx = intrinsics[0,2] - cy = intrinsics[1,2] - - # Indices of each pixel - h, w = depth_img.shape - i_range = np.arange(h) - j_range = np.arange(w) - jj, ii = np.meshgrid(j_range, i_range) # shape (h,w) - - # Flatten - ii = ii.flatten().astype(np.float32) - jj = jj.flatten().astype(np.float32) - depth = depth_img.flatten().astype(np.float32) - - # Filter out zero / invalid depths - valid_mask = (depth > 0) - ii = ii[valid_mask] - jj = jj[valid_mask] - depth = depth[valid_mask] - - # Reproject to 3D (camera frame) - # X = (x - cx) * Z / fx, Y = (y - cy) * Z / fy, Z = depth - # Note: Z must be in meters or consistent units - z = depth / DEPTH_SCALE # Convert from your .tif scale to real meters - x = (jj - cx) * z / fx - y = (ii - cy) * z / fy - points3d = np.stack((x, y, z), axis=-1) # shape (N,3) - - return points3d - -def 
load_lidar_points(npz_file: str): - """ - Load the Nx3 LiDAR points from a .npz file created by 'np.savez'. - """ - data = np.load(npz_file) - # The capture code used: np.savez(lidar_fn, pc) - # So 'data' might have the default key 'arr_0' or 'pc' if named - # Inspect data.files to see. Let's assume 'arr_0' or single key: - arr_key = data.files[0] - points = data[arr_key] - # shape check, must be Nx3 or Nx4, ... - return points - -def make_open3d_pcd(points: np.ndarray): - """ - Convert Nx3 numpy array into an Open3D point cloud object. - """ - pcd = o3d.geometry.PointCloud() - pcd.points = o3d.utility.Vector3dVector(points) - return pcd - -def perform_icp(camera_pcd: o3d.geometry.PointCloud, - lidar_pcd: o3d.geometry.PointCloud): - """ - Perform local ICP alignment of camera_pcd (source) to lidar_pcd (target). - Returns a transformation 4x4 that maps camera -> lidar (or vice versa). - """ - # 1) Estimate normals if you want point-to-plane - lidar_pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid( - radius=1.0, max_nn=30)) - camera_pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid( - radius=1.0, max_nn=30)) - - # 2) Initial guess: identity or something better if you have one - init_guess = np.eye(4) - - # 3) Choose a threshold for inlier distance - threshold = 0.5 # adjust based on your environment scale - - # 4) Run ICP (point-to-plane or point-to-point) - print("[ICP] Running ICP alignment ...") - result = o3d.pipelines.registration.registration_icp( - camera_pcd, # source - lidar_pcd, # target - threshold, - init_guess, - estimation_method=o3d.pipelines.registration.TransformationEstimationPointToPlane() - ) - - print("[ICP] Done. 
Fitness = %.4f, RMSE = %.4f" % (result.fitness, result.inlier_rmse)) - print("[ICP] Transformation:\n", result.transformation) - return result.transformation - -def main(folder='data'): - color_files = sorted(glob.glob(os.path.join(folder, "color*.png"))) - icp_results = [] # to store (index, transform, fitness, rmse) - - for color_path in color_files: - # Extract index from filename, e.g. color10.png -> 10 - basename = os.path.basename(color_path) - idx_str = basename.replace("color","").replace(".png","") - - if int(idx_str) in range(77): - depth_path = os.path.join(folder, f"depth{idx_str}.tif") - lidar_path = os.path.join(folder, f"lidar{idx_str}.npz") - - if not (os.path.exists(depth_path) and os.path.exists(lidar_path)): - continue - - # Load depth / convert to 3D - depth_img = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED) - if depth_img is None: - continue - camera_points = depth_to_points(depth_img, INTRINSICS) - camera_pcd = make_open3d_pcd(camera_points) - - # Load LiDAR - lidar_points = load_lidar_points(lidar_path) - lidar_pcd = make_open3d_pcd(lidar_points) - - lidar_pcd.estimate_normals( - search_param=o3d.geometry.KDTreeSearchParamHybrid( - radius=1.0, # Adjust based on your scene scale - max_nn=30 - ) - ) - - # Also estimate normals on the Camera (source) cloud (recommended) - camera_pcd.estimate_normals( - search_param=o3d.geometry.KDTreeSearchParamHybrid( - radius=1.0, - max_nn=30 - ) - ) - - # Now you can run Point-to-Plane ICP - init_guess = np.eye(4) - threshold = 0.5 # or some appropriate distance - result_icp = o3d.pipelines.registration.registration_icp( - camera_pcd, - lidar_pcd, - threshold, - init_guess, - o3d.pipelines.registration.TransformationEstimationPointToPlane() - ) - - print("ICP result:", result_icp.transformation) - print("Fitness:", result_icp.fitness, " RMSE:", result_icp.inlier_rmse) - - # After processing all frames, we can analyze or average - if len(icp_results) > 0: - # Example: pick the best frame by highest fitness - 
best_frame = max(icp_results, key=lambda x: x[2]) # x[2] is fitness - print("\nBest frame by fitness was index=", best_frame[0], - " with fitness=", best_frame[2], " rmse=", best_frame[3]) - else: - print("No frames processed properly.") - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/GEMstack/offboard/calibration/lidar_to_camera.py b/GEMstack/offboard/calibration/lidar_to_camera.py new file mode 100644 index 000000000..d45b7ab3d --- /dev/null +++ b/GEMstack/offboard/calibration/lidar_to_camera.py @@ -0,0 +1,20 @@ +import numpy as np + +T_lidar_vehicle = np.array([[ 0.99939639, 0.02547917, 0.023615, 1.1 ], + [-0.02530848, 0.99965156, -0.00749882, 0.03773583], + [-0.02379784, 0.00689664, 0.999693, 1.95320223], + [ 0., 0., 0., 1. ]]) + +T_camera_vehicle = np.array([[-0.025131, -0.0304479, 0.99922038, 1.78251567], + [-0.99892897, 0.0396095, -0.0239167, 0.18649424], + [-0.0388504, -0.99875123, -0.03141071, 1.5399846 ], + [ 0., 0., 0., 1. ]]) + +# Invert Camera->Vehicle transformation +T_vehicle_camera = np.linalg.inv(T_camera_vehicle) + +T_lidar_camera = T_vehicle_camera @ T_lidar_vehicle + +# Print result +print("Lidar to Camera Transformation Matrix:") +print(T_lidar_camera) diff --git a/GEMstack/offboard/calibration/make_gem_e4_ouster_v2.py b/GEMstack/offboard/calibration/lidar_to_vehicle.py similarity index 79% rename from GEMstack/offboard/calibration/make_gem_e4_ouster_v2.py rename to GEMstack/offboard/calibration/lidar_to_vehicle.py index 6e0b25940..d82446e48 100644 --- a/GEMstack/offboard/calibration/make_gem_e4_ouster_v2.py +++ b/GEMstack/offboard/calibration/lidar_to_vehicle.py @@ -1,11 +1,14 @@ #%% -import os import sys import math import numpy as np from scipy.spatial.transform import Rotation as R -os.getcwd() +import pyvista as pv +import matplotlib.pyplot as plt +from visualizer import visualizer + VIS = False # True to show visuals +VIS = True # True to show visuals #%% things to extract tx,ty,tz,rx,ry,rz = [None] * 6 @@ 
-13,41 +16,10 @@ #%%============================================================== #======================= util functions ========================= #================================================================ -if VIS: - import pyvista as pv - import matplotlib.pyplot as plt def vis(title='', ratio=1): print(title) pv.set_jupyter_backend('client') - plotter = pv.Plotter(notebook=True) - plotter.camera.position = (-20*ratio,0,20*ratio) - plotter.camera.focal_point = (0,0,0) - plotter.show_axes() - class foo: - def add_pc(self,pc,ratio=ratio,**kargs): - plotter.add_mesh( - pv.PolyData(pc*ratio), - render_points_as_spheres=True, - point_size=2, - **kargs) - return self - def add_line(self,p1,p2,ratio=ratio,**kargs): - plotter.add_mesh( - pv.Line(p1*ratio,p2*ratio), - **kargs, - line_width=1) - return self - def add_box(self,bound,trans,ratio=ratio): - l,w,h = map(lambda x:x*ratio,bound) - box = pv.Box(bounds=(-l/2,l/2,-w/2,w/2,-h/2,h/2)) - box = box.translate(list(map(lambda x:x*ratio,trans))) - plotter.add_mesh(box, color='yellow') - return self - def show(self): - plotter.show() - return self - - return foo() + return visualizer().set_cam() def load_scene(path): sc = np.load(path)['arr_0'] sc = sc[~np.all(sc == 0, axis=1)] # remove (0,0,0)'s @@ -75,7 +47,7 @@ def fit_plane_ransac(pc,tol=0.01): model.fit(pc[:,:-1], pc[:,-1]) a = model.estimator_.coef_ inter = model.estimator_.intercept_ - class foo: + class visual: def plot(self): inliers = pc[model.inlier_mask_] if pc.shape[1] == 2: @@ -92,7 +64,7 @@ def results(self): # return: array(D-1), float, array(N,3) # ^: , coeffs, intercept toward the plane, inliers of the fit return a,inter - return foo() + return visual() from scipy.spatial import cKDTree def pc_diff(pc0,pc1,tol=0.1): @@ -123,7 +95,8 @@ def pc_diff(pc0,pc1,tol=0.1): #================================================================ #%% load scene for ground plane -sc = load_scene('/mount/wp/GEMstack/data/lidar70.npz') +# Update depending on where data 
is stored +sc = load_scene('./data/lidar1.npz') # %% we crop to keep the ground cropped_sc = crop(sc,iz = (-3,-2)) @@ -161,9 +134,9 @@ def pc_diff(pc0,pc1,tol=0.1): rot = R.from_euler('xyz',[rx,ry,0]).as_matrix() if False: # True to use the diff method to extract object. - # load data - sc0 = load_scene('/mount/wp/GEMstack/data/lidar70.npz') - sc1 = load_scene('/mount/wp/GEMstack/data/lidar78.npz') + # load data: update depending on where data is stored + sc0 = load_scene('./data/lidar70.npz') + sc1 = load_scene('./data/lidar78.npz') sc0 = sc0 @ rot.T + [0,0,tz] sc1 = sc1 @ rot.T + [0,0,tz] @@ -179,7 +152,8 @@ def pc_diff(pc0,pc1,tol=0.1): objects = pc_diff(cropped0,cropped1) else: #False to use only cropping - sc1 = load_scene('/mount/wp/GEMstack/data/lidar1.npz') + # Update depending on where data is stored + sc1 = load_scene('./data/lidar1.npz') objects = sc1 @ rot.T + [0,0,tz] @@ -204,9 +178,9 @@ def pc_diff(pc0,pc1,tol=0.1): # tx = ty = 0 # hx,hy = np.median(diff,axis=0)[:2] # rz = -np.arctan2(hy,hx) -tx = - (2.56 - 1.46) # https://publish.illinois.edu/robotics-autonomy-resources/gem-e4/hardware/ -ty = - inter rz = - math.atan(c) +tx = 2.56 - 1.46 # https://publish.illinois.edu/robotics-autonomy-resources/gem-e4/hardware/ +ty = - inter * math.cos(rz) if VIS: p1 = (0,inter,0) @@ -224,10 +198,9 @@ def pc_diff(pc0,pc1,tol=0.1): rot = R.from_euler('xyz',[rx,ry,rz]).as_matrix() cal_sc1 = sc1 @ rot.T + [tx,ty,tz] - # projection - # cal_sc1[:,1] = 0 v = vis(ratio=100) - v.add_pc(cal_sc1*[0,1,1],color='blue') + proj = [1,1,1] + v.add_pc(cal_sc1*proj,color='blue') v.add_box((2.56,.61*2,2.03+height_axel),[2.56/2,0,(2.03+height_axel)/2]) v.show() # the yellow box should be 11 inches above the ground diff --git a/GEMstack/offboard/calibration/test_transforms.py b/GEMstack/offboard/calibration/test_transforms.py new file mode 100644 index 000000000..69bdefbc9 --- /dev/null +++ b/GEMstack/offboard/calibration/test_transforms.py @@ -0,0 +1,57 @@ +import numpy as np +import 
cv2 +import matplotlib.pyplot as plt + +# Load LiDAR points +lidar_data = np.load("./data/lidar1.npz") # Update filename if needed +lidar_points = lidar_data['arr_0'] # Ensure the correct key + +# Load Camera Image +image = cv2.imread("./data/color1.png") # Update filename if needed +image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Convert BGR to RGB + +# Transformation Matrix +T_lidar_camera = np.array([ + [0.001090, -0.999489, -0.031941, 0.149698], + [-0.007664, 0.031932, -0.999461, -0.397813], + [0.999970, 0.001334, -0.007625, -0.691405], + [0.000000, 0.000000, 0.000000, 1.000000] +]) + +# Convert LiDAR points to homogeneous coordinates +num_points = lidar_points.shape[0] +lidar_homogeneous = np.hstack((lidar_points, np.ones((num_points, 1)))) # (N, 4) + +# Transform LiDAR points to Camera Frame +lidar_points_camera = (T_lidar_camera @ lidar_homogeneous.T).T # (N, 4) + +# intrinsic matrix +K = np.array([ + [684.83331299, 0. , 573.37109375], + [ 0. , 684.60968018, 363.70092773], + [ 0. , 0. , 1. 
] +], dtype=np.float32) + +# Extract 3D points in camera frame +X_c = lidar_points_camera[:, 0] +Y_c = lidar_points_camera[:, 1] +Z_c = lidar_points_camera[:, 2] # Depth + +# Avoid division by zero +valid = Z_c > 0 +X_c, Y_c, Z_c = X_c[valid], Y_c[valid], Z_c[valid] + +# Project points onto image plane +u = (K[0, 0] * X_c / Z_c) + K[0, 2] +v = (K[1, 1] * Y_c / Z_c) + K[1, 2] + +# Filter points within image bounds +img_h, img_w, _ = image.shape +valid_pts = (u >= 0) & (u < img_w) & (v >= 0) & (v < img_h) +u, v = u[valid_pts], v[valid_pts] + +# Plot projected points on camera image +plt.imshow(image) +plt.scatter(u, v, s=2, c='r') # Red dots for projected points +plt.title("Projected LiDAR Points on Camera Image") +plt.show() diff --git a/GEMstack/offboard/calibration/visualizer.py b/GEMstack/offboard/calibration/visualizer.py new file mode 100644 index 000000000..538172068 --- /dev/null +++ b/GEMstack/offboard/calibration/visualizer.py @@ -0,0 +1,43 @@ +import pyvista as pv + +class visualizer: + def __init__(self): + self.plotter = pv.Plotter(notebook=False) + self.plotter.show_axes() + + def set_cam(self, pos=(-20, 0, 20), foc=(0, 0, 0)): + self.plotter.camera.position = pos + self.plotter.camera.focal_point = foc + return self + + def add_pc(self, pc, ratio=1, **kargs): + self.plotter.add_mesh( + pv.PolyData(pc * ratio), + render_points_as_spheres=True, + point_size=2, + **kargs + ) + return self + + def add_line(self, p1, p2, ratio=1, **kargs): + self.plotter.add_mesh( + pv.Line(p1 * ratio, p2 * ratio), + **kargs, + line_width=1 + ) + return self + + def add_box(self, bound, trans, ratio=1): + l, w, h = map(lambda x: x * ratio, bound) + box = pv.Box(bounds=(-l/2, l/2, -w/2, w/2, -h/2, h/2)) + box = box.translate(list(map(lambda x: x * ratio, trans))) + self.plotter.add_mesh(box, color='yellow') + return self + + def show(self): + self.plotter.show() + return self + + def close(self): + self.plotter.close() + return None