|
| 1 | +#%% |
| 2 | +import cv2 |
| 3 | +import open3d as o3d |
| 4 | +import numpy as np |
| 5 | +import pyvista as pv |
| 6 | +from typing import Literal |
| 7 | +from tqdm import tqdm |
| 8 | +#%% |
| 9 | +import os |
| 10 | +os.chdir('/mnt/GEMstack/GEMstack/offboard/calibration') |
| 11 | +from tools.save_cali import * |
| 12 | +#%% |
# --- Session configuration ---
N_PAIRS = 20   # number of (frame, frame+STEP) pairs fed to the hand-eye solver
STEP = 3       # frame spacing between the two elements of each pair
cam = 'rr'     # camera id; selects the intrinsics YAML and the image folder
| 16 | + |
| 17 | +#%% |
def get_shape():
    """Return the camera image size as (width, height) in pixels."""
    width, height = 1920, 1200
    return width, height
| 20 | + |
def getK():
    """Load the stored intrinsics for `cam` and return the optimal
    undistorted camera matrix for the full image size."""
    intrinsics_yaml = f"/mnt/GEMstack/GEMstack/knowledge/calibration/gem_e4_{cam}_in.yaml"
    K, distort = load_in(intrinsics_yaml, mode='matrix', return_distort=True)
    width, height = get_shape()
    # alpha=1 keeps all source pixels in the rectified view
    optimal_K, _roi = cv2.getOptimalNewCameraMatrix(
        K, distort, (width, height), 1, (width, height))
    return optimal_K
| 27 | + |
def get_img_np(img_path):
    """Read *img_path* as grayscale, undistort it with the stored
    intrinsics for `cam`, and return it as a numpy array."""
    intrinsics_yaml = f"/mnt/GEMstack/GEMstack/knowledge/calibration/gem_e4_{cam}_in.yaml"
    K, distort = load_in(intrinsics_yaml, mode='matrix', return_distort=True)
    gray = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    return np.asarray(cv2.undistort(gray, K, distort))
| 34 | + |
def get_pc_np(pcd_path):
    """Load a .pcd file and return its points as an (N, 3) array,
    with all-zero rows dropped."""
    cloud = o3d.io.read_point_cloud(pcd_path)
    points = np.asarray(cloud.points)
    keep = ~np.all(points == 0, axis=1)  # (0,0,0) rows are invalid lidar returns
    return points[keep]
| 40 | + |
def vis(pc):
    """Display point cloud *pc* in an interactive pyvista window (blocking)."""
    cloud = pv.PolyData(pc)
    plotter = pv.Plotter(notebook=False)
    plotter.add_mesh(
        cloud,
        render_points_as_spheres=True,
        point_size=2,
        color='red',
    )
    plotter.show()
| 51 | + |
| 52 | +#%% |
def pcd_paths():
    """Yield the lidar .pcd file path for every captured frame, in order.

    Frame ids run 0 .. N_PAIRS*STEP inclusive, zero-padded to 6 digits.
    """
    root = '/mnt/GEMstack/GEMstack/offboard/calibration/calibration_by_SfM/data/pc/'
    for frame_id in range(N_PAIRS * STEP + 1):
        path = root + str(frame_id).zfill(6) + '.pcd'
        print(path)
        yield path
| 60 | + |
def img_paths():
    """Yield the camera .png file path for every captured frame, in order.

    Frame ids run 0 .. N_PAIRS*STEP inclusive, zero-padded to 6 digits.
    """
    root = f'/mnt/GEMstack/GEMstack/offboard/calibration/calibration_by_SfM/data/{cam}/images/'
    for frame_id in range(N_PAIRS * STEP + 1):
        path = root + str(frame_id).zfill(6) + '.png'
        print(path)
        yield path
| 68 | + |
def pcs():
    """Yield each frame's point cloud as a numpy array."""
    yield from map(get_pc_np, pcd_paths())
| 72 | + |
def imgs():
    """Yield each frame's undistorted grayscale image as a numpy array."""
    yield from map(get_img_np, img_paths())
| 76 | + |
def iterpair(iterable, step):
    """Yield (earlier, later) pairs of elements of *iterable* spaced *step* apart.

    With step=3 and elements e0, e1, e2, ... this yields
    (e0, e3), (e3, e6), (e6, e9), ...

    The original implementation consumed the first element with a for/break
    loop and then iterated the argument again; for a re-iterable sequence
    (list, range) the second loop restarted from the beginning, pairing the
    first element with itself. Drawing every read from one shared iterator
    fixes that while keeping identical behavior for one-shot iterators.
    The parameter was also renamed away from the builtin `iter`.

    Args:
        iterable: any iterable of elements.
        step: positive spacing between the two elements of each pair.

    Yields:
        (previous_anchor, current) tuples; nothing for an empty input.
    """
    it = iter(iterable)      # single iterator shared by both reads
    pre = next(it, None)     # first anchor; harmless None if input is empty
    for i, v in enumerate(it):
        if i % step == step - 1:
            yield pre, v
            pre = v          # the later element anchors the next pair
| 86 | + |
| 87 | +#%% |
def findE_img(img1, img2, K):
    """Estimate the relative camera pose between two grayscale images.

    SIFT features matched with FLANN + Lowe's ratio test feed a RANSAC
    essential-matrix estimate, which is decomposed with recoverPose.

    Args:
        img1, img2: grayscale images as numpy arrays.
        K: 3x3 camera intrinsic matrix.

    Returns:
        4x4 homogeneous transform (rotation + unit-norm translation) from
        camera 1 to camera 2, or None when estimation is not possible.
    """
    sift = cv2.SIFT.create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    if des1 is None or des2 is None or len(des1) < 5 or len(des2) < 5:
        return None  # insufficient features

    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    # knnMatch may return fewer than 2 candidates per query point; the
    # original `for m, n in matches` unpack crashed on those entries.
    good_matches = [pair[0] for pair in matches
                    if len(pair) == 2 and pair[0].distance < 0.8 * pair[1].distance]
    if len(good_matches) < 5:
        return None  # too few matches for the 5-point essential-matrix solver

    pts1 = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 2)
    pts2 = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 2)

    E, mask = cv2.findEssentialMat(pts1, pts2, K, method=cv2.RANSAC,
                                   prob=0.999, threshold=1.0)
    if E is None or mask.sum() < 5:
        return None  # essential matrix computation failed
    _, R_rel, t_rel, mask = cv2.recoverPose(E, pts1, pts2, K, mask=mask)
    ret = np.eye(4)
    ret[:3, :3] = R_rel
    ret[:3, 3] = t_rel.flatten()
    return ret
| 114 | + |
def findE_pc(pc1, pc2, mode: Literal['icp', 'kp', 'svd']):
    """Estimate the rigid transform taking point cloud *pc1* onto *pc2*.

    Only the 'icp' mode is implemented: point-to-point ICP seeded with the
    identity (frames are temporally close, so little motion is expected).

    Args:
        pc1, pc2: (N, 3) numpy arrays of points.
        mode: registration method; anything other than 'icp' prints a
            notice and returns None.

    Returns:
        4x4 transformation matrix, or None for unimplemented modes.
    """
    if mode != 'icp':
        print(mode, 'no imp')
        return None

    normal_search = o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30)

    source = o3d.geometry.PointCloud()
    source.points = o3d.utility.Vector3dVector(pc1)
    source.estimate_normals(search_param=normal_search)

    target = o3d.geometry.PointCloud()
    target.points = o3d.utility.Vector3dVector(pc2)
    target.estimate_normals(search_param=normal_search)

    result = o3d.pipelines.registration.registration_icp(
        source, target,
        0.1,                    # max correspondence distance
        np.eye(4),              # identity init: assume continuous movement
        o3d.pipelines.registration.TransformationEstimationPointToPoint(),
        o3d.pipelines.registration.ICPConvergenceCriteria(
            max_iteration=30,
            relative_rmse=1e-6,
        ),
    )
    return result.transformation
| 145 | + |
| 146 | +#%% |
# Collect per-pair relative motions: camera (from images) and lidar (from ICP).
rcam, tcam = [], []
rlidar, tlidar = [], []
#%%
# Intrinsics are constant across frames; the original re-read the YAML and
# recomputed the optimal camera matrix on every iteration — hoisted here.
K = getK()
for i, ((img1, img2), (pc1, pc2)) in tqdm(enumerate(zip(
        iterpair(map(get_img_np, img_paths()), STEP),
        iterpair(map(get_pc_np, pcd_paths()), STEP))), total=N_PAIRS):
    E_cam = findE_img(img1, img2, K)
    print(E_cam)
    E_lidar = findE_pc(pc1, pc2, mode='icp')
    print(E_lidar)
    if E_cam is None or E_lidar is None:
        continue  # unsolved
    if np.isnan(E_cam).any() or np.isnan(E_lidar).any():
        continue  # has nan
    if np.linalg.norm(E_lidar[:3, 3]) > 0.1 * STEP:
        continue  # translation too large to be physically plausible
    rcam.append(E_cam[:3, :3])
    tcam.append(E_cam[:3, 3])
    rlidar.append(E_lidar[:3, :3])
    tlidar.append(E_lidar[:3, 3])
| 168 | + |
| 169 | +#%% |
# Hand-eye solve: camera relative motions (A) against inverted lidar motions (B).
rt2cam, tt2cam = [], []
for r, t in zip(rlidar, tlidar):
    # Build the 4x4 lidar motion and invert it to get the target2cam side.
    B = np.eye(4)
    B[:3, :3] = r
    B[:3, 3] = t
    B_inv = np.linalg.inv(B)
    rt2cam.append(B_inv[:3, :3])
    tt2cam.append(B_inv[:3, 3])

X = cv2.calibrateHandEye(
    R_gripper2base=rcam, t_gripper2base=tcam,
    R_target2cam=rt2cam, t_target2cam=tt2cam,
    method=cv2.CALIB_HAND_EYE_PARK,
)
#%%
# Assemble the result as a 4x4 transformation matrix: T_cam_lidar.
T_cam_lidar = np.eye(4)
T_cam_lidar[:3, :3] = X[0]
T_cam_lidar[:3, 3] = X[1].flatten()
# (removed web-scrape residue: "0 commit comments")