Skip to content

Commit ade0559

Browse files
udymdanimeshsingh98
authored and committed
Modified braking function for experiment
1 parent 7dd38e3 commit ade0559

7 files changed

Lines changed: 940 additions & 2 deletions

File tree

GEMstack/knowledge/vehicle/gem_e4_sensor_environment.yaml

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,5 +2,4 @@ ros_topics:
   top_lidar: /ouster/points
   front_camera: /oak/rgb/image_raw
   front_depth: /oak/stereo/image_raw
-  gnss: /septentrio_gnss/insnavgeod
+  gnss: /novatel/inspva
Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
# ROS Headers
2+
import rospy
3+
from sensor_msgs.msg import Image,PointCloud2, CameraInfo
4+
import sensor_msgs.point_cloud2 as pc2
5+
import ctypes
6+
import struct
7+
import pickle
8+
import image_geometry
9+
10+
import numpy as np
11+
import os
12+
import time
13+
14+
# Most recent CameraInfo message; stays None until the subscriber fires.
camera_image = None


def camera_callback(info: CameraInfo):
    """Cache the latest CameraInfo message for later intrinsics lookup."""
    global camera_image
    camera_image = info
19+
20+
def get_intrinsics():
    """Print the 3x3 intrinsic matrix derived from the cached CameraInfo.

    NOTE(review): assumes camera_callback has already populated the
    module-level camera_image — fromCameraInfo would fail on None.
    """
    cam_model = image_geometry.PinholeCameraModel()
    cam_model.fromCameraInfo(camera_image)
    print(cam_model.intrinsicMatrix())
24+
25+
def main(folder='data', start_index=1):
    """Subscribe to the OAK camera-info topic and print intrinsics once per second.

    Parameters
    ----------
    folder, start_index:
        Unused in this script; retained for signature parity with the
        sibling capture scripts so the shared __main__ stub works.
    """
    rospy.init_node("capture_cam_info", disable_signals=True)
    caminfo_sub = rospy.Subscriber("/oak/rgb/camera_info", CameraInfo, camera_callback)
    while True:
        # BUG FIX: sleep unconditionally — the original only slept when a
        # message had arrived, so the loop busy-spun at 100% CPU until the
        # first CameraInfo was received.
        time.sleep(1)
        if camera_image is not None:
            get_intrinsics()
32+
33+
if __name__ == '__main__':
    import sys

    # Optional CLI overrides: <folder> <start_index>
    folder = sys.argv[1] if len(sys.argv) >= 2 else 'data'
    start_index = int(sys.argv[2]) if len(sys.argv) >= 3 else 1
    main(folder, start_index)
Lines changed: 102 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,102 @@
1+
# ROS Headers
2+
import rospy
3+
from sensor_msgs.msg import Image,PointCloud2
4+
import sensor_msgs.point_cloud2 as pc2
5+
import ctypes
6+
import struct
7+
8+
# OpenCV and cv2 bridge
9+
import cv2
10+
from cv_bridge import CvBridge
11+
import numpy as np
12+
import os
13+
import time
14+
15+
# Module-level caches holding the most recent message seen on each topic.
lidar_points = None   # sensor_msgs/PointCloud2
camera_image = None   # sensor_msgs/Image (color)
depth = None          # appears unused here — kept for compatibility
depth_image = None    # sensor_msgs/Image (stereo depth)
bridge = CvBridge()   # converts ROS Image messages <-> OpenCV arrays


def lidar_callback(lidar: PointCloud2):
    """Cache the newest LiDAR point cloud message."""
    global lidar_points
    lidar_points = lidar
24+
25+
def camera_callback(img: Image):
    """Cache the newest color image message."""
    global camera_image
    camera_image = img
28+
29+
def depth_callback(img: Image):
    """Cache the newest stereo depth image message."""
    global depth_image
    depth_image = img
32+
33+
def pc2_to_numpy(pc2_msg, want_rgb=False):
    """Convert a sensor_msgs/PointCloud2 message into a numpy array.

    Parameters
    ----------
    pc2_msg : PointCloud2
        Input cloud; NaN points are skipped.
    want_rgb : bool
        If True, expect a packed-float RGB field as the 4th component and
        return an (N, 6) array of [x, y, z, r, g, b] with colors in 0-255.
        If False, return an (N, 3) array of [x, y, z].

    Raises
    ------
    ValueError
        If want_rgb is True but the cloud lacks a 4th (RGB) field.
    """
    gen = pc2.read_points(pc2_msg, skip_nans=True)
    if want_rgb:
        xyzpack = np.array(list(gen), dtype=np.float32)
        if xyzpack.shape[1] != 4:
            raise ValueError("PointCloud2 does not have points")
        xyzrgb = np.empty((xyzpack.shape[0], 6))
        xyzrgb[:, :3] = xyzpack[:, :3]
        for row, x in enumerate(xyzpack):
            rgb = x[3]
            # Reinterpret the float32 bits as an unsigned int so that
            # bitwise channel extraction is possible.
            s = struct.pack('>f', rgb)
            packed = struct.unpack('>l', s)[0]
            # you can get back the float value by the inverse operations
            pack = ctypes.c_uint32(packed).value
            r = (pack & 0x00FF0000) >> 16
            g = (pack & 0x0000FF00) >> 8
            b = (pack & 0x000000FF)
            # BUG FIX: the original reassigned the enumerate index `i` to the
            # unpacked integer, so `xyzrgb[i, 3:]` wrote r,g,b to the wrong
            # (typically out-of-range) row. Use a distinct name for the row.
            xyzrgb[row, 3:] = (r, g, b)
        return xyzrgb
    else:
        return np.array(list(gen), dtype=np.float32)[:, :3]
56+
57+
def save_scan(lidar_fn, color_fn, depth_fn):
    """Persist the currently cached lidar/color/depth frames to disk.

    lidar -> .npz (Nx3 float array), color -> .png, depth -> 16-bit image
    with raw value 4000 (presumably mm — TODO confirm) mapped to 0xffff.
    """
    print("Saving scan to", lidar_fn, color_fn)

    pc = pc2_to_numpy(lidar_points, want_rgb=False)  # convert lidar feed to numpy
    np.savez(lidar_fn, pc)
    cv2.imwrite(color_fn, bridge.imgmsg_to_cv2(camera_image))

    dimage = bridge.imgmsg_to_cv2(depth_image)
    dimage_non_nan = dimage[np.isfinite(dimage)]
    print("Depth range", np.min(dimage_non_nan), np.max(dimage_non_nan))
    dimage = np.nan_to_num(dimage, nan=0, posinf=0, neginf=0)
    dimage = (dimage / 4000 * 0xffff)
    print("Depth pixel range", np.min(dimage), np.max(dimage))
    # BUG FIX: depths beyond the 4000 scale previously wrapped around when
    # cast to uint16; clamp so far pixels saturate at 0xffff instead.
    dimage = np.clip(dimage, 0, 0xffff).astype(np.uint16)
    cv2.imwrite(depth_fn, dimage)
72+
73+
def main(folder='data', start_index=1):
    """Continuously capture lidar/color/depth frames into *folder*.

    Files are numbered from *start_index* (lidar{i}.npz, color{i}.png,
    depth{i}.tif) and a new triple is saved roughly every 0.5 s.
    Runs until interrupted with Ctrl+C.
    """
    rospy.init_node("capture_ouster_oak", disable_signals=True)
    lidar_sub = rospy.Subscriber("/ouster/points", PointCloud2, lidar_callback)
    camera_sub = rospy.Subscriber("/oak/rgb/image_raw", Image, camera_callback)
    depth_sub = rospy.Subscriber("/oak/stereo/image_raw", Image, depth_callback)
    # BUG FIX: the original hard-coded index = 0, ignoring start_index and
    # silently overwriting any previously captured files.
    index = start_index
    print(" Storing lidar point clouds as npz")
    print(" Storing color images as png")
    print(" Storing depth images as tif")
    print(" Ctrl+C to quit")
    while True:
        # save_scan also reads lidar_points, so wait for all three feeds.
        if camera_image is not None and depth_image is not None and lidar_points is not None:
            cv2.imshow("result", bridge.imgmsg_to_cv2(camera_image))
            # BUG FIX: cv2 windows only refresh while the GUI event loop is
            # pumped; without waitKey the preview never renders.
            cv2.waitKey(1)
            time.sleep(.5)
            files = [
                os.path.join(folder, 'lidar{}.npz'.format(index)),
                os.path.join(folder, 'color{}.png'.format(index)),
                os.path.join(folder, 'depth{}.tif'.format(index))]
            save_scan(*files)
            index += 1
        else:
            # Avoid a 100% CPU busy-wait before the first frames arrive.
            time.sleep(0.1)
94+
if __name__ == '__main__':
    import sys

    # Optional CLI overrides: <folder> <start_index>
    folder = sys.argv[1] if len(sys.argv) >= 2 else 'data'
    start_index = int(sys.argv[2]) if len(sys.argv) >= 3 else 1
    main(folder, start_index)
Lines changed: 179 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,179 @@
1+
#!/usr/bin/env python3
2+
3+
import os
4+
import glob
5+
import numpy as np
6+
import cv2
7+
import open3d as o3d
8+
9+
# --- Adjust these to match your intrinsics and data details ---
10+
INTRINSICS = np.array([
11+
[684.83331299, 0. , 573.37109375],
12+
[ 0. , 684.60968018, 363.70092773],
13+
[ 0. , 0. , 1. ]
14+
], dtype=np.float32)
15+
16+
# Depth scale: how to go from raw .tif values to actual "Z" in meters.
17+
# In your capture code, you used: dimage = (dimage/4000*0xffff)
18+
# Possibly each pixel is stored with range 0..65535. Adjust as needed.
19+
DEPTH_SCALE = 4000.0 # Example factor if your depth was in mm or a certain scale.
20+
21+
def depth_to_points(depth_img: np.ndarray, intrinsics: np.ndarray):
22+
"""
23+
Convert a single-channel depth image into an Nx3 array of 3D points
24+
in the camera coordinate system.
25+
- depth_img: 2D array of type uint16 or float with depth values
26+
- intrinsics: 3x3 camera matrix
27+
"""
28+
fx = intrinsics[0,0]
29+
fy = intrinsics[1,1]
30+
cx = intrinsics[0,2]
31+
cy = intrinsics[1,2]
32+
33+
# Indices of each pixel
34+
h, w = depth_img.shape
35+
i_range = np.arange(h)
36+
j_range = np.arange(w)
37+
jj, ii = np.meshgrid(j_range, i_range) # shape (h,w)
38+
39+
# Flatten
40+
ii = ii.flatten().astype(np.float32)
41+
jj = jj.flatten().astype(np.float32)
42+
depth = depth_img.flatten().astype(np.float32)
43+
44+
# Filter out zero / invalid depths
45+
valid_mask = (depth > 0)
46+
ii = ii[valid_mask]
47+
jj = jj[valid_mask]
48+
depth = depth[valid_mask]
49+
50+
# Reproject to 3D (camera frame)
51+
# X = (x - cx) * Z / fx, Y = (y - cy) * Z / fy, Z = depth
52+
# Note: Z must be in meters or consistent units
53+
z = depth / DEPTH_SCALE # Convert from your .tif scale to real meters
54+
x = (jj - cx) * z / fx
55+
y = (ii - cy) * z / fy
56+
points3d = np.stack((x, y, z), axis=-1) # shape (N,3)
57+
58+
return points3d
59+
60+
def load_lidar_points(npz_file: str):
    """Load the LiDAR point array from an .npz archive written by np.savez.

    The capture script saved the array positionally (np.savez(fn, pc)),
    so the archive's first key — normally 'arr_0' — holds the points.
    Shape is expected to be Nx3 (or Nx4) but is not validated here.
    """
    archive = np.load(npz_file)
    first_key = archive.files[0]
    return archive[first_key]
72+
73+
def make_open3d_pcd(points: np.ndarray):
    """Wrap an Nx3 numpy array in an Open3D PointCloud object."""
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(points)
    return cloud
80+
81+
def perform_icp(camera_pcd: o3d.geometry.PointCloud,
                lidar_pcd: o3d.geometry.PointCloud):
    """Align camera_pcd (source) onto lidar_pcd (target) with local ICP.

    Returns the 4x4 transformation mapping camera coordinates into the
    lidar frame (or vice versa, depending on interpretation).
    """
    # Point-to-plane ICP requires normals on both clouds.
    normal_param = o3d.geometry.KDTreeSearchParamHybrid(radius=1.0, max_nn=30)
    lidar_pcd.estimate_normals(search_param=normal_param)
    camera_pcd.estimate_normals(search_param=normal_param)

    # Identity start pose — replace with a better guess if one is available.
    init_guess = np.eye(4)

    # Inlier correspondence distance; tune to the environment's scale.
    threshold = 0.5

    print("[ICP] Running ICP alignment ...")
    result = o3d.pipelines.registration.registration_icp(
        camera_pcd,  # source
        lidar_pcd,   # target
        threshold,
        init_guess,
        estimation_method=o3d.pipelines.registration.TransformationEstimationPointToPlane()
    )

    print("[ICP] Done. Fitness = %.4f, RMSE = %.4f" % (result.fitness, result.inlier_rmse))
    print("[ICP] Transformation:\n", result.transformation)
    return result.transformation
112+
113+
def main(folder='data'):
    """Run depth-vs-lidar ICP on each captured frame and report the best one.

    Expects color{i}.png / depth{i}.tif / lidar{i}.npz triples in *folder*
    as written by the capture script; only frame indices 0-76 are processed.
    """
    color_files = sorted(glob.glob(os.path.join(folder, "color*.png")))
    icp_results = []  # (index, transform, fitness, rmse) per processed frame

    for color_path in color_files:
        # Extract index from filename, e.g. color10.png -> 10
        basename = os.path.basename(color_path)
        idx_str = basename.replace("color", "").replace(".png", "")

        if int(idx_str) in range(77):
            depth_path = os.path.join(folder, f"depth{idx_str}.tif")
            lidar_path = os.path.join(folder, f"lidar{idx_str}.npz")

            if not (os.path.exists(depth_path) and os.path.exists(lidar_path)):
                continue

            # Load depth image and back-project to camera-frame 3D points.
            depth_img = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED)
            if depth_img is None:
                continue
            camera_points = depth_to_points(depth_img, INTRINSICS)
            camera_pcd = make_open3d_pcd(camera_points)

            # Load the matching LiDAR frame.
            lidar_points = load_lidar_points(lidar_path)
            lidar_pcd = make_open3d_pcd(lidar_points)

            # Point-to-plane ICP requires normals on both clouds.
            lidar_pcd.estimate_normals(
                search_param=o3d.geometry.KDTreeSearchParamHybrid(
                    radius=1.0,  # adjust to scene scale
                    max_nn=30))
            camera_pcd.estimate_normals(
                search_param=o3d.geometry.KDTreeSearchParamHybrid(
                    radius=1.0,
                    max_nn=30))

            init_guess = np.eye(4)
            threshold = 0.5  # inlier correspondence distance
            result_icp = o3d.pipelines.registration.registration_icp(
                camera_pcd,
                lidar_pcd,
                threshold,
                init_guess,
                o3d.pipelines.registration.TransformationEstimationPointToPlane()
            )

            print("ICP result:", result_icp.transformation)
            print("Fitness:", result_icp.fitness, " RMSE:", result_icp.inlier_rmse)

            # BUG FIX: the original never appended to icp_results, so the
            # best-frame summary below always reported "No frames processed".
            icp_results.append((int(idx_str), result_icp.transformation,
                                result_icp.fitness, result_icp.inlier_rmse))

    # After processing all frames, pick the best frame by highest fitness.
    if len(icp_results) > 0:
        best_frame = max(icp_results, key=lambda x: x[2])  # x[2] is fitness
        print("\nBest frame by fitness was index=", best_frame[0],
              " with fitness=", best_frame[2], " rmse=", best_frame[3])
    else:
        print("No frames processed properly.")
177+
178+
if __name__ == "__main__":
    # Process the default 'data' folder.
    main()

0 commit comments

Comments
 (0)