Skip to content

Commit 39e43ca

Browse files
authored
Merge pull request #198 from utn-mi/juelg/no-frames-when-direct-camera-render
feat(camera): extrinsics and calibration support
2 parents f60a7ed + 8cdae3f commit 39e43ca

18 files changed

Lines changed: 604 additions & 161 deletions

File tree

README.md

Lines changed: 11 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ import rcs
3030
from rcs import sim
3131
from rcs._core.sim import CameraType
3232
from rcs.camera.sim import SimCameraConfig, SimCameraSet
33+
from time import sleep
3334
simulation = sim.Sim(rcs.scenes["fr3_empty_world"]["mjb"])
3435
urdf_path = rcs.scenes["fr3_empty_world"]["urdf"]
3536
ik = rcs.common.RL(str(urdf_path))
@@ -42,23 +43,18 @@ gripper_cfg_sim = sim.SimGripperConfig()
4243
gripper_cfg_sim.add_id("0")
4344
gripper = sim.SimGripper(simulation, gripper_cfg_sim)
4445

45-
# add camera to have a rendering gui
46-
cameras = {
47-
"wrist": SimCameraConfig(
48-
identifier="wrist_0",
49-
type=CameraType.fixed,
50-
resolution_width=640,
51-
resolution_height=480,
52-
frame_rate=30,
53-
),
54-
}
55-
camera_set = SimCameraSet(simulation, cameras)
46+
camera_set = SimCameraSet(simulation, {})
5647
simulation.open_gui()
48+
# wait for gui
49+
sleep(5)
50+
# step the robot 10 cm in x direction
5751
robot.set_cartesian_position(
58-
robot.get_cartesian_position() * rcs.common.Pose(translation=np.array([0.05, 0, 0]))
52+
robot.get_cartesian_position() * rcs.common.Pose(translation=np.array([0.1, 0, 0]))
5953
)
54+
# close gripper
6055
gripper.grasp()
6156
simulation.step_until_convergence()
57+
input("press enter to close")
6258
```
6359
### Gym Env Interface
6460
```python
@@ -80,7 +76,7 @@ env_rel = SimEnvCreator()(
8076
)
8177
env_rel.get_wrapper_attr("sim").open_gui()
8278

83-
for _ in range(10):
79+
for _ in range(100):
8480
obs, info = env_rel.reset()
8581
for _ in range(10):
8682
# sample random relative action and execute it
@@ -110,8 +106,8 @@ For more details read the readme file of the respective extension.
110106
After the required hardware extensions are installed the examples also above work on real hardware:
111107
Switch to hardware by setting the following flag:
112108
```python
113-
ROBOT_INSTANCE = RobotPlatform.SIMULATION
114-
# ROBOT_INSTANCE = RobotPlatform.HARDWARE
109+
# ROBOT_INSTANCE = RobotPlatform.SIMULATION
110+
ROBOT_INSTANCE = RobotPlatform.HARDWARE
115111
```
116112

117113
#### Command Line Interface

extensions/rcs_realsense/pyproject.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ description="RCS realsense module"
99
dependencies = [
1010
"rcs==0.4.0",
1111
"pyrealsense2~=2.55.1",
12+
"apriltag==0.0.16",
1213
]
1314
readme = "README.md"
1415
maintainers = [
Lines changed: 151 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,151 @@
1+
import logging
2+
import threading
3+
import typing
4+
from time import sleep
5+
6+
import apriltag
7+
import cv2
8+
import numpy as np
9+
from rcs._core import common
10+
from rcs.camera.hw import CalibrationStrategy
11+
from rcs.camera.interface import Frame
12+
from tqdm import tqdm
13+
14+
logger = logging.getLogger(__name__)
15+
16+
17+
class FR3BaseArucoCalibration(CalibrationStrategy):
    """Calibration with a 3D printed aruco marker that fits around the vention's FR3 base mounting plate."""

    def __init__(self, camera_name: str):
        """Set up the calibration strategy for the named camera.

        Args:
            camera_name: Identifier of the camera, used for log messages.
        """
        # World-to-camera 4x4 transform; None until calibrate() succeeds.
        self._extrinsics: np.ndarray[tuple[typing.Literal[4], typing.Literal[4]], np.dtype[np.float64]] | None = None
        self.camera_name = camera_name
        # Fixed transform from the printed tag frame to the world (robot base) frame.
        # NOTE(review): offsets correspond to the 3D printed mount geometry -- confirm against CAD.
        self.tag_to_world = common.Pose(
            rpy_vector=np.array([np.pi, 0, -np.pi / 2]), translation=np.array([0.145, 0, 0])
        ).pose_matrix()

    def calibrate(
        self,
        samples: list[Frame],
        intrinsics: np.ndarray[tuple[typing.Literal[3], typing.Literal[4]], np.dtype[np.float64]],
        lock: threading.Lock,
    ) -> bool:
        """Estimate the camera extrinsics from recorded frames showing the aruco tag.

        Blocks on user input, then waits (up to 3 retries of 2 seconds each) until
        at least 10 frames have been recorded into ``samples``.

        Args:
            samples: Shared list of recorded frames, appended to by the camera thread.
            intrinsics: 3x4 camera intrinsics matrix.
            lock: Guards concurrent access to ``samples``.

        Returns:
            True on success (extrinsics stored), False if not enough frames arrived.
        """
        logger.info("Calibrating camera %s. Position it as you wish and press enter.", self.camera_name)
        input()
        tries = 3
        while len(samples) < 10 and tries > 0:
            logger.info("not enough frames recorded, waiting 2 seconds...")
            # BUG FIX: was `tries = -1`, which aborted after one wait and made the
            # `tries == 0` failure branch unreachable.
            tries -= 1
            sleep(2)
        # Check the actual precondition rather than the retry counter, so a batch of
        # frames arriving during the final sleep still counts as success.
        if len(samples) < 10:
            logger.warning("Calibration failed, not enough frames arrived.")
            return False

        # Copy the frame buffers under the lock so detection can run without holding it.
        with lock:
            frames = [sample.camera.color.data.copy() for sample in samples]

        _, tag_to_cam = get_average_marker_pose(frames, intrinsics=intrinsics, calib_tag_id=9, show_live_window=False)

        cam_to_world = self.tag_to_world @ np.linalg.inv(tag_to_cam)
        world_to_cam = np.linalg.inv(cam_to_world)
        self._extrinsics = world_to_cam  # type: ignore
        return True

    def get_extrinsics(self) -> np.ndarray[tuple[typing.Literal[4], typing.Literal[4]], np.dtype[np.float64]] | None:
        """Return the 4x4 world-to-camera transform, or None if calibrate() has not succeeded yet."""
        return self._extrinsics
60+
61+
62+
def get_average_marker_pose(
    samples,
    intrinsics,
    calib_tag_id,
    show_live_window,
):
    """Detect the calibration apriltag in each frame and return the averaged pose.

    Args:
        samples: Iterable of BGR images (numpy arrays) to scan for the tag.
        intrinsics: Camera intrinsics matrix; fx, fy, cx, cy are taken from it.
        calib_tag_id: ID of the calibration tag; detections of other IDs are ignored.
        show_live_window: If True, display each annotated frame; 'q' aborts the loop.

    Returns:
        Tuple of (last frame in which the tag was detected, element-wise mean 4x4 pose).

    Raises:
        ValueError: If the tag was not detected in any frame.
    """
    # Create the detector once for all frames.
    options = apriltag.DetectorOptions(families="tag25h9")
    detector = apriltag.Detector(options=options)

    # Hoisted out of the loop: the intrinsics do not change per frame.
    camera_matrix = intrinsics[:3, :3]

    poses = []
    last_frame = None
    for frame in tqdm(samples):
        marker_det, pose = get_marker_pose(calib_tag_id, detector, intrinsics, frame)
        if marker_det is None:
            continue

        # Mark the detected tag corners on the frame for visual feedback.
        for corner in marker_det.corners:
            cv2.circle(frame, tuple(corner.astype(int)), 5, (0, 0, 255), -1)

        poses.append(pose)
        last_frame = frame.copy()

        if show_live_window:
            # NOTE(review): drawFrameAxes expects a rotation vector (rvec); passing the
            # 3x3 rotation block relies on implicit conversion -- confirm with OpenCV docs.
            cv2.drawFrameAxes(frame, camera_matrix, None, pose[:3, :3], pose[:3, 3], 0.1)  # type: ignore
            cv2.imshow("frame", frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

    if last_frame is None:
        msg = "No frames were processed, cannot calculate average pose. Check if the tag is visible."
        raise ValueError(msg)

    if show_live_window:
        cv2.destroyAllWindows()

    # NOTE(review): the element-wise mean of homogeneous transforms is not a proper
    # rotation average; acceptable for nearly-identical poses, but consider a true
    # SE(3) average if the per-frame poses scatter.
    avg_pose = np.mean(poses, axis=0)
    logger.info("Average pose: %s", avg_pose)  # lazy %-formatting per logging best practice

    return last_frame, avg_pose
113+
114+
115+
def get_marker_pose(calib_tag_id, detector, intrinsics, frame):
    """Detect the calibration tag in a single frame and compute its pose.

    Args:
        calib_tag_id: ID of the tag to look for; other detections are ignored.
        detector: Pre-constructed apriltag detector.
        intrinsics: Camera intrinsics matrix providing fx, fy, cx, cy.
        frame: BGR image (numpy array) to search.

    Returns:
        Tuple of (detection, 4x4 pose), or (None, None) when the tag is absent.

    Raises:
        ValueError: If the tag with ``calib_tag_id`` is detected more than once.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Keep only detections of the calibration tag.
    matches = [det for det in detector.detect(gray) if det.tag_id == calib_tag_id]

    if len(matches) > 1:
        msg = f"Expected 1 detection of tag id {calib_tag_id}, got {len(matches)}."
        raise ValueError(msg)
    if not matches:
        return None, None

    marker_det = matches[0]

    fx, fy = intrinsics[0, 0], intrinsics[1, 1]
    cx, cy = intrinsics[0, 2], intrinsics[1, 2]

    pose, _, _ = detector.detection_pose(
        marker_det,
        camera_params=(fx, fy, cx, cy),
        tag_size=0.1,
    )

    return marker_det, pose

0 commit comments

Comments
 (0)