From 119bb214543d4cac3ffe973cbe4f533066197b05 Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Wed, 26 Nov 2025 22:45:29 +0900 Subject: [PATCH 01/20] remove duplicate and unused image_view.py --- .../image_view.py | 862 ------------------ 1 file changed, 862 deletions(-) delete mode 100644 calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/image_view.py diff --git a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/image_view.py b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/image_view.py deleted file mode 100644 index 0a7dd337..00000000 --- a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/image_view.py +++ /dev/null @@ -1,862 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2024 TIER IV, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import copy -import logging -import threading - -from PySide2.QtCore import QObject -from PySide2.QtCore import QPoint -from PySide2.QtCore import QRectF -from PySide2.QtCore import QSize -from PySide2.QtCore import QThread -from PySide2.QtCore import Qt -from PySide2.QtCore import Signal -from PySide2.QtGui import QColor -from PySide2.QtGui import QPainter -from PySide2.QtGui import QPen -from PySide2.QtGui import QPixmap -from PySide2.QtWidgets import QGraphicsItem -from PySide2.QtWidgets import QGraphicsView -import cv2 -import matplotlib.pyplot as plt -import numpy as np -from tier4_calibration_views.utils import decompose_transformation_matrix -from tier4_calibration_views.utils import transform_points - - -def intensity_to_rainbow_qcolor(value, alpha=1.0): - h = value * 5.0 + 1.0 - i = h // 1 # floor(h) - f = h - i - if i % 2 == 0: - f = 1.0 - f - - n = 1 - f - - if i <= 1: - r, g, b = n, 0, 1.0 - elif i == 2: - r, g, b = 0.0, n, 1.0 - elif i == 3: - r, g, b = 0.0, 1.0, n - elif i == 4: - r, g, b = n, 1.0, 0.0 - elif i >= 5: - r, g, b = 1.0, n, 0 - - return QColor(int(255 * r), int(255 * g), int(255 * b), int(255 * alpha)) - - -class RenderingData: - def __init__(self): - self.image_to_lidar_transform = None - self.image_to_lidar_translation = None - self.image_to_lidar_rotation = None - - self.draw_calibration_points_flag = False - self.draw_pointcloud_flag = False - self.draw_inliers_flag = False - self.marker_size_pixels = None - self.marker_size_meters = None - self.color_channel = None - self.marker_units = None - self.marker_type = None - self.rendering_alpha = None - self.subsample_factor = None - self.rainbow_distance = None - self.rainbow_offset = 0 - self.min_rendering_distance = 0.0 - self.max_rendering_distance = 100.0 - - self.current_object_point = None - self.object_points = None - self.image_points = None - self.external_object_points = None - self.external_image_points = None - self.pointcloud_xyz = None - self.pointcloud_intensity = None - - self.widget_size = None - - self.k = None - self.d = None - - self.critical_r2 = np.inf - - -class CustomQGraphicsView(QGraphicsView): - def 
__init__(self, parent=None): - super(CustomQGraphicsView, self).__init__(parent) - - def resizeEvent(self, event): - super().resizeEvent(event) - - for item in self.scene().items(): - item.prepareGeometryChange() - item.update() - - def wheelEvent(self, event): - zoom_in_factor = 1.25 - zoom_out_factor = 1 / zoom_in_factor - - for item in self.scene().items(): - item.prepareGeometryChange() - item.update() - - self.setTransformationAnchor(QGraphicsView.NoAnchor) - self.setResizeAnchor(QGraphicsView.NoAnchor) - - old_pos = self.mapToScene(event.pos()) - - # Zoom - if event.delta() > 0: - zoom_factor = zoom_in_factor - else: - zoom_factor = zoom_out_factor - self.scale(zoom_factor, zoom_factor) - - # Get the new position - new_pos = self.mapToScene(event.pos()) - - # Move scene to old position - delta = new_pos - old_pos - self.translate(delta.x(), delta.y()) - - -class Renderer(QObject): - def __init__(self, image_view): - super().__init__() - self.image_view = image_view - - def render(self): - self.image_view.paintEventThread() - - -class ImageView(QGraphicsItem, QObject): - clicked_signal = Signal(float, float) - render_request_signal = Signal() - rendered_signal = Signal() - - def __init__(self, parent=None): - QGraphicsItem.__init__(self, parent) - QObject.__init__(self, parent) - - self.pix = QPixmap() - self.image_width = None - self.image_height = None - self.display_width = None - self.display_height = None - - self.data_ui = RenderingData() - self.data_renderer = RenderingData() - - self.thread = QThread() - self.thread.start() - self.lock = threading.RLock() - self.renderer = Renderer(self) - self.renderer.moveToThread(self.thread) - # To debug rendering, consider not moving it into another thread - - self.rendered_signal.connect(self.update2) - self.render_request_signal.connect(self.renderer.render) - - self.line_pen = QPen() - self.line_pen.setWidth(2) - self.line_pen.setBrush(Qt.white) - - self.magenta_pen = QPen() - self.magenta_pen.setWidth(2) - self.magenta_pen.setBrush(Qt.magenta) - - self.cyan_pen = QPen() - self.cyan_pen.setWidth(2) - self.cyan_pen.setBrush(Qt.cyan) - - self.inlier_line_pen = QPen() - self.inlier_line_pen.setWidth(2) - self.inlier_line_pen.setBrush(Qt.green) - - self.outlier_line_pen = QPen() - self.outlier_line_pen.setWidth(2) - self.outlier_line_pen.setBrush(Qt.red) - - self.red_pen = QPen(Qt.red) - self.red_pen.setBrush(Qt.red) - - self.green_pen = QPen(Qt.green) - self.green_pen.setBrush(Qt.green) - - colormap_name = "hsv" - self.colormap_bins = 100 - self.colormap = plt.get_cmap(colormap_name, self.colormap_bins) - self.colormap = [ - ( - int(255 * self.colormap(i)[2]), - int(255 * self.colormap(i)[1]), - int(255 * self.colormap(i)[0]), - ) - for i in range(self.colormap_bins) - ] - - # self.setMinimumWidth(300) - - self.update_count = 0 - self.render_count = 0 - self.unprocessed_rendered_requests = 0 - self.rendering = False - self.rendered_image = None - - def update(self): - with self.lock: - self.update_count += 1 - self.unprocessed_rendered_requests += 1 - self.render_request_signal.emit() - - def update2(self): - super().update() - - def set_draw_calibration_points(self, value): - with self.lock: - self.data_ui.draw_calibration_points_flag = value - self.update() - - def set_draw_pointcloud(self, value): - with self.lock: - self.data_ui.draw_pointcloud_flag = value - self.update() - - def set_marker_size_pixels(self, value): - with self.lock: - self.data_ui.marker_size_pixels = value - self.update() - - def set_marker_size_meters(self, 
value): - with self.lock: - self.data_ui.marker_size_meters = value - self.update() - - def set_rainbow_distance(self, value): - with self.lock: - self.data_ui.rainbow_distance = value - self.update() - - def set_rainbow_offset(self, value): - with self.lock: - self.data_ui.rainbow_offset = value - self.update() - - def set_rendering_alpha(self, value): - with self.lock: - self.data_ui.rendering_alpha = value - self.update() - - def set_marker_type(self, value): - with self.lock: - self.data_ui.marker_type = value.lower() - self.update() - - def set_marker_units(self, value): - with self.lock: - value = value.lower() - assert value == "meters" or value == "pixels" - self.data_ui.marker_units = value - self.update() - - def set_color_channel(self, value): - with self.lock: - self.data_ui.color_channel = value.lower() - self.update() - - def set_draw_inliers(self, value): - with self.lock: - self.data_ui.draw_inliers_flag = value - self.update() - - def set_inlier_distance(self, value): - with self.lock: - self.data_ui.inlier_distance = value - self.update() - - def set_min_rendering_distance(self, value): - with self.lock: - self.data_ui.min_rendering_distance = value - self.update() - - def set_max_rendering_distance(self, value): - with self.lock: - self.data_ui.max_rendering_distance = value - self.update() - - def set_current_point(self, point): - with self.lock: - self.data_ui.current_object_point = None if point is None else point.reshape(1, 3) - self.update() - - def set_transform(self, transform): - with self.lock: - self.data_ui.image_to_lidar_transform = transform - ( - self.data_ui.image_to_lidar_translation, - self.data_ui.image_to_lidar_rotation, - ) = decompose_transformation_matrix(transform) - self.update() - - def pixmap(self): - with self.lock: - return self.pix - - def set_pixmap(self, pixmap): - with self.lock: - if self.pix is None or self.pix.size() != pixmap.size(): - self.prepareGeometryChange() - - self.pix = pixmap - - self.image_width = float(self.pix.size().width()) - self.image_height = float(self.pix.size().height()) - - def set_subsample_factor(self, value): - with self.lock: - self.data_ui.subsample_factor = int(value) - self.update() - - def set_pointcloud(self, pointcloud): - with self.lock: - self.data_ui.pointcloud_xyz = pointcloud[:, 0:3] - self.data_ui.pointcloud_intensity = pointcloud[:, 3] - - subsample = self.data_ui.subsample_factor - - self.data_ui.pointcloud_xyz = self.data_ui.pointcloud_xyz[::subsample, :] - self.data_ui.pointcloud_intensity = self.data_ui.pointcloud_intensity[::subsample] - - def set_camera_info(self, k, d): - with self.lock: - self.data_ui.k = np.copy(k).reshape((3, 3)) - self.data_ui.d = np.copy(d).reshape((-1,)) - - # Calculate critical r^2, such that the distorted radial coordinate - # r*(1+k1*r^2+k2*r^4+k3*r^6)/(1+k4*r^2+k5*r^4+k6*r^6) starts decreasing. - # Note that if p1, p2 (tangential distortion) are non-zero, - # then the 'folding boundary' cannot be determined by r^2 alone, - # and this calculation is only an approximation. 
- self.data_ui.critical_r2 = np.inf - - d = self.data_ui.d.flatten() - if d.shape[0] >= 2: - k1 = d[0] - k2 = d[1] - k3 = 0 - if d.shape[0] >= 5: - k3 = d[4] - k4 = 0 - k5 = 0 - k6 = 0 - if d.shape[0] >= 8: - k4 = d[5] - k5 = d[6] - k6 = d[7] - - coeffs = [ - k3 * k6, - 3 * k3 * k5 - k2 * k6, - 5 * k3 * k4 + k2 * k5 - 3 * k1 * k6, - 7 * k3 + 3 * k2 * k4 - k1 * k5 - 5 * k6, - 5 * k2 + k1 * k4 - 3 * k5, - 3 * k1 - k4, - 1, - ] - roots = np.roots(coeffs) - real_roots = roots[np.isclose(roots.imag, 0.0)].real - positive_roots = real_roots[real_roots > 0.0] - - if len(positive_roots) > 0: - self.data_ui.critical_r2 = np.min(positive_roots) - - def set_calibration_points(self, object_points, image_points): - with self.lock: - self.data_ui.object_points = object_points - self.data_ui.image_points = image_points - self.update() - - def set_external_calibration_points(self, object_points, image_points): - with self.lock: - self.data_ui.external_object_points = object_points - self.data_ui.external_image_points = image_points - self.update() - - def minimumSizeHint(self): - return QSize(1000, 400) - - def sizeHint(self): - return QSize(1000, 1000) - - def take_screenshot(self): - with self.lock: - return self.rendered_image.copy() - - def paint(self, painter, option, widget): - with self.lock: - self.data_ui.widget_size = widget.size() - painter.setRenderHint(QPainter.Antialiasing) - painter.drawPixmap(QPoint(), self.rendered_image) - - def boundingRect(self): - with self.lock: - if self.rendered_image is None: - return QRectF(0, 0, 500, 500) - - return QRectF( - 0, 0, self.rendered_image.size().width(), self.rendered_image.size().height() - ) - - def paintEventThread(self): - with self.lock: - self.render_count += 1 - self.unprocessed_rendered_requests -= 1 - - if self.pix.isNull(): - return - - if self.unprocessed_rendered_requests > 0: - self.rendered.emit() - return - - # Copy the data into the thread - self.data_renderer = copy.deepcopy(self.data_ui) - - if self.data_renderer.widget_size is None: - return - - scaled_pix_size = self.pix.size() - scaled_pix_size.scale(self.data_renderer.widget_size, Qt.KeepAspectRatio) - - rendered_image = self.pix.scaled( - scaled_pix_size, Qt.KeepAspectRatio, Qt.SmoothTransformation - ) - - painter = QPainter(rendered_image) - painter.setRenderHint(QPainter.Antialiasing) - - painter.setPen(Qt.red) - - painter.drawLine(QPoint(0, 0), QPoint(0, scaled_pix_size.height())) - painter.drawLine( - QPoint(0, scaled_pix_size.height()), - QPoint(scaled_pix_size.width(), scaled_pix_size.height()), - ) - painter.drawLine( - QPoint(scaled_pix_size.width(), scaled_pix_size.height()), - QPoint(scaled_pix_size.width(), 0), - ) - painter.drawLine(QPoint(scaled_pix_size.width(), 0), QPoint(0, 0)) - - self.width_image_to_widget_factor = float(scaled_pix_size.width()) / self.image_width - self.height_image_to_widget_factor = float(scaled_pix_size.height()) / self.image_height - self.image_to_widget_factor = np.array( - [self.width_image_to_widget_factor, self.height_image_to_widget_factor] - ) - - if self.data_renderer.draw_pointcloud_flag: - self.draw_pointcloud(painter) - - if self.data_renderer.draw_calibration_points_flag: - self.draw_calibration_points(painter) - self.draw_external_calibration_points(painter) - - self.draw_current_point(painter) - - painter.end() - - with self.lock: - self.rendered_image = rendered_image - self.rendered_signal.emit() - - def draw_pointcloud(self, painter): - if ( - self.data_renderer.image_to_lidar_translation is None - or 
self.data_renderer.image_to_lidar_rotation is None - ): - return - - # Note: ccs=camera coordinate system. ics=image coordinate system - pointcloud_ccs = transform_points( - self.data_renderer.image_to_lidar_translation, - self.data_renderer.image_to_lidar_rotation, - self.data_renderer.pointcloud_xyz, - ) - - if pointcloud_ccs.shape[0] == 0: - return - - # Transform to the image coordinate system - tvec = np.zeros((3, 1)) - rvec = np.zeros((3, 1)) - - pointcloud_ics, _ = cv2.projectPoints( - pointcloud_ccs, rvec, tvec, self.data_renderer.k, self.data_renderer.d - ) - - pointcloud_ics = pointcloud_ics.reshape(-1, 2) - - indexes = np.logical_and( - np.logical_and( - np.logical_and(pointcloud_ics[:, 0] >= 0, pointcloud_ics[:, 0] < self.image_width), - np.logical_and(pointcloud_ics[:, 1] >= 0, pointcloud_ics[:, 1] < self.image_height), - ), - np.logical_and( - np.logical_and( - pointcloud_ccs[:, 2] >= self.data_renderer.min_rendering_distance, - pointcloud_ccs[:, 2] < self.data_renderer.max_rendering_distance, - ), - (pointcloud_ccs[:, 0] ** 2 + pointcloud_ccs[:, 1] ** 2) - < pointcloud_ccs[:, 2] ** 2 * self.data_renderer.critical_r2, - ), - ) - - # Transform (rescale) into the widget coordinate system - pointcloud_z = pointcloud_ccs[indexes, 2] - pointcloud_i = self.data_renderer.pointcloud_intensity[indexes] - - if self.data_renderer.marker_units == "meters": - factor = ( - self.data_renderer.k[0, 0] - * self.data_renderer.marker_size_meters - * self.width_image_to_widget_factor - ) - scale_px = factor / pointcloud_z - else: - factor = self.data_renderer.marker_size_pixels * self.width_image_to_widget_factor - scale_px = factor * np.ones_like(pointcloud_z) - - pointcloud_wcs = pointcloud_ics[indexes, :] * self.image_to_widget_factor - - indexes2 = scale_px >= 1 - pointcloud_wcs = pointcloud_wcs[indexes2, :] - scale_px = scale_px[indexes2] - - if pointcloud_wcs.shape[0] == 0: - return - - try: - if self.data_renderer.color_channel == "x": - color_scalars = pointcloud_ccs[indexes, 0][indexes2] - elif self.data_renderer.color_channel == "y": - color_scalars = pointcloud_ccs[indexes, 1][indexes2] - elif self.data_renderer.color_channel == "z": - color_scalars = pointcloud_z[indexes2] - elif self.data_renderer.color_channel == "intensity": - color_scalars = pointcloud_i[indexes2] - min_value = color_scalars.min() - max_value = color_scalars.max() - if min_value == max_value: - color_scalars = np.ones_like(color_scalars) - else: - color_scalars = 1.0 - (color_scalars - min_value) / (max_value - min_value) - else: - raise NotImplementedError - except Exception as e: - logging.error(e) - - draw_marker_f = ( - painter.drawEllipse if self.data_renderer.marker_type == "circles" else painter.drawRect - ) - - for point, radius, color_channel in zip(pointcloud_wcs, scale_px, color_scalars): - if self.data_renderer.color_channel == "intensity": - color = intensity_to_rainbow_qcolor( - color_channel, self.data_renderer.rendering_alpha - ) - else: - color_index = int( - self.colormap_bins - * ( - ( - self.data_renderer.rainbow_offset - + (color_channel / self.data_renderer.rainbow_distance) - ) - % 1.0 - ) - ) - color = self.colormap[color_index] - color = QColor( - color[0], color[1], color[2], int(255 * self.data_renderer.rendering_alpha) - ) - - painter.setPen(color) - painter.setBrush(color) - draw_marker_f(point[0] - 0.5 * radius, point[1] - 0.5 * radius, radius, radius) - - def draw_calibration_points(self, painter): - if ( - self.data_renderer.image_points is None - or 
self.data_renderer.object_points is None - or len(self.data_renderer.image_points) == 0 - or len(self.data_renderer.object_points) == 0 - or self.data_renderer.image_to_lidar_translation is None - or self.data_renderer.image_to_lidar_rotation is None - ): - return - - image_points = np.array(self.data_renderer.image_points) - - # Note: lcs=lidar coordinate system. ccs=camera coordinate system - object_points_lcs = np.array(self.data_renderer.object_points) - - object_points_ccs = transform_points( - self.data_renderer.image_to_lidar_translation, - self.data_renderer.image_to_lidar_rotation, - object_points_lcs, - ) - - # Transform to the image coordinate system - tvec = np.zeros((3, 1)) - rvec = np.zeros((3, 1)) - object_points_ics, _ = cv2.projectPoints( - object_points_ccs, rvec, tvec, self.data_renderer.k, self.data_renderer.d - ) - object_points_ics = object_points_ics.reshape(-1, 2) - - repr_err = np.linalg.norm(object_points_ics - image_points, axis=1) - - # Transform (rescale) into the widget coordinate system - object_points_wcs = object_points_ics * self.image_to_widget_factor - - radius = 10 * self.width_image_to_widget_factor - - object_pen = self.red_pen - image_pen = self.green_pen - line_pen = self.line_pen - - for object_point_wcs, image_point, d in zip( - object_points_wcs, self.data_renderer.image_points, repr_err - ): - image_point_wcs = image_point * self.image_to_widget_factor - - if self.data_renderer.draw_inliers_flag: - if d < self.data_renderer.inlier_distance: - line_pen = self.inlier_line_pen - object_pen = self.green_pen - image_pen = self.green_pen - else: - line_pen = self.outlier_line_pen - object_pen = self.red_pen - image_pen = self.red_pen - - painter.setPen(line_pen) - painter.drawLine( - object_point_wcs[0], object_point_wcs[1], image_point_wcs[0], image_point_wcs[1] - ) - - painter.setPen(object_pen) - painter.setBrush(object_pen.brush()) - painter.drawEllipse( - object_point_wcs[0] - 0.5 * radius, - object_point_wcs[1] - 0.5 * radius, - radius, - radius, - ) - - painter.setPen(image_pen) - painter.setBrush(image_pen.brush()) - painter.drawEllipse( - image_point_wcs[0] - 0.5 * radius, image_point_wcs[1] - 0.5 * radius, radius, radius - ) - - def draw_external_calibration_points(self, painter): - if ( - self.data_renderer.external_image_points is None - or self.data_renderer.external_object_points is None - or len(self.data_renderer.external_image_points) == 0 - or len(self.data_renderer.external_object_points) == 0 - or self.data_renderer.image_to_lidar_translation is None - or self.data_renderer.image_to_lidar_rotation is None - ): - return - - object_points_lcs = np.array(self.data_renderer.external_object_points) - - # Note: lcs=lidar coordinate system. ccs=camera coordinate system. ics=image coordinate system. 
wcs=widget coordinate system - object_points_ccs = transform_points( - self.data_renderer.image_to_lidar_translation, - self.data_renderer.image_to_lidar_rotation, - object_points_lcs, - ) - - # Transform to the image coordinate system - tvec = np.zeros((3, 1)) - rvec = np.zeros((3, 1)) - object_points_ics, _ = cv2.projectPoints( - object_points_ccs, rvec, tvec, self.data_renderer.k, self.data_renderer.d - ) - object_points_ics = object_points_ics.reshape(-1, 2) - - # Transform (rescale) into the widget coordinate system - object_points_wcs = object_points_ics * self.image_to_widget_factor - - radius = 10 * self.width_image_to_widget_factor - - object_pen = self.red_pen - image_pen = self.green_pen - line_pen = self.line_pen - object_line_pen = self.magenta_pen - image_line_pen = self.cyan_pen - - # Draw tag borders - image_points = self.data_renderer.external_image_points - - scaled_pix_size = self.pix.size() - scaled_pix_size.scale(self.data_renderer.widget_size, Qt.KeepAspectRatio) - - for i1 in range(len(image_points)): - tag_index = i1 // 4 - i2 = 4 * tag_index + ((i1 + 1) % 4) - - image_point_1_wcs = image_points[i1] * self.image_to_widget_factor - image_point_2_wcs = image_points[i2] * self.image_to_widget_factor - - object_point_1_wcs = object_points_wcs[i1] - object_point_2_wcs = object_points_wcs[i2] - - painter.setPen(image_line_pen) - painter.drawLine( - image_point_1_wcs[0], - image_point_1_wcs[1], - image_point_2_wcs[0], - image_point_2_wcs[1], - ) - - if ( - np.any(np.isnan(object_point_1_wcs)) - or np.any(np.isnan(object_point_2_wcs)) - or object_point_1_wcs[0] < 0 - or object_point_1_wcs[0] > scaled_pix_size.width() - or object_point_1_wcs[1] < 0 - or object_point_1_wcs[1] > scaled_pix_size.height() - or object_point_2_wcs[0] < 0 - or object_point_2_wcs[0] > scaled_pix_size.width() - or object_point_2_wcs[1] < 0 - or object_point_2_wcs[1] > scaled_pix_size.height() - ): - continue - - painter.setPen(object_line_pen) - painter.drawLine( - object_point_1_wcs[0], - object_point_1_wcs[1], - object_point_2_wcs[0], - object_point_2_wcs[1], - ) - - # Draw normal points - for object_point_wcs, image_point in zip( - object_points_wcs, self.data_renderer.external_image_points - ): - image_point_wcs = image_point * self.image_to_widget_factor - - painter.setPen(line_pen) - painter.drawLine( - object_point_wcs[0], object_point_wcs[1], image_point_wcs[0], image_point_wcs[1] - ) - - painter.setPen(object_pen) - painter.setBrush(object_pen.brush()) - painter.drawEllipse( - object_point_wcs[0] - 0.5 * radius, - object_point_wcs[1] - 0.5 * radius, - radius, - radius, - ) - - painter.setPen(image_pen) - painter.setBrush(image_pen.brush()) - painter.drawEllipse( - image_point_wcs[0] - 0.5 * radius, image_point_wcs[1] - 0.5 * radius, radius, radius - ) - - def draw_current_point(self, painter): - if self.data_renderer.current_object_point is None: - return - - if ( - self.data_renderer.image_to_lidar_translation is None - or self.data_renderer.image_to_lidar_rotation is None - ): - return - - # Note: ccs=camera coordinate system. ics=image coordinate system. 
wcs=widget coordinate system - object_point_ccs = transform_points( - self.data_renderer.image_to_lidar_translation, - self.data_renderer.image_to_lidar_rotation, - self.data_renderer.current_object_point, - ) - - # Transform to the image coordinate system - tvec = np.zeros((3, 1)) - rvec = np.zeros((3, 1)) - object_point_ics, _ = cv2.projectPoints( - object_point_ccs, rvec, tvec, self.data_renderer.k, self.data_renderer.d - ) - object_point_ics = object_point_ics.reshape(1, 2) - - # Transform (rescale) into the widget coordinate system - object_point_wcs = object_point_ics * self.image_to_widget_factor - object_point_wcs = object_point_wcs.reshape( - 2, - ) - - radius = 20 * self.width_image_to_widget_factor - - painter.setPen(Qt.magenta) - painter.drawLine( - object_point_wcs[0] - radius, - object_point_wcs[1] - radius, - object_point_wcs[0] + radius, - object_point_wcs[1] + radius, - ) - - painter.drawLine( - object_point_wcs[0] + radius, - object_point_wcs[1] - radius, - object_point_wcs[0] - radius, - object_point_wcs[1] + radius, - ) - - def mousePressEvent(self, e): - with self.lock: - if self.pix is None or self.data_renderer.widget_size is None: - return - - scaled_pix_size = self.pix.size() - scaled_pix_size.scale(self.data_renderer.widget_size, Qt.KeepAspectRatio) - - width_widget_to_image_factor = self.image_width / float(scaled_pix_size.width()) - height_widget_to_image_factor = self.image_height / float(scaled_pix_size.height()) - - x = (e.scenePos().x() + 0.5) * width_widget_to_image_factor - y = (e.scenePos().y() + 0.5) * height_widget_to_image_factor - - if x >= 0 and x < self.image_width and y >= 0 and y < self.image_height: - self.update() - - self.clicked_signal.emit(x, y) - else: - logging.error("Click out of image coordinates !") - - self.prepareGeometryChange() - return super().mousePressEvent(e) From 1d24fa7b32f91d3002fe41e7c4d986e21ba10c0d Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Thu, 27 Nov 2025 09:03:52 +0900 Subject: [PATCH 02/20] add current tf display (contains TEMP) --- .../image_view_ros_interface.py | 13 +- .../tier4_calibration_views/image_view_ui.py | 120 +++++++++++++++--- 2 files changed, 112 insertions(+), 21 deletions(-) diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py index 6b78dc88..d14d7d1f 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2024 TIER IV, Inc. +# Copyright 2024-2025 TIER IV, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -112,6 +112,17 @@ def __init__(self, node_name="image_view"): self.timer = self.create_timer(self.timer_period, self.timer_callback) + def get_transform(self, parent: str, child: str): + with self.lock: + try: + return tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + parent, child, rclpy.time.Time(), timeout=Duration(seconds=0.0) + ) + ) + except TransformException: + return None + def set_sensor_data_callback(self, callback): with self.lock: self.sensor_data_callback = callback diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py index 401e05bd..d351f5d8 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py @@ -30,6 +30,7 @@ from PySide2.QtWidgets import QHBoxLayout from PySide2.QtWidgets import QLabel from PySide2.QtWidgets import QMainWindow +from PySide2.QtWidgets import QPlainTextEdit from PySide2.QtWidgets import QSpinBox from PySide2.QtWidgets import QVBoxLayout from PySide2.QtWidgets import QWidget @@ -37,6 +38,7 @@ from tier4_calibration_views.image_view import CustomQGraphicsView from tier4_calibration_views.image_view import ImageView from tier4_calibration_views.image_view_ros_interface import ImageViewRosInterface +import transforms3d class ImageViewUI(QMainWindow): @@ -84,7 +86,8 @@ def __init__(self, ros_interface: ImageViewRosInterface): # Parent widget self.central_widget = QWidget(self) self.left_menu_widget = None - self.right_menu_widget = None + self.right_menu_widget_1 = None + self.right_menu_widget_2 = None self.setCentralWidget(self.central_widget) self.layout = QHBoxLayout(self.central_widget) @@ -94,30 +97,43 @@ def __init__(self, ros_interface: ImageViewRosInterface): # Menu Widgets self.make_left_menu() - self.make_right_menu() + self.make_right_menu_1() + self.make_right_menu_2() self.layout.addWidget(self.graphics_view) if self.left_menu_widget: self.layout.addWidget(self.left_menu_widget) - if self.right_menu_widget: - self.layout.addWidget(self.right_menu_widget) + if self.right_menu_widget_1: + self.layout.addWidget(self.right_menu_widget_1) + if self.right_menu_widget_2: + self.layout.addWidget(self.right_menu_widget_2) self.show() def make_left_menu(self): pass - def make_right_menu(self): - self.right_menu_widget = QWidget(self.central_widget) - self.right_menu_widget.setFixedWidth(210) - self.right_menu_layout = QVBoxLayout(self.right_menu_widget) + def make_right_menu_1(self): + self.right_menu_widget_1 = QWidget(self.central_widget) + self.right_menu_widget_1.setFixedWidth(210) + self.right_menu_layout_1 = QVBoxLayout(self.right_menu_widget_1) # Visualization group self.make_visualization_options() - self.right_menu_layout.addWidget(self.visualization_options_group) + self.right_menu_layout_1.addWidget(self.visualization_options_group) + + def make_right_menu_2(self): + self.right_menu_widget_2 = QWidget(self.central_widget) + self.right_menu_widget_2.setFixedWidth(210) + self.right_menu_layout_2 = QVBoxLayout(self.right_menu_widget_2) + + # Source group + self.make_source_options() + + self.right_menu_layout_2.addWidget(self.source_options_group) def make_image_view(self): self.image_view = ImageView() @@ -148,10 +164,6 @@ def make_visualization_options(self): self.visualization_options_group = QGroupBox("Visualization options") self.visualization_options_group.setFlat(True) - tf_source_label = QLabel("TF source:") 
- self.tf_source_combobox = QComboBox() - self.tf_source_combobox.currentTextChanged.connect(self.tf_source_callback) - def marker_type_callback(value): self.image_view.set_marker_type(value) @@ -247,7 +259,7 @@ def rendering_min_distance_callback(value): rendering_min_distance_label = QLabel("Min rendering distance (m)") rendering_min_distance_spinbox = QDoubleSpinBox() rendering_min_distance_spinbox.valueChanged.connect(rendering_min_distance_callback) - rendering_min_distance_spinbox.setRange(0.01, 100.0) + rendering_min_distance_spinbox.setRange(0.01, 200.0) rendering_min_distance_spinbox.setSingleStep(0.1) rendering_min_distance_spinbox.setValue(0.1) @@ -257,35 +269,33 @@ def rendering_max_distance_callback(value): rendering_max_distance_label = QLabel("Max rendering distance (m)") rendering_max_distance_spinbox = QDoubleSpinBox() rendering_max_distance_spinbox.valueChanged.connect(rendering_max_distance_callback) - rendering_max_distance_spinbox.setRange(0.01, 100.0) + rendering_max_distance_spinbox.setRange(0.01, 200.0) rendering_max_distance_spinbox.setSingleStep(0.1) rendering_max_distance_spinbox.setValue(100.0) def render_pointcloud_callback(value): self.image_view.set_draw_pointcloud(value == Qt.Checked) - render_pointcloud_checkbox = QCheckBox("Render pointcloud") + render_pointcloud_checkbox = QCheckBox("Show pointcloud") render_pointcloud_checkbox.stateChanged.connect(render_pointcloud_callback) render_pointcloud_checkbox.setChecked(True) def render_calibration_points_callback(value): self.image_view.set_draw_calibration_points(value == Qt.Checked) - render_calibration_points_checkbox = QCheckBox("Render calibration points") + render_calibration_points_checkbox = QCheckBox("Show calibration pairs") render_calibration_points_checkbox.stateChanged.connect(render_calibration_points_callback) render_calibration_points_checkbox.setChecked(True) def render_inliers_callback(value): self.image_view.set_draw_inliers(value == Qt.Checked) - self.render_inliers_checkbox = QCheckBox("Render inliers") + self.render_inliers_checkbox = QCheckBox("Show inliers") self.render_inliers_checkbox.stateChanged.connect(render_inliers_callback) self.render_inliers_checkbox.setChecked(False) self.render_inliers_checkbox.setEnabled(False) visualization_options_layout = QVBoxLayout() - visualization_options_layout.addWidget(tf_source_label) - visualization_options_layout.addWidget(self.tf_source_combobox) visualization_options_layout.addWidget(marker_type_label) visualization_options_layout.addWidget(marker_type_combobox) visualization_options_layout.addWidget(marker_units_label) @@ -315,6 +325,25 @@ def render_inliers_callback(value): # visualization_options_layout.addStretch(1) self.visualization_options_group.setLayout(visualization_options_layout) + def make_source_options(self): + self.source_options_group = QGroupBox("Source options") + self.source_options_group.setFlat(True) + + tf_source_label = QLabel("TF source:") + self.tf_source_combobox = QComboBox() + self.tf_source_combobox.currentTextChanged.connect(self.tf_source_callback) + + self.tf_source_status_text = QPlainTextEdit() + self.tf_source_status_text.setReadOnly(True) + self.tf_source_status_text.setPlainText("TFs not available") + + source_options_layout = QVBoxLayout() + source_options_layout.addWidget(tf_source_label) + source_options_layout.addWidget(self.tf_source_combobox) + source_options_layout.addWidget(self.tf_source_status_text) + + self.source_options_group.setLayout(source_options_layout) + def tf_source_callback(self, 
string): string = string.lower() @@ -325,6 +354,7 @@ def tf_source_callback(self, string): assert self.initial_transform is not None self.source_transform = self.initial_transform elif "calibrator" in string: + assert self.calibrated_transform is not None self.source_transform = self.calibrated_transform else: raise NotImplementedError @@ -332,6 +362,56 @@ def tf_source_callback(self, string): self.image_view.set_transform(self.source_transform) self.image_view.update() + # update status text + source_xyz = self.source_transform[0:3, 3] + source_rpy = transforms3d.euler.mat2euler(self.source_transform[0:3, 0:3]) + status_text = ( + f"{self.ros_interface.image_frame}\n" + f"-> {self.ros_interface.lidar_frame}:\n" + f"x: {source_xyz[0]:.6f}\n" + f"y: {source_xyz[1]:.6f}\n" + f"z: {source_xyz[2]:.6f}\n" + f"roll: {source_rpy[0]:.6f}\n" + f"pitch: {source_rpy[1]:.6f}\n" + f"yaw: {source_rpy[2]:.6f}\n" + ) + self.tf_source_status_text.setPlainText(status_text) + + # postprocess the source transform + parent_frame = "sensor_kit_base_link" # TEMP + child_frame = self.ros_interface.image_frame.split("/")[0] + "/camera_link" # TEMP + parent_to_image_transform = self.ros_interface.get_transform( + parent_frame, + self.ros_interface.image_frame, + ) + if parent_to_image_transform is None: + return + lidar_to_child_transform = self.ros_interface.get_transform( + self.ros_interface.lidar_frame, + child_frame, + ) + if lidar_to_child_transform is None: + return + + postprocessed_transform = ( + parent_to_image_transform @ self.source_transform @ lidar_to_child_transform + ) + + # update (append) status text + postprocessed_xyz = postprocessed_transform[0:3, 3] + postprocessed_rpy = transforms3d.euler.mat2euler(postprocessed_transform[0:3, 0:3]) + status_text += ( + f"\n{parent_frame}\n" + f"-> {child_frame}:\n" + f"x: {postprocessed_xyz[0]:.6f}\n" + f"y: {postprocessed_xyz[1]:.6f}\n" + f"z: {postprocessed_xyz[2]:.6f}\n" + f"roll: {postprocessed_rpy[0]:.6f}\n" + f"pitch: {postprocessed_rpy[1]:.6f}\n" + f"yaw: {postprocessed_rpy[2]:.6f}\n" + ) + self.tf_source_status_text.setPlainText(status_text) + def sensor_data_ros_callback(self, img, camera_info, pointcloud, delay): # This method is executed in the ROS spin thread with self.lock: From 3a639e498511e26273fd6339603b900690d3bcee Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Thu, 27 Nov 2025 09:05:14 +0900 Subject: [PATCH 03/20] change calibration output format to yaml (contains TEMP) --- .../interactive_calibrator.py | 98 ++++++++++++++++--- .../ros_interface.py | 20 +--- 2 files changed, 88 insertions(+), 30 deletions(-) diff --git a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py index 921ac4e8..6821c52b 100644 --- a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py +++ b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2024 TIER IV, Inc. +# Copyright 2024-2025 TIER IV, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json import logging import os import signal @@ -42,6 +41,16 @@ import rclpy from rosidl_runtime_py.convert import message_to_ordereddict from tier4_calibration_views.image_view_ui import ImageViewUI +import transforms3d +import yaml + + +def float_representer(dumper, value): + text = "{0:.6f}".format(value) # noqa E231 + return dumper.represent_scalar("tag:yaml.org,2002:float", text) # noqa E231 + + +yaml.add_representer(float, float_representer) class InteractiveCalibratorUI(ImageViewUI): @@ -145,18 +154,18 @@ def calibration_api_button_callback(): self.calibration_status_inliers_label.setAlignment(Qt.AlignTop | Qt.AlignLeft) self.state_1_message = ( - "To add a calibration pair\nfirst click the 3d point." - + "\nTo delete a calibration\npoint, click it in the\nimage" + "To add a calibration pair,\nfirst click the 3d point." + + "\nTo delete a calibration\npair, click it in the image." ) user_message_label = QLabel() user_message_label.setText(self.state_1_message) user_message_label.setAlignment(Qt.AlignTop | Qt.AlignLeft) - save_calibration_button = QPushButton("Save calibration") + save_calibration_button = QPushButton("Save pairs and result") save_calibration_button.clicked.connect(self.save_calibration_callback) - load_calibration_button = QPushButton("Load calibration") + load_calibration_button = QPushButton("Load pairs") load_calibration_button.clicked.connect(self.load_calibration_callback) calibration_status_layout = QVBoxLayout() @@ -238,7 +247,7 @@ def pnp_min_points_callback(): self.calibration_button.setEnabled(self.calibration_possible) self.calibration2_button.setEnabled(self.calibration_possible) - pnp_min_points_label = QLabel("Minimum pnp\n points") + pnp_min_points_label = QLabel("Minimum pnp pairs") self.pnp_min_points_spinbox = QSpinBox() self.pnp_min_points_spinbox.valueChanged.connect(pnp_min_points_callback) self.pnp_min_points_spinbox.setRange(4, 100) @@ -248,7 +257,7 @@ def pnp_min_points_callback(): def ransac_inlier_error_callback(value): self.image_view.set_inlier_distance(value) - ransac_inlier_error_label = QLabel("RANSAC inlier\nerror (px)") + ransac_inlier_error_label = QLabel("RANSAC inlier error (px)") self.ransac_inlier_error_spinbox = QDoubleSpinBox() self.ransac_inlier_error_spinbox.valueChanged.connect(ransac_inlier_error_callback) self.ransac_inlier_error_spinbox.setRange(0.0, 1000.0) @@ -363,10 +372,76 @@ def save_calibration_callback(self): if self.optimized_camera_info is not None: d = message_to_ordereddict(self.optimized_camera_info) - with open(os.path.join(output_folder, "optimized_camera_info.json"), "w") as f: - f.write(json.dumps(d, indent=4, sort_keys=False)) + with open(os.path.join(output_folder, "optimized_camera_info.yaml"), "w") as f: + yaml.dump(d, f, sort_keys=False) + + # save calibrated extrinsics + assert self.calibrated_transform is not None + + use_rpy = True + + calibrated_tf = { + "x": self.calibrated_transform[0, 3].item(), + "y": self.calibrated_transform[1, 3].item(), + "z": self.calibrated_transform[2, 3].item(), + } + if use_rpy: + rpy = transforms3d.euler.mat2euler(self.calibrated_transform[0:3, 0:3]) + calibrated_tf["roll"] = rpy[0] + calibrated_tf["pitch"] = rpy[1] + calibrated_tf["yaw"] = rpy[2] + else: + quat = transforms3d.quaternions.mat2quat(self.calibrated_transform[0:3, 0:3]) + calibrated_tf["qx"] = quat[1] + calibrated_tf["qy"] = quat[2] + calibrated_tf["qz"] = quat[3] + calibrated_tf["qw"] = quat[0] + + calibrated_d = { + self.ros_interface.image_frame: {self.ros_interface.lidar_frame: 
calibrated_tf} + } + with open(os.path.join(output_folder, "tf.yaml"), "w") as f: + yaml.dump(calibrated_d, f, sort_keys=False) + + # postprocess the calibrated transform + parent_frame = "sensor_kit_base_link" # TEMP + child_frame = self.ros_interface.image_frame.split("/")[0] + "/camera_link" # TEMP + parent_to_image_transform = self.ros_interface.get_transform( + parent_frame, + self.ros_interface.image_frame, + ) + if parent_to_image_transform is None: + return + lidar_to_child_transform = self.ros_interface.get_transform( + self.ros_interface.lidar_frame, + child_frame, + ) + if lidar_to_child_transform is None: + return + + postprocessed_transform = ( + parent_to_image_transform @ self.source_transform @ lidar_to_child_transform + ) + postprocessed_tf = { + "x": postprocessed_transform[0, 3].item(), + "y": postprocessed_transform[1, 3].item(), + "z": postprocessed_transform[2, 3].item(), + } + if use_rpy: + rpy = transforms3d.euler.mat2euler(postprocessed_transform[0:3, 0:3]) + postprocessed_tf["roll"] = rpy[0] + postprocessed_tf["pitch"] = rpy[1] + postprocessed_tf["yaw"] = rpy[2] + else: + quat = transforms3d.quaternions.mat2quat(postprocessed_transform[0:3, 0:3]) + postprocessed_tf["qx"] = quat[1] + postprocessed_tf["qy"] = quat[2] + postprocessed_tf["qz"] = quat[3] + postprocessed_tf["qw"] = quat[0] - self.ros_interface.save_calibration_tfs(output_folder) + postprocessed_d = {parent_frame: {child_frame: postprocessed_tf}} + with open(os.path.join(output_folder, "tf_postprocessed.yaml"), "w") as f: + yaml.dump(postprocessed_d, f, sort_keys=False) def load_calibration_callback(self): input_dir = QFileDialog.getExistingDirectory( @@ -444,6 +519,7 @@ def calibration_callback(self): self.calibration_api_button.setEnabled( self.calibration_api_request_received and self.calibrated_transform is not None ) + # pass tf so that ros interface can publish if allowed self.ros_interface.set_camera_lidar_transform(transform) def update_calibration_status(self): diff --git a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/ros_interface.py b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/ros_interface.py index 515b5ca8..7ba02814 100644 --- a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/ros_interface.py +++ b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/ros_interface.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2024 TIER IV, Inc. +# Copyright 2024-2025 TIER IV, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json -import os import threading import time @@ -25,7 +23,6 @@ from rclpy.callback_groups import MutuallyExclusiveCallbackGroup from rclpy.duration import Duration from rclpy.qos import qos_profile_system_default -from rosidl_runtime_py.convert import message_to_ordereddict from tf2_ros import TransformException from tier4_calibration_views.image_view_ros_interface import ImageViewRosInterface from tier4_calibration_views.utils import decompose_transformation_matrix @@ -34,7 +31,6 @@ from tier4_calibration_views.utils import transform_points from tier4_sensor_calibration_msgs.msg import CalibrationResult from tier4_sensor_calibration_msgs.srv import ExtrinsicCalibrator -import transforms3d class InteractiveCalibratorRosInterface(ImageViewRosInterface): @@ -138,20 +134,6 @@ def set_camera_lidar_transform(self, camera_optical_lidar_transform): self.output_transform_msg.child_frame_id = self.lidar_frame self.new_output_tf = True - def save_calibration_tfs(self, output_dir): - with self.lock: - d = message_to_ordereddict(self.output_transform_msg) - - q = self.output_transform_msg.transform.rotation - e = transforms3d.euler.quat2euler((q.w, q.x, q.y, q.z)) - - d["roll"] = e[0] - d["pitch"] = e[1] - d["yaw"] = e[2] - - with open(os.path.join(output_dir, "tf.json"), "w") as f: - f.write(json.dumps(d, indent=4, sort_keys=False)) - def point_callback(self, point: PointStamped): point_xyz = np.array([point.point.x, point.point.y, point.point.z]).reshape(1, 3) From 0b004489be449e0ec22f7ba76cacfb1103290643 Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Thu, 27 Nov 2025 11:04:39 +0900 Subject: [PATCH 04/20] generalize transform postprocess (resolve TEMPs) --- .../interactive_calibrator.py | 22 ++--- .../image_view_ros_interface.py | 89 ++++++++++++++++--- .../tier4_calibration_views/image_view_ui.py | 23 ++--- .../tag_based_pnp_calibrator.launch.xml | 11 ++- .../rdv/tag_based_pnp_calibrator.launch.xml | 23 ++++- .../x2/tag_based_pnp_calibrator.launch.xml | 34 +++++-- .../xx1/tag_based_pnp_calibrator.launch.xml | 23 ++++- .../tag_based_pnp_calibrator.launch.xml | 31 +++++-- .../tag_based_pnp_calibrator.launch.xml | 22 ++++- 9 files changed, 209 insertions(+), 69 deletions(-) diff --git a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py index 6821c52b..7577e1d4 100644 --- a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py +++ b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py @@ -404,24 +404,12 @@ def save_calibration_callback(self): yaml.dump(calibrated_d, f, sort_keys=False) # postprocess the calibrated transform - parent_frame = "sensor_kit_base_link" # TEMP - child_frame = self.ros_interface.image_frame.split("/")[0] + "/camera_link" # TEMP - parent_to_image_transform = self.ros_interface.get_transform( - parent_frame, - self.ros_interface.image_frame, + postprocessed_transform = self.ros_interface.get_parent_to_child_transform( + self.calibrated_transform ) - if parent_to_image_transform is None: - return - lidar_to_child_transform = self.ros_interface.get_transform( - self.ros_interface.lidar_frame, - child_frame, - ) - if lidar_to_child_transform is None: + if postprocessed_transform is None: return - postprocessed_transform = ( - parent_to_image_transform @ self.source_transform @ 
lidar_to_child_transform - ) postprocessed_tf = { "x": postprocessed_transform[0, 3].item(), "y": postprocessed_transform[1, 3].item(), @@ -439,7 +427,9 @@ def save_calibration_callback(self): postprocessed_tf["qz"] = quat[3] postprocessed_tf["qw"] = quat[0] - postprocessed_d = {parent_frame: {child_frame: postprocessed_tf}} + postprocessed_d = { + self.ros_interface.parent_frame: {self.ros_interface.child_frame: postprocessed_tf} + } with open(os.path.join(output_folder, "tf_postprocessed.yaml"), "w") as f: yaml.dump(postprocessed_d, f, sort_keys=False) diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py index d14d7d1f..7834d08b 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py @@ -57,6 +57,12 @@ def __init__(self, node_name="image_view"): self.declare_parameter("timer_period", 1.0) self.declare_parameter("delay_tolerance", 0.06) + self.declare_parameter("image_frame", "") + self.declare_parameter("lidar_frame", "") + self.declare_parameter("parent_frame", "") + self.declare_parameter("child_frame", "") + self.declare_parameter("should_reverse_transform", False) + self.use_rectified = self.get_parameter("use_rectified").get_parameter_value().bool_value self.use_compressed = self.get_parameter("use_compressed").get_parameter_value().bool_value self.timer_period = self.get_parameter("timer_period").get_parameter_value().double_value @@ -64,8 +70,13 @@ def __init__(self, node_name="image_view"): self.get_parameter("delay_tolerance").get_parameter_value().double_value ) - self.image_frame: Optional[str] = None - self.lidar_frame: Optional[str] = None + self.image_frame = self.get_parameter("image_frame").get_parameter_value().string_value + self.lidar_frame = self.get_parameter("lidar_frame").get_parameter_value().string_value + self.parent_frame = self.get_parameter("parent_frame").get_parameter_value().string_value + self.child_frame = self.get_parameter("child_frame").get_parameter_value().string_value + self.should_reverse_transform = ( + self.get_parameter("should_reverse_transform").get_parameter_value().bool_value + ) # Data self.pointcloud_queue: Deque[PointCloud2] = deque([], 5) @@ -112,14 +123,62 @@ def __init__(self, node_name="image_view"): self.timer = self.create_timer(self.timer_period, self.timer_callback) - def get_transform(self, parent: str, child: str): + def get_parent_to_child_transform(self, image_to_lidar_transform): + if ( + self.image_frame == "" + or self.lidar_frame == "" + or self.parent_frame == "" + or self.child_frame == "" + ): + return None with self.lock: try: - return tf_message_to_transform_matrix( - self.tf_buffer.lookup_transform( - parent, child, rclpy.time.Time(), timeout=Duration(seconds=0.0) + if self.should_reverse_transform: + # image -> child -> parent -> lidar + child_to_image_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.child_frame, + self.image_frame, + rclpy.time.Time(), + timeout=Duration(seconds=0.0), + ) ) - ) + parent_to_lidar_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.parent_frame, + self.lidar_frame, + rclpy.time.Time(), + timeout=Duration(seconds=0.0), + ) + ) + parent_to_child_transform = parent_to_lidar_transform @ np.linalg.inv( + child_to_image_transform @ image_to_lidar_transform + ) + 
return parent_to_child_transform + else: + # image -> parent -> child -> lidar + parent_to_image_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.parent_frame, + self.image_frame, + rclpy.time.Time(), + timeout=Duration(seconds=0.0), + ) + ) + lidar_to_child_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.lidar_frame, + self.child_frame, + rclpy.time.Time(), + timeout=Duration(seconds=0.0), + ) + ) + parent_to_child_transform = ( + parent_to_image_transform + @ image_to_lidar_transform + @ lidar_to_child_transform + ) + return parent_to_child_transform except TransformException: return None @@ -140,7 +199,12 @@ def set_external_calibration_points_callback(self, callback): self.external_calibration_points_callback = callback def pointcloud_callback(self, pointcloud_msg: PointCloud2): - self.lidar_frame = pointcloud_msg.header.frame_id + if self.lidar_frame != pointcloud_msg.header.frame_id: + self.get_logger().error( + f"Unexpected lidar frame {pointcloud_msg.header.frame_id} (should be {self.lidar_frame})" + ) + return + self.pointcloud_queue.append(pointcloud_msg) self.check_sync() @@ -149,8 +213,13 @@ def image_callback(self, image_msg: Union[CompressedImage, Image]): self.check_sync() def camera_info_callback(self, camera_info_msg: CameraInfo): + if self.image_frame != camera_info_msg.header.frame_id: + self.get_logger().error( + f"Unexpected image frame {camera_info_msg.header.frame_id} (should be {self.image_frame})" + ) + return + self.camera_info = camera_info_msg - self.image_frame = camera_info_msg.header.frame_id if self.use_rectified: self.camera_info.k[0] = self.camera_info.p[0] @@ -223,7 +292,7 @@ def calibration_points_callback(self, calibration_points: CalibrationPoints): def timer_callback(self): with self.lock: - if self.image_frame is None or self.lidar_frame is None: + if self.image_frame == "" or self.lidar_frame == "": return try: diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py index d351f5d8..a0d378c8 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py @@ -378,31 +378,18 @@ def tf_source_callback(self, string): self.tf_source_status_text.setPlainText(status_text) # postprocess the source transform - parent_frame = "sensor_kit_base_link" # TEMP - child_frame = self.ros_interface.image_frame.split("/")[0] + "/camera_link" # TEMP - parent_to_image_transform = self.ros_interface.get_transform( - parent_frame, - self.ros_interface.image_frame, + postprocessed_transform = self.ros_interface.get_parent_to_child_transform( + self.source_transform ) - if parent_to_image_transform is None: + if postprocessed_transform is None: return - lidar_to_child_transform = self.ros_interface.get_transform( - self.ros_interface.lidar_frame, - child_frame, - ) - if lidar_to_child_transform is None: - return - - postprocessed_transform = ( - parent_to_image_transform @ self.source_transform @ lidar_to_child_transform - ) # update (append) status text postprocessed_xyz = postprocessed_transform[0:3, 3] postprocessed_rpy = transforms3d.euler.mat2euler(postprocessed_transform[0:3, 0:3]) status_text += ( - f"\n{parent_frame}\n" - f"-> {child_frame}:\n" + f"\n{self.ros_interface.parent_frame}\n" + f"-> {self.ros_interface.child_frame}:\n" f"x: {postprocessed_xyz[0]:.6f}\n" f"y: 
{postprocessed_xyz[1]:.6f}\n" f"z: {postprocessed_xyz[2]:.6f}\n" diff --git a/sensor_calibration_manager/launch/default_project/tag_based_pnp_calibrator.launch.xml b/sensor_calibration_manager/launch/default_project/tag_based_pnp_calibrator.launch.xml index 7210951c..76d49e3d 100644 --- a/sensor_calibration_manager/launch/default_project/tag_based_pnp_calibrator.launch.xml +++ b/sensor_calibration_manager/launch/default_project/tag_based_pnp_calibrator.launch.xml @@ -1,6 +1,6 @@ - - + + @@ -56,7 +56,9 @@ - + + + @@ -67,6 +69,9 @@ + + + diff --git a/sensor_calibration_manager/launch/rdv/tag_based_pnp_calibrator.launch.xml b/sensor_calibration_manager/launch/rdv/tag_based_pnp_calibrator.launch.xml index bdcbd0ca..465e4736 100644 --- a/sensor_calibration_manager/launch/rdv/tag_based_pnp_calibrator.launch.xml +++ b/sensor_calibration_manager/launch/rdv/tag_based_pnp_calibrator.launch.xml @@ -20,11 +20,17 @@ + + + + + + + + - - @@ -56,7 +62,12 @@ - + + + + + + @@ -67,6 +78,12 @@ + + + + + + diff --git a/sensor_calibration_manager/launch/x2/tag_based_pnp_calibrator.launch.xml b/sensor_calibration_manager/launch/x2/tag_based_pnp_calibrator.launch.xml index 9cf0568f..fca4fd17 100644 --- a/sensor_calibration_manager/launch/x2/tag_based_pnp_calibrator.launch.xml +++ b/sensor_calibration_manager/launch/x2/tag_based_pnp_calibrator.launch.xml @@ -29,11 +29,9 @@ - - - - - + + + @@ -42,7 +40,20 @@ + + + + + + + + + + + + + @@ -74,7 +85,12 @@ - + + + + + + @@ -85,6 +101,12 @@ + + + + + + diff --git a/sensor_calibration_manager/launch/xx1/tag_based_pnp_calibrator.launch.xml b/sensor_calibration_manager/launch/xx1/tag_based_pnp_calibrator.launch.xml index 87b510fa..f3efa250 100644 --- a/sensor_calibration_manager/launch/xx1/tag_based_pnp_calibrator.launch.xml +++ b/sensor_calibration_manager/launch/xx1/tag_based_pnp_calibrator.launch.xml @@ -20,11 +20,17 @@ + + + + + + + + - - @@ -56,7 +62,12 @@ - + + + + + + @@ -67,6 +78,12 @@ + + + + + + diff --git a/sensor_calibration_manager/launch/xx1_15/tag_based_pnp_calibrator.launch.xml b/sensor_calibration_manager/launch/xx1_15/tag_based_pnp_calibrator.launch.xml index c8e1f861..5d5ee875 100644 --- a/sensor_calibration_manager/launch/xx1_15/tag_based_pnp_calibrator.launch.xml +++ b/sensor_calibration_manager/launch/xx1_15/tag_based_pnp_calibrator.launch.xml @@ -11,7 +11,19 @@ + + + + + + + + + + + + @@ -23,12 +35,6 @@ - - - - - - @@ -58,7 +64,12 @@ - + + + + + + @@ -69,6 +80,12 @@ + + + + + + diff --git a/sensor_calibration_manager/launch/xx1_gen2/tag_based_pnp_calibrator.launch.xml b/sensor_calibration_manager/launch/xx1_gen2/tag_based_pnp_calibrator.launch.xml index 76248a39..f40121af 100644 --- a/sensor_calibration_manager/launch/xx1_gen2/tag_based_pnp_calibrator.launch.xml +++ b/sensor_calibration_manager/launch/xx1_gen2/tag_based_pnp_calibrator.launch.xml @@ -23,12 +23,17 @@ + + + + + + + - - @@ -62,7 +67,12 @@ - + + + + + + @@ -73,6 +83,12 @@ + + + + + + From 84a2c4aed9853ee1bddbf0ff23192ac7f058bb5e Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Thu, 27 Nov 2025 16:41:47 +0900 Subject: [PATCH 05/20] Add "open file" TF source --- .../interactive_calibrator.py | 14 +-- .../image_view_ros_interface.py | 60 ++++++++++++ .../tier4_calibration_views/image_view_ui.py | 98 ++++++++++++++++--- 3 files changed, 153 insertions(+), 19 deletions(-) diff --git a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py 
b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py index 7577e1d4..998f4af6 100644 --- a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py +++ b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py @@ -142,11 +142,11 @@ def calibration_api_button_callback(): calibration_status_helper_label.setText("(calibration vs. tf source)") self.calibration_status_points_label = QLabel() - self.calibration_status_points_label.setText("#points: ") + self.calibration_status_points_label.setText("# pairs: ") self.calibration_status_points_label.setAlignment(Qt.AlignTop | Qt.AlignLeft) self.calibration_status_error_label = QLabel() - self.calibration_status_error_label.setText("r.error: ") + self.calibration_status_error_label.setText("R.error: ") self.calibration_status_error_label.setAlignment(Qt.AlignTop | Qt.AlignLeft) self.calibration_status_inliers_label = QLabel() @@ -334,8 +334,8 @@ def sensor_data_callback(self): with self.lock: self.calibrator.set_camera_info(self.camera_info_tmp.k, self.camera_info_tmp.d) - def tf_source_callback(self, string): - super().tf_source_callback(string) + def tf_source_callback(self, source): + super().tf_source_callback(source) self.update_calibration_status() def save_calibration_callback(self): @@ -500,11 +500,11 @@ def calibration_callback(self): return if self.calibrated_transform is None: - self.tf_source_combobox.addItem("Calibrator") + self.tf_source_combobox.insertItem(0, "Calibrator", "calibrator") # prepend self.calibrated_transform = transform - self.tf_source_callback(self.tf_source_combobox.currentText()) + self.tf_source_callback(self.tf_source_combobox.currentData()) self.calibration_api_button.setEnabled( self.calibration_api_request_received and self.calibrated_transform is not None @@ -548,7 +548,7 @@ def update_calibration_status(self): source_error_string = "" source_inliers = np.array([]) - self.calibration_status_points_label.setText(f"#points: {len(object_calibration_points)}") + self.calibration_status_points_label.setText(f"# pairs: {len(object_calibration_points)}") self.calibration_status_error_label.setText( f"R.error: {calibrated_error_string} / {source_error_string}" ) diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py index 7834d08b..01543900 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py @@ -182,6 +182,66 @@ def get_parent_to_child_transform(self, image_to_lidar_transform): except TransformException: return None + def get_image_to_lidar_transform(self, parent_to_child_transform): + if ( + self.image_frame == "" + or self.lidar_frame == "" + or self.parent_frame == "" + or self.child_frame == "" + ): + return None + with self.lock: + try: + if self.should_reverse_transform: + # image -> child -> parent -> lidar + child_to_image_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.child_frame, + self.image_frame, + rclpy.time.Time(), + timeout=Duration(seconds=0.0), + ) + ) + parent_to_lidar_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.parent_frame, + self.lidar_frame, + rclpy.time.Time(), + timeout=Duration(seconds=0.0), + ) + 
) + image_to_lidar_transform = ( + np.linalg.inv(parent_to_child_transform @ child_to_image_transform) + @ parent_to_lidar_transform + ) + return image_to_lidar_transform + else: + # image -> parent -> child -> lidar + image_to_parent_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.image_frame, + self.parent_frame, + rclpy.time.Time(), + timeout=Duration(seconds=0.0), + ) + ) + child_to_lidar_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.child_frame, + self.lidar_frame, + rclpy.time.Time(), + timeout=Duration(seconds=0.0), + ) + ) + image_to_lidar_transform = ( + image_to_parent_transform + @ parent_to_child_transform + @ child_to_lidar_transform + ) + return image_to_lidar_transform + except TransformException: + return None + def set_sensor_data_callback(self, callback): with self.lock: self.sensor_data_callback = callback diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py index a0d378c8..3349ceb4 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py @@ -24,6 +24,7 @@ from PySide2.QtWidgets import QCheckBox from PySide2.QtWidgets import QComboBox from PySide2.QtWidgets import QDoubleSpinBox +from PySide2.QtWidgets import QFileDialog from PySide2.QtWidgets import QGraphicsScene from PySide2.QtWidgets import QGraphicsView from PySide2.QtWidgets import QGroupBox @@ -39,6 +40,7 @@ from tier4_calibration_views.image_view import ImageView from tier4_calibration_views.image_view_ros_interface import ImageViewRosInterface import transforms3d +import yaml class ImageViewUI(QMainWindow): @@ -331,7 +333,16 @@ def make_source_options(self): tf_source_label = QLabel("TF source:") self.tf_source_combobox = QComboBox() - self.tf_source_combobox.currentTextChanged.connect(self.tf_source_callback) + self.tf_source_combobox.addItem("Load from File", "file") + self.tf_source_combobox.setCurrentIndex(-1) # do not select "file" by default + + def tf_source_index_callback(index): + if index != -1: + self.tf_source_callback(self.tf_source_combobox.itemData(index)) + + # `activated` is favored over `currentTextChanged`; + # reselecting "file" should show file dialog regardless of the current selection + self.tf_source_combobox.activated.connect(tf_source_index_callback) self.tf_source_status_text = QPlainTextEdit() self.tf_source_status_text.setReadOnly(True) @@ -344,18 +355,74 @@ def make_source_options(self): self.source_options_group.setLayout(source_options_layout) - def tf_source_callback(self, string): - string = string.lower() - - if "current" in string: + def tf_source_callback(self, source): + if source == "current": assert self.current_transform is not None self.source_transform = self.current_transform - elif "initial" in string: + elif source == "initial": assert self.initial_transform is not None self.source_transform = self.initial_transform - elif "calibrator" in string: + elif source == "calibrator": assert self.calibrated_transform is not None self.source_transform = self.calibrated_transform + elif source == "file": + # Reads TF in yaml format, regardless of the choice: + # - xyz + quaternion or xyz + rpy + # - preprocessed or non-preprocessed entry + filename, _ = QFileDialog.getOpenFileName( + self, "Open TF File", ".", "YAML files (*.yaml)" + ) + if len(filename) == 0: + return + try: + with 
open(filename, "r") as f: + data = yaml.safe_load(f) + + entry = None + is_preprocessed_entry = False + + try: + entry = data[self.ros_interface.parent_frame][ + self.ros_interface.child_frame + ] + is_preprocessed_entry = True + except KeyError: + # Fallback to non-preprocessed entry. + # If this fails, silently return. + entry = data[self.ros_interface.image_frame][self.ros_interface.lidar_frame] + + x = float(entry.get("x")) + y = float(entry.get("y")) + z = float(entry.get("z")) + + if all(key in entry for key in ("roll", "pitch", "yaw")): + roll = float(entry["roll"]) + pitch = float(entry["pitch"]) + yaw = float(entry["yaw"]) + rot = transforms3d.euler.euler2mat(roll, pitch, yaw) + elif all(key in entry for key in ("qx", "qy", "qz", "qw")): + qx = float(entry["qx"]) + qy = float(entry["qy"]) + qz = float(entry["qz"]) + qw = float(entry["qw"]) + rot = transforms3d.quaternions.quat2mat((qw, qx, qy, qz)) + else: + return + + mat = np.eye(4) + mat[0:3, 0:3] = rot + mat[0:3, 3] = [x, y, z] + + if is_preprocessed_entry: + self.source_transform = self.ros_interface.get_image_to_lidar_transform(mat) + else: + self.source_transform = mat + except Exception as ex: + self.ros_interface.get_logger().error( + f"Could not load valid TF from {filename}. {ex}" + ) + return + else: raise NotImplementedError @@ -463,17 +530,24 @@ def transform_callback(self): if self.initial_transform is None: self.initial_transform = np.copy(self.transform_tmp) self.current_transform = self.initial_transform - self.tf_source_combobox.addItem("Initial /tf") - self.tf_source_combobox.addItem("Current /tf") + + self.tf_source_combobox.addItem("Initial /tf", "initial") + self.tf_source_combobox.addItem("Current /tf", "current") self.image_view.update() changed = (self.transform_tmp != self.current_transform).any() self.current_transform = np.copy(self.transform_tmp) - # the Current /tf case - if "current" in self.tf_source_combobox.currentText().lower() and changed: - self.tf_source_callback(self.tf_source_combobox.currentText()) + # if not selected, switch to "initial" and force update + if self.tf_source_combobox.currentIndex() == -1: + self.tf_source_combobox.setCurrentIndex(self.tf_source_combobox.findData("initial")) + self.tf_source_callback("initial") + + # Force update on the current /tf case + source = self.tf_source_combobox.currentData() + if "current" == source and changed: + self.tf_source_callback(source) def image_click_callback(self, x, y): pass From 37fe8934fcc3bd510b4f96c87308b62d816bd177 Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Fri, 28 Nov 2025 01:28:00 +0900 Subject: [PATCH 06/20] Add "open file" camera info source --- .../interactive_calibrator.py | 89 ++++++++----- .../image_view_ros_interface.py | 15 ++- .../tier4_calibration_views/image_view_ui.py | 121 +++++++++++++++--- 3 files changed, 171 insertions(+), 54 deletions(-) diff --git a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py index 998f4af6..ddde3eef 100644 --- a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py +++ b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py @@ -46,11 +46,16 @@ def float_representer(dumper, value): - text = "{0:.6f}".format(value) # noqa E231 + text = f"{round(value, 6)}" # noqa E231 return 
dumper.represent_scalar("tag:yaml.org,2002:float", text) # noqa E231 +def list_representer(dumper, value): + return dumper.represent_sequence("tag:yaml.org,2002:seq", value, flow_style=True) + + yaml.add_representer(float, float_representer) +yaml.add_representer(list, list_representer) class InteractiveCalibratorUI(ImageViewUI): @@ -188,12 +193,21 @@ def make_calibration_options(self): self.calibration_button.setEnabled(False) def calibration_intrinsics_callback(): + is_init = self.optimized_camera_info is None self.optimized_camera_info = camera_lidar_calibrate_intrinsics( np.array(self.object_calibration_points + self.external_object_calibration_points), np.array(self.image_calibration_points + self.external_image_calibration_points), - self.camera_info, + self.source_camera_info, ) - self.use_optimized_intrinsics_checkbox.setEnabled(True) + + if is_init: + self.camera_info_source_combobox.insertItem( + 0, "Calibrator", "calibrator" + ) # prepend + + # Force update + if self.camera_info_source_combobox.currentData() == "calibrator": + self.camera_info_source_callback("calibrator") self.calibration2_button = QPushButton("Calibrate intrinsics\n(experimental)") self.calibration2_button.clicked.connect(calibration_intrinsics_callback) @@ -205,7 +219,7 @@ def calibration_intrinsics_callback(): self.pnp_method_combobox.addItem("SQPNP") def use_ransac_callback(value): - if self.camera_info is None: + if self.source_camera_info is None: return if value == Qt.Checked: @@ -219,25 +233,6 @@ def use_ransac_callback(value): self.use_ransac_checkbox.stateChanged.connect(use_ransac_callback) self.use_ransac_checkbox.setChecked(False) - def use_optimized_intrinsics_callback(state): - if state == Qt.Checked: - self.image_view.set_camera_info( - self.optimized_camera_info.k, self.optimized_camera_info.d - ) - self.calibrator.set_camera_info( - self.optimized_camera_info.k, self.optimized_camera_info.d - ) - else: - self.image_view.set_camera_info(self.camera_info.k, self.camera_info.d) - self.calibrator.set_camera_info(self.camera_info.k, self.camera_info.d) - - self.use_optimized_intrinsics_checkbox = QCheckBox("Use optimized Intrinsics") - self.use_optimized_intrinsics_checkbox.stateChanged.connect( - use_optimized_intrinsics_callback - ) - self.use_optimized_intrinsics_checkbox.setEnabled(False) - self.use_optimized_intrinsics_checkbox.setChecked(False) - def pnp_min_points_callback(): self.calibration_possible = ( len(self.image_calibration_points) + len(self.external_image_calibration_points) @@ -270,7 +265,6 @@ def ransac_inlier_error_callback(value): calibration_options_layout.addWidget(pnp_method_label) calibration_options_layout.addWidget(self.pnp_method_combobox) calibration_options_layout.addWidget(self.use_ransac_checkbox) - calibration_options_layout.addWidget(self.use_optimized_intrinsics_checkbox) calibration_options_layout.addWidget(pnp_min_points_label) calibration_options_layout.addWidget(self.pnp_min_points_spinbox) calibration_options_layout.addWidget(ransac_inlier_error_label) @@ -329,15 +323,19 @@ def screenshot_callback(): data_collection_options_layout.addStretch(1) self.data_collection_options_group.setLayout(data_collection_options_layout) - def sensor_data_callback(self): - super().sensor_data_callback() - with self.lock: - self.calibrator.set_camera_info(self.camera_info_tmp.k, self.camera_info_tmp.d) - def tf_source_callback(self, source): super().tf_source_callback(source) self.update_calibration_status() + def camera_info_source_callback(self, source): + 
super().camera_info_source_callback(source) + with self.lock: + if self.source_camera_info is not None: + self.calibrator.set_camera_info( + self.source_camera_info.k, + self.source_camera_info.d, + ) + def save_calibration_callback(self): output_folder = QFileDialog.getExistingDirectory( None, @@ -372,6 +370,32 @@ def save_calibration_callback(self): if self.optimized_camera_info is not None: d = message_to_ordereddict(self.optimized_camera_info) + d = {} + d["image_width"] = self.optimized_camera_info.width + d["image_height"] = self.optimized_camera_info.height + d["camera_name"] = self.optimized_camera_info.header.frame_id.split("/")[0] # TEMP + d["camera_matrix"] = { + "rows": 3, + "cols": 3, + "data": np.array(self.optimized_camera_info.k).reshape(-1).tolist(), + } + d["distortion_model"] = self.optimized_camera_info.distortion_model + d["distortion_coefficients"] = { + "rows": 1, + "cols": len(self.optimized_camera_info.d), + "data": np.array(self.optimized_camera_info.d).reshape(-1).tolist(), + } + d["projection_matrix"] = { + "rows": 3, + "cols": 4, + "data": np.array(self.optimized_camera_info.p).reshape(-1).tolist(), + } + d["rectification_matrix"] = { + "rows": 3, + "cols": 3, + "data": np.array(self.optimized_camera_info.r).reshape(-1).tolist(), + } + with open(os.path.join(output_folder, "optimized_camera_info.yaml"), "w") as f: yaml.dump(d, f, sort_keys=False) @@ -474,7 +498,7 @@ def load_calibration_callback(self): ) def calibration_callback(self): - if self.camera_info is None: + if self.source_camera_info is None: return # Configure the calibrator @@ -504,7 +528,10 @@ def calibration_callback(self): self.calibrated_transform = transform - self.tf_source_callback(self.tf_source_combobox.currentData()) + # Force update + self.update_calibration_status() + if self.tf_source_combobox.currentData() == "calibrator": + self.tf_source_callback("calibrator") self.calibration_api_button.setEnabled( self.calibration_api_request_received and self.calibrated_transform is not None diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py index 01543900..8f88a721 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py @@ -282,10 +282,17 @@ def camera_info_callback(self, camera_info_msg: CameraInfo): self.camera_info = camera_info_msg if self.use_rectified: - self.camera_info.k[0] = self.camera_info.p[0] - self.camera_info.k[2] = self.camera_info.p[2] - self.camera_info.k[4] = self.camera_info.p[5] - self.camera_info.k[5] = self.camera_info.p[6] + self.camera_info.k = [ + self.camera_info.p[0], + 0.0, + self.camera_info.p[2], + 0.0, + self.camera_info.p[5], + self.camera_info.p[6], + 0.0, + 0.0, + 1.0, + ] self.camera_info.d = 0.0 * self.camera_info.d def check_sync(self): diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py index 3349ceb4..235da6a2 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py @@ -36,6 +36,7 @@ from PySide2.QtWidgets import QVBoxLayout from PySide2.QtWidgets import QWidget import numpy as np +from sensor_msgs.msg import CameraInfo from tier4_calibration_views.image_view import CustomQGraphicsView from 
tier4_calibration_views.image_view import ImageView from tier4_calibration_views.image_view_ros_interface import ImageViewRosInterface @@ -74,12 +75,15 @@ def __init__(self, ros_interface: ImageViewRosInterface): self.external_object_calibration_points_tmp = None self.external_image_calibration_points_tmp = None self.pixmap_tmp = None - self.camera_info_tmp = None + self.message_camera_info_tmp = None self.pointcloud_tmp = None self.delay_tmp = None # Calibration variables - self.camera_info = None + self.message_camera_info = None + self.optimized_camera_info = None + self.source_camera_info = None + self.initial_transform = None self.current_transform = None self.calibrated_transform = None @@ -348,10 +352,29 @@ def tf_source_index_callback(index): self.tf_source_status_text.setReadOnly(True) self.tf_source_status_text.setPlainText("TFs not available") + camera_info_source_label = QLabel("Camera info source:") + self.camera_info_source_combobox = QComboBox() + self.camera_info_source_combobox.addItem("ROS topic", "message") + self.camera_info_source_combobox.addItem("Load from File", "file") + self.camera_info_source_combobox.setCurrentIndex(0) + + def camera_info_source_index_callback(index): + if index != -1: + self.camera_info_source_callback(self.camera_info_source_combobox.itemData(index)) + + self.camera_info_source_combobox.activated.connect(camera_info_source_index_callback) + + self.camera_info_source_status_text = QPlainTextEdit() + self.camera_info_source_status_text.setReadOnly(True) + self.camera_info_source_status_text.setPlainText("Camera info not available") + source_options_layout = QVBoxLayout() source_options_layout.addWidget(tf_source_label) source_options_layout.addWidget(self.tf_source_combobox) source_options_layout.addWidget(self.tf_source_status_text) + source_options_layout.addWidget(camera_info_source_label) + source_options_layout.addWidget(self.camera_info_source_combobox) + source_options_layout.addWidget(self.camera_info_source_status_text) self.source_options_group.setLayout(source_options_layout) @@ -435,12 +458,12 @@ def tf_source_callback(self, source): status_text = ( f"{self.ros_interface.image_frame}\n" f"-> {self.ros_interface.lidar_frame}:\n" - f"x: {source_xyz[0]:.6f}\n" - f"y: {source_xyz[1]:.6f}\n" - f"z: {source_xyz[2]:.6f}\n" - f"roll: {source_rpy[0]:.6f}\n" - f"pitch: {source_rpy[1]:.6f}\n" - f"yaw: {source_rpy[2]:.6f}\n" + f"x: {round(source_xyz[0], 6)}\n" + f"y: {round(source_xyz[1], 6)}\n" + f"z: {round(source_xyz[2], 6)}\n" + f"roll: {round(source_rpy[0], 6)}\n" + f"pitch: {round(source_rpy[1], 6)}\n" + f"yaw: {round(source_rpy[2], 6)}\n" ) self.tf_source_status_text.setPlainText(status_text) @@ -457,15 +480,71 @@ def tf_source_callback(self, source): status_text += ( f"\n{self.ros_interface.parent_frame}\n" f"-> {self.ros_interface.child_frame}:\n" - f"x: {postprocessed_xyz[0]:.6f}\n" - f"y: {postprocessed_xyz[1]:.6f}\n" - f"z: {postprocessed_xyz[2]:.6f}\n" - f"roll: {postprocessed_rpy[0]:.6f}\n" - f"pitch: {postprocessed_rpy[1]:.6f}\n" - f"yaw: {postprocessed_rpy[2]:.6f}\n" + f"x: {round(postprocessed_xyz[0], 6)}\n" + f"y: {round(postprocessed_xyz[1], 6)}\n" + f"z: {round(postprocessed_xyz[2], 6)}\n" + f"roll: {round(postprocessed_rpy[0], 6)}\n" + f"pitch: {round(postprocessed_rpy[1], 6)}\n" + f"yaw: {round(postprocessed_rpy[2], 6)}\n" ) self.tf_source_status_text.setPlainText(status_text) + def camera_info_source_callback(self, source): + if source == "message": + # # If we add item "message" only after the message arrives, then we 
can: + # assert self.message_camera_info is not None + if self.message_camera_info is None: + return + self.source_camera_info = self.message_camera_info + elif source == "calibrator": + assert self.optimized_camera_info is not None + self.source_camera_info = self.optimized_camera_info + elif source == "file": + filename, _ = QFileDialog.getOpenFileName( + self, "Open camera info File", ".", "YAML files (*.yaml)" + ) + if len(filename) == 0: + return + try: + with open(filename, "r") as f: + data = yaml.safe_load(f) + + camera_info = CameraInfo() + camera_info.width = data["image_width"] + camera_info.height = data["image_height"] + camera_info.distortion_model = data["distortion_model"] + + camera_info.d = data["distortion_coefficients"]["data"] + camera_info.k = data["camera_matrix"]["data"] + camera_info.p = data["projection_matrix"]["data"] + camera_info.r = data["rectification_matrix"]["data"] + + self.source_camera_info = camera_info + except Exception as ex: + self.ros_interface.get_logger().error( + f"Could not load valid CameraInfo from {filename}. {ex}" + ) + return + else: + raise NotImplementedError + + self.image_view.set_camera_info(self.source_camera_info.k, self.source_camera_info.d) + self.image_view.update() + self.graphics_view.update() + + # update status text + K = self.source_camera_info.k + D_str = ", ".join([f"{round(p, 6)}" for p in self.source_camera_info.d]) + status_text = ( + f"D: [{D_str}]\n" + f"K: [\n" + f" {round(K[0], 6)}, {round(K[1], 6)}, {round(K[2], 6)},\n" + f" {round(K[3], 6)}, {round(K[4], 6)}, {round(K[5], 6)},\n" + f" {round(K[6], 6)}, {round(K[7], 6)}, {round(K[8], 6)},\n" + f"]\n" + ) + self.camera_info_source_status_text.setPlainText(status_text) + def sensor_data_ros_callback(self, img, camera_info, pointcloud, delay): # This method is executed in the ROS spin thread with self.lock: @@ -477,7 +556,7 @@ def sensor_data_ros_callback(self, img, camera_info, pointcloud, delay): self.pixmap_tmp = QPixmap(q_img) self.pointcloud_tmp = pointcloud - self.camera_info_tmp = camera_info + self.message_camera_info_tmp = camera_info self.delay_tmp = delay self.sensor_data_signal.emit() @@ -508,8 +587,12 @@ def sensor_data_callback(self): self.image_view.set_pixmap(self.pixmap_tmp) self.image_view.set_pointcloud(self.pointcloud_tmp) - self.camera_info = self.camera_info_tmp - self.image_view.set_camera_info(self.camera_info_tmp.k, self.camera_info_tmp.d) + self.message_camera_info = self.message_camera_info_tmp + + # Force update on the message source case + source = self.camera_info_source_combobox.currentData() + if "message" == source: + self.camera_info_source_callback(source) self.image_view.update() self.graphics_view.update() @@ -536,7 +619,7 @@ def transform_callback(self): self.image_view.update() - changed = (self.transform_tmp != self.current_transform).any() + is_changed = (self.transform_tmp != self.current_transform).any() self.current_transform = np.copy(self.transform_tmp) # if not selected, switch to "initial" and force update @@ -546,7 +629,7 @@ def transform_callback(self): # Force update on the current /tf case source = self.tf_source_combobox.currentData() - if "current" == source and changed: + if "current" == source and is_changed: self.tf_source_callback(source) def image_click_callback(self, x, y): From 958d39395d24542f4d339b50f8ac61ffa7ee720f Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Fri, 28 Nov 2025 10:42:58 +0900 Subject: [PATCH 07/20] spells and text displays --- .../tier4_calibration_views/image_view_ui.py | 6 +++--- 1 
file changed, 3 insertions(+), 3 deletions(-) diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py index 235da6a2..6dc09db4 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py @@ -345,7 +345,7 @@ def tf_source_index_callback(index): self.tf_source_callback(self.tf_source_combobox.itemData(index)) # `activated` is favored over `currentTextChanged`; - # reselecting "file" should show file dialog regardless of the current selection + # selecting "file" again should show file dialog regardless of the current selection self.tf_source_combobox.activated.connect(tf_source_index_callback) self.tf_source_status_text = QPlainTextEdit() @@ -614,8 +614,8 @@ def transform_callback(self): self.initial_transform = np.copy(self.transform_tmp) self.current_transform = self.initial_transform - self.tf_source_combobox.addItem("Initial /tf", "initial") - self.tf_source_combobox.addItem("Current /tf", "current") + self.tf_source_combobox.addItem("ROS topic (initial)", "initial") + self.tf_source_combobox.addItem("ROS topic (current)", "current") self.image_view.update() From 5201af80baa01f9a6709b25e9754b2a62d29720b Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Mon, 1 Dec 2025 14:44:35 +0900 Subject: [PATCH 08/20] support .json inputs and validations --- .../tier4_calibration_views/image_view_ui.py | 209 +++++++++++++----- 1 file changed, 156 insertions(+), 53 deletions(-) diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py index 6dc09db4..585a17a1 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py @@ -15,6 +15,7 @@ # limitations under the License. 
import copy +import json import threading from PySide2.QtCore import Qt @@ -44,6 +45,125 @@ import yaml +def load_transform_from_yaml(yaml_data, parent_frame: str, child_frame: str): + entry = yaml_data[parent_frame][child_frame] + + x = float(entry.get("x")) + y = float(entry.get("y")) + z = float(entry.get("z")) + + if all(key in entry for key in ("roll", "pitch", "yaw")): + roll = float(entry["roll"]) + pitch = float(entry["pitch"]) + yaw = float(entry["yaw"]) + rot = transforms3d.euler.euler2mat(roll, pitch, yaw) + elif all(key in entry for key in ("qx", "qy", "qz", "qw")): + qx = float(entry["qx"]) + qy = float(entry["qy"]) + qz = float(entry["qz"]) + qw = float(entry["qw"]) + rot = transforms3d.quaternions.quat2mat((qw, qx, qy, qz)) + else: + rot = np.eye(3) + + mat = np.eye(4) + mat[0:3, 0:3] = rot + mat[0:3, 3] = [x, y, z] + + return mat + + +def load_transform_from_json(json_data, parent_frame: str, child_frame: str): + if ( + json_data["header"]["frame_id"] != parent_frame + or json_data["child_frame_id"] != child_frame + ): + raise KeyError( + f"Expected transform from {parent_frame} to {child_frame}, but got from {json_data['header']['frame_id']} to {json_data['child_frame_id']}" + ) + + translation = json_data["transform"]["translation"] + rotation = json_data["transform"]["rotation"] + + x = translation["x"] + y = translation["y"] + z = translation["z"] + + qx = rotation["x"] + qy = rotation["y"] + qz = rotation["z"] + qw = rotation["w"] + rot = transforms3d.quaternions.quat2mat((qw, qx, qy, qz)) + + mat = np.eye(4) + mat[0:3, 0:3] = rot + mat[0:3, 3] = [x, y, z] + + return mat + + +def load_camera_info_from_yaml(yaml_data) -> CameraInfo: + camera_info = CameraInfo() + camera_info.width = yaml_data["image_width"] + camera_info.height = yaml_data["image_height"] + camera_info.distortion_model = yaml_data["distortion_model"] + + camera_info.d = yaml_data["distortion_coefficients"]["data"] + camera_info.k = yaml_data["camera_matrix"]["data"] + camera_info.p = yaml_data["projection_matrix"]["data"] + camera_info.r = yaml_data["rectification_matrix"]["data"] + + return camera_info + + +def load_camera_info_from_json(json_data) -> CameraInfo: + camera_info = CameraInfo() + + camera_info.header.frame_id = json_data["header"]["frame_id"] + camera_info.width = json_data["width"] + camera_info.height = json_data["height"] + camera_info.distortion_model = json_data["distortion_model"] + camera_info.d = json_data["d"] + camera_info.k = json_data["k"] + camera_info.p = json_data["p"] + camera_info.r = json_data["r"] + + return camera_info + + +def validate_camera_info(camera_info: CameraInfo) -> bool: + if camera_info is None: + return False + + if len(camera_info.k) != 9: + return False + if camera_info.k[0] <= 0 or camera_info.k[4] <= 0: + # fx, fy should be positive + return False + if ( + camera_info.k[1] != 0 + or camera_info.k[3] != 0 + or camera_info.k[6] != 0 + or camera_info.k[7] != 0 + ): + # skew and other parameters should be zero + return False + if len(camera_info.d) not in [4, 5, 8, 12, 14]: + return False + if len(camera_info.r) != 9: + return False + if len(camera_info.p) != 12: + return False + if camera_info.distortion_model not in [ + "plumb_bob", + "rational_polynomial", + ]: + return False + if camera_info.width <= 0 or camera_info.height <= 0: + return False + return True + + class ImageViewUI(QMainWindow): sensor_data_signal = Signal() sensor_data_delay_signal = Signal(float) @@ -389,56 +509,36 @@ def tf_source_callback(self, source): assert 
self.calibrated_transform is not None self.source_transform = self.calibrated_transform elif source == "file": - # Reads TF in yaml format, regardless of the choice: + # Reads TF regardless of the format: + # - yaml or json (output from previous versions) # - xyz + quaternion or xyz + rpy # - preprocessed or non-preprocessed entry filename, _ = QFileDialog.getOpenFileName( - self, "Open TF File", ".", "YAML files (*.yaml)" + self, "Open TF File", ".", "YAML file (*.yaml);;JSON file (*.json)" ) if len(filename) == 0: return try: - with open(filename, "r") as f: - data = yaml.safe_load(f) + file_to_data, load_transform = ( + (json.load, load_transform_from_json) + if filename.endswith(".json") + else (yaml.safe_load, load_transform_from_yaml) + ) - entry = None - is_preprocessed_entry = False + with open(filename, "r") as f: + data = file_to_data(f) try: - entry = data[self.ros_interface.parent_frame][ - self.ros_interface.child_frame - ] - is_preprocessed_entry = True + mat = load_transform( + data, self.ros_interface.parent_frame, self.ros_interface.child_frame + ) + self.source_transform = self.ros_interface.get_image_to_lidar_transform(mat) except KeyError: - # Fallback to non-preprocessed entry. + # Fallback to non-postprocessed entry. # If this fails, silently return. - entry = data[self.ros_interface.image_frame][self.ros_interface.lidar_frame] - - x = float(entry.get("x")) - y = float(entry.get("y")) - z = float(entry.get("z")) - - if all(key in entry for key in ("roll", "pitch", "yaw")): - roll = float(entry["roll"]) - pitch = float(entry["pitch"]) - yaw = float(entry["yaw"]) - rot = transforms3d.euler.euler2mat(roll, pitch, yaw) - elif all(key in entry for key in ("qx", "qy", "qz", "qw")): - qx = float(entry["qx"]) - qy = float(entry["qy"]) - qz = float(entry["qz"]) - qw = float(entry["qw"]) - rot = transforms3d.quaternions.quat2mat((qw, qx, qy, qz)) - else: - return - - mat = np.eye(4) - mat[0:3, 0:3] = rot - mat[0:3, 3] = [x, y, z] - - if is_preprocessed_entry: - self.source_transform = self.ros_interface.get_image_to_lidar_transform(mat) - else: + mat = load_transform( + data, self.ros_interface.image_frame, self.ros_interface.lidar_frame + ) self.source_transform = mat except Exception as ex: self.ros_interface.get_logger().error( @@ -491,33 +591,36 @@ def tf_source_callback(self, source): def camera_info_source_callback(self, source): if source == "message": - # # If we add item "message" only after the message arrives, then we can: - # assert self.message_camera_info is not None - if self.message_camera_info is None: + if not validate_camera_info(self.message_camera_info): return self.source_camera_info = self.message_camera_info elif source == "calibrator": - assert self.optimized_camera_info is not None + if not validate_camera_info(self.optimized_camera_info): + return self.source_camera_info = self.optimized_camera_info elif source == "file": + # Reads TF regardless of the format: + # - yaml or json (output from previous versions) + # - xyz + quaternion or xyz + rpy + # - preprocessed or non-preprocessed entry filename, _ = QFileDialog.getOpenFileName( - self, "Open camera info File", ".", "YAML files (*.yaml)" + self, "Open camera info File", ".", "YAML file (*.yaml);;JSON file (*.json)" ) if len(filename) == 0: return try: - with open(filename, "r") as f: - data = yaml.safe_load(f) + file_to_data, load_camera_info = ( + (json.load, load_camera_info_from_json) + if filename.endswith(".json") + else (yaml.safe_load, load_camera_info_from_yaml) + ) - camera_info = 
CameraInfo() - camera_info.width = data["image_width"] - camera_info.height = data["image_height"] - camera_info.distortion_model = data["distortion_model"] + with open(filename, "r") as f: + data = file_to_data(f) + camera_info = load_camera_info(data) - camera_info.d = data["distortion_coefficients"]["data"] - camera_info.k = data["camera_matrix"]["data"] - camera_info.p = data["projection_matrix"]["data"] - camera_info.r = data["rectification_matrix"]["data"] + if not validate_camera_info(camera_info): + raise ValueError("Invalid CameraInfo data") self.source_camera_info = camera_info except Exception as ex: From 4df5595c9c6d58e976b6c10d7dfe64a70f709ece Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Mon, 1 Dec 2025 15:22:33 +0900 Subject: [PATCH 09/20] add log on optimized camera info obtained --- .../interactive_calibrator.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py index ddde3eef..6f09a499 100644 --- a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py +++ b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py @@ -200,6 +200,10 @@ def calibration_intrinsics_callback(): self.source_camera_info, ) + self.ros_interface.get_logger().info( + f"Optimized camera intrinsics obtained: K = {self.optimized_camera_info.k}, D = {self.optimized_camera_info.d}" + ) + if is_init: self.camera_info_source_combobox.insertItem( 0, "Calibrator", "calibrator" From 1e1e5b8680d03297ef9f2893c217a2cfe8488b7f Mon Sep 17 00:00:00 2001 From: Shumpei Wakabayashi <42209144+shmpwk@users.noreply.github.com> Date: Tue, 2 Dec 2025 09:06:09 +0900 Subject: [PATCH 10/20] Update sensor_calibration_manager/launch/xx1/tag_based_pnp_calibrator.launch.xml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../launch/xx1/tag_based_pnp_calibrator.launch.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/sensor_calibration_manager/launch/xx1/tag_based_pnp_calibrator.launch.xml b/sensor_calibration_manager/launch/xx1/tag_based_pnp_calibrator.launch.xml index f3efa250..f836c181 100644 --- a/sensor_calibration_manager/launch/xx1/tag_based_pnp_calibrator.launch.xml +++ b/sensor_calibration_manager/launch/xx1/tag_based_pnp_calibrator.launch.xml @@ -22,7 +22,6 @@ - From 615356af7473735e759bb151568fdb0e9ed091a2 Mon Sep 17 00:00:00 2001 From: Shumpei Wakabayashi <42209144+shmpwk@users.noreply.github.com> Date: Tue, 2 Dec 2025 09:10:40 +0900 Subject: [PATCH 11/20] remove duplicated lidar_frame Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../launch/rdv/tag_based_pnp_calibrator.launch.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/sensor_calibration_manager/launch/rdv/tag_based_pnp_calibrator.launch.xml b/sensor_calibration_manager/launch/rdv/tag_based_pnp_calibrator.launch.xml index 465e4736..18218893 100644 --- a/sensor_calibration_manager/launch/rdv/tag_based_pnp_calibrator.launch.xml +++ b/sensor_calibration_manager/launch/rdv/tag_based_pnp_calibrator.launch.xml @@ -22,7 +22,6 @@ - From ab97e014584cf8e1efa68f316dc5378c5f49b30c Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Tue, 2 Dec 2025 09:19:12 +0900 Subject: [PATCH 12/20] Update 
calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../interactive_calibrator.py | 1 - 1 file changed, 1 deletion(-) diff --git a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py index 6f09a499..df1b0454 100644 --- a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py +++ b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py @@ -372,7 +372,6 @@ def save_calibration_callback(self): np.savetxt(os.path.join(output_folder, "image_points.txt"), image_points) if self.optimized_camera_info is not None: - d = message_to_ordereddict(self.optimized_camera_info) d = {} d["image_width"] = self.optimized_camera_info.width From d4768f26ee70d8172f2d417502b49e445d42c93c Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Tue, 2 Dec 2025 09:19:42 +0900 Subject: [PATCH 13/20] Update calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../interactive_calibrator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py index df1b0454..8a3aebb8 100644 --- a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py +++ b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py @@ -539,7 +539,7 @@ def calibration_callback(self): self.calibration_api_button.setEnabled( self.calibration_api_request_received and self.calibrated_transform is not None ) - # pass tf so that ros interface can publish if allowed + # Pass tf so that ROS interface can publish if allowed. 
self.ros_interface.set_camera_lidar_transform(transform) def update_calibration_status(self): From ad488bc7488efd256564c930f6cafb49bb2a5dec Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Tue, 2 Dec 2025 09:22:55 +0900 Subject: [PATCH 14/20] Update common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../tier4_calibration_views/image_view_ui.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py index 585a17a1..64657108 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py @@ -643,8 +643,7 @@ def camera_info_source_callback(self, source): f"K: [\n" f" {round(K[0], 6)}, {round(K[1], 6)}, {round(K[2], 6)},\n" f" {round(K[3], 6)}, {round(K[4], 6)}, {round(K[5], 6)},\n" - f" {round(K[6], 6)}, {round(K[7], 6)}, {round(K[8], 6)},\n" - f"]\n" + f" {round(K[6], 6)}, {round(K[7], 6)}, {round(K[8], 6)}]\n" ) self.camera_info_source_status_text.setPlainText(status_text) From 82aa24626e82ca5264fc5085adf32e5370831f64 Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Tue, 2 Dec 2025 09:38:31 +0900 Subject: [PATCH 15/20] update K matrix printing format --- .../tier4_calibration_views/image_view_ui.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py index 64657108..b04773de 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py @@ -640,10 +640,9 @@ def camera_info_source_callback(self, source): D_str = ", ".join([f"{round(p, 6)}" for p in self.source_camera_info.d]) status_text = ( f"D: [{D_str}]\n" - f"K: [\n" - f" {round(K[0], 6)}, {round(K[1], 6)}, {round(K[2], 6)},\n" - f" {round(K[3], 6)}, {round(K[4], 6)}, {round(K[5], 6)},\n" - f" {round(K[6], 6)}, {round(K[7], 6)}, {round(K[8], 6)}]\n" + f"K: [{round(K[0], 6)}, {round(K[1], 6)}, {round(K[2], 6)},\n" + f" {round(K[3], 6)}, {round(K[4], 6)}, {round(K[5], 6)},\n" + f" {round(K[6], 6)}, {round(K[7], 6)}, {round(K[8], 6)}]\n" ) self.camera_info_source_status_text.setPlainText(status_text) From 260cdfa917b252b2e29878c6ae416667e8fffc1f Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Tue, 2 Dec 2025 09:57:25 +0900 Subject: [PATCH 16/20] pre-commit --- .../interactive_calibrator.py | 1 - 1 file changed, 1 deletion(-) diff --git a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py index 8a3aebb8..277a91f0 100644 --- a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py +++ b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py @@ -39,7 +39,6 @@ from interactive_camera_lidar_calibrator.utils import camera_lidar_calibrate_intrinsics import numpy as np import rclpy -from rosidl_runtime_py.convert import message_to_ordereddict from tier4_calibration_views.image_view_ui import ImageViewUI import transforms3d import yaml From 
0143b675c7cd72147d94ce3eaa99b137776921a4 Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Tue, 2 Dec 2025 10:08:00 +0900 Subject: [PATCH 17/20] explicit keyerror instead of silent identity matrix --- .../tier4_calibration_views/image_view_ui.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py index b04773de..9de24fde 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py @@ -64,7 +64,9 @@ def load_transform_from_yaml(yaml_data, parent_frame: str, child_frame: str): qw = float(entry["qw"]) rot = transforms3d.quaternions.quat2mat((qw, qx, qy, qz)) else: - rot = np.eye(3) + raise KeyError( + f"Missing rotation parameters: expected either roll/pitch/yaw or qx/qy/qz/qw." + ) mat = np.eye(4) mat[0:3, 0:3] = rot From 2ac84a07a494c73eb949bbee4ab0bb0dc91d11d6 Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Tue, 2 Dec 2025 10:14:03 +0900 Subject: [PATCH 18/20] add explicit camera_name ros parameter --- .../interactive_calibrator.py | 3 +-- .../tier4_calibration_views/image_view_ros_interface.py | 2 ++ .../launch/rdv/tag_based_pnp_calibrator.launch.xml | 2 ++ .../launch/x2/tag_based_pnp_calibrator.launch.xml | 2 ++ .../launch/xx1/tag_based_pnp_calibrator.launch.xml | 2 ++ .../launch/xx1_15/tag_based_pnp_calibrator.launch.xml | 2 ++ .../launch/xx1_gen2/tag_based_pnp_calibrator.launch.xml | 2 ++ 7 files changed, 13 insertions(+), 2 deletions(-) diff --git a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py index 277a91f0..7aa60c78 100644 --- a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py +++ b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py @@ -371,11 +371,10 @@ def save_calibration_callback(self): np.savetxt(os.path.join(output_folder, "image_points.txt"), image_points) if self.optimized_camera_info is not None: - d = {} d["image_width"] = self.optimized_camera_info.width d["image_height"] = self.optimized_camera_info.height - d["camera_name"] = self.optimized_camera_info.header.frame_id.split("/")[0] # TEMP + d["camera_name"] = self.ros_interface.camera_name d["camera_matrix"] = { "rows": 3, "cols": 3, diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py index 8f88a721..3f3a11b1 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py @@ -62,6 +62,7 @@ def __init__(self, node_name="image_view"): self.declare_parameter("parent_frame", "") self.declare_parameter("child_frame", "") self.declare_parameter("should_reverse_transform", False) + self.declare_parameter("camera_name", "camera") self.use_rectified = self.get_parameter("use_rectified").get_parameter_value().bool_value self.use_compressed = self.get_parameter("use_compressed").get_parameter_value().bool_value @@ -77,6 +78,7 @@ def __init__(self, node_name="image_view"): self.should_reverse_transform = ( 
self.get_parameter("should_reverse_transform").get_parameter_value().bool_value ) + self.camera_name = self.get_parameter("camera_name").get_parameter_value().string_value # Data self.pointcloud_queue: Deque[PointCloud2] = deque([], 5) diff --git a/sensor_calibration_manager/launch/rdv/tag_based_pnp_calibrator.launch.xml b/sensor_calibration_manager/launch/rdv/tag_based_pnp_calibrator.launch.xml index 18218893..e21847f2 100644 --- a/sensor_calibration_manager/launch/rdv/tag_based_pnp_calibrator.launch.xml +++ b/sensor_calibration_manager/launch/rdv/tag_based_pnp_calibrator.launch.xml @@ -67,6 +67,8 @@ + + diff --git a/sensor_calibration_manager/launch/x2/tag_based_pnp_calibrator.launch.xml b/sensor_calibration_manager/launch/x2/tag_based_pnp_calibrator.launch.xml index fca4fd17..9e99c377 100644 --- a/sensor_calibration_manager/launch/x2/tag_based_pnp_calibrator.launch.xml +++ b/sensor_calibration_manager/launch/x2/tag_based_pnp_calibrator.launch.xml @@ -91,6 +91,8 @@ + + diff --git a/sensor_calibration_manager/launch/xx1/tag_based_pnp_calibrator.launch.xml b/sensor_calibration_manager/launch/xx1/tag_based_pnp_calibrator.launch.xml index f836c181..4fde263d 100644 --- a/sensor_calibration_manager/launch/xx1/tag_based_pnp_calibrator.launch.xml +++ b/sensor_calibration_manager/launch/xx1/tag_based_pnp_calibrator.launch.xml @@ -67,6 +67,8 @@ + + diff --git a/sensor_calibration_manager/launch/xx1_15/tag_based_pnp_calibrator.launch.xml b/sensor_calibration_manager/launch/xx1_15/tag_based_pnp_calibrator.launch.xml index 5d5ee875..87190d40 100644 --- a/sensor_calibration_manager/launch/xx1_15/tag_based_pnp_calibrator.launch.xml +++ b/sensor_calibration_manager/launch/xx1_15/tag_based_pnp_calibrator.launch.xml @@ -70,6 +70,8 @@ + + diff --git a/sensor_calibration_manager/launch/xx1_gen2/tag_based_pnp_calibrator.launch.xml b/sensor_calibration_manager/launch/xx1_gen2/tag_based_pnp_calibrator.launch.xml index f40121af..29da94cc 100644 --- a/sensor_calibration_manager/launch/xx1_gen2/tag_based_pnp_calibrator.launch.xml +++ b/sensor_calibration_manager/launch/xx1_gen2/tag_based_pnp_calibrator.launch.xml @@ -73,6 +73,8 @@ + + From 6e7ffd11e2bc586d7b546adebcb4dec1af827386 Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Tue, 2 Dec 2025 10:16:06 +0900 Subject: [PATCH 19/20] remove unnecessary f-string --- .../tier4_calibration_views/image_view_ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py index 9de24fde..3f774b05 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py @@ -65,7 +65,7 @@ def load_transform_from_yaml(yaml_data, parent_frame: str, child_frame: str): rot = transforms3d.quaternions.quat2mat((qw, qx, qy, qz)) else: raise KeyError( - f"Missing rotation parameters: expected either roll/pitch/yaw or qx/qy/qz/qw." + "Missing rotation parameters: expected either roll/pitch/yaw or qx/qy/qz/qw." 
) mat = np.eye(4) From 15f6343bad4bcc1bc58cc4de04f8b60e5ef9e5d7 Mon Sep 17 00:00:00 2001 From: Taeseung Sohn Date: Tue, 2 Dec 2025 12:27:34 +0900 Subject: [PATCH 20/20] enhance file load failure debugging --- .../interactive_calibrator.py | 56 ++--- .../image_view_ros_interface.py | 211 +++++++++--------- .../tier4_calibration_views/image_view_ui.py | 43 ++-- 3 files changed, 157 insertions(+), 153 deletions(-) diff --git a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py index 7aa60c78..37321096 100644 --- a/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py +++ b/calibrators/interactive_camera_lidar_calibrator/interactive_camera_lidar_calibrator/interactive_calibrator.py @@ -429,34 +429,36 @@ def save_calibration_callback(self): yaml.dump(calibrated_d, f, sort_keys=False) # postprocess the calibrated transform - postprocessed_transform = self.ros_interface.get_parent_to_child_transform( - self.calibrated_transform - ) - if postprocessed_transform is None: - return + try: + postprocessed_transform = self.ros_interface.get_parent_to_child_transform( + self.calibrated_transform + ) - postprocessed_tf = { - "x": postprocessed_transform[0, 3].item(), - "y": postprocessed_transform[1, 3].item(), - "z": postprocessed_transform[2, 3].item(), - } - if use_rpy: - rpy = transforms3d.euler.mat2euler(postprocessed_transform[0:3, 0:3]) - postprocessed_tf["roll"] = rpy[0] - postprocessed_tf["pitch"] = rpy[1] - postprocessed_tf["yaw"] = rpy[2] - else: - quat = transforms3d.quaternions.mat2quat(postprocessed_transform[0:3, 0:3]) - postprocessed_tf["qx"] = quat[1] - postprocessed_tf["qy"] = quat[2] - postprocessed_tf["qz"] = quat[3] - postprocessed_tf["qw"] = quat[0] - - postprocessed_d = { - self.ros_interface.parent_frame: {self.ros_interface.child_frame: postprocessed_tf} - } - with open(os.path.join(output_folder, "tf_postprocessed.yaml"), "w") as f: - yaml.dump(postprocessed_d, f, sort_keys=False) + postprocessed_tf = { + "x": postprocessed_transform[0, 3].item(), + "y": postprocessed_transform[1, 3].item(), + "z": postprocessed_transform[2, 3].item(), + } + if use_rpy: + rpy = transforms3d.euler.mat2euler(postprocessed_transform[0:3, 0:3]) + postprocessed_tf["roll"] = rpy[0] + postprocessed_tf["pitch"] = rpy[1] + postprocessed_tf["yaw"] = rpy[2] + else: + quat = transforms3d.quaternions.mat2quat(postprocessed_transform[0:3, 0:3]) + postprocessed_tf["qx"] = quat[1] + postprocessed_tf["qy"] = quat[2] + postprocessed_tf["qz"] = quat[3] + postprocessed_tf["qw"] = quat[0] + + postprocessed_d = { + self.ros_interface.parent_frame: {self.ros_interface.child_frame: postprocessed_tf} + } + with open(os.path.join(output_folder, "tf_postprocessed.yaml"), "w") as f: + yaml.dump(postprocessed_d, f, sort_keys=False) + except Exception as ex: + self.ros_interface.get_logger().error(f"Could not save postprocessed TF. 
{ex}") + return def load_calibration_callback(self): input_dir = QFileDialog.getExistingDirectory( diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py index 3f3a11b1..5152a396 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ros_interface.py @@ -80,6 +80,15 @@ def __init__(self, node_name="image_view"): ) self.camera_name = self.get_parameter("camera_name").get_parameter_value().string_value + for frame_key, frame_name in [ + ("image_frame", self.image_frame), + ("lidar_frame", self.lidar_frame), + ("parent_frame", self.parent_frame), + ("child_frame", self.child_frame), + ]: + if frame_name == "": + self.get_logger().warn(f"{frame_key} is not set") + # Data self.pointcloud_queue: Deque[PointCloud2] = deque([], 5) self.image_queue: Deque[Union[CompressedImage, Image]] = deque([], 5) @@ -126,123 +135,115 @@ def __init__(self, node_name="image_view"): self.timer = self.create_timer(self.timer_period, self.timer_callback) def get_parent_to_child_transform(self, image_to_lidar_transform): - if ( - self.image_frame == "" - or self.lidar_frame == "" - or self.parent_frame == "" - or self.child_frame == "" - ): - return None + for frame_key, frame_name in [ + ("image_frame", self.image_frame), + ("lidar_frame", self.lidar_frame), + ("parent_frame", self.parent_frame), + ("child_frame", self.child_frame), + ]: + if frame_name == "": + raise ValueError(f"{frame_key} is not set") with self.lock: - try: - if self.should_reverse_transform: - # image -> child -> parent -> lidar - child_to_image_transform = tf_message_to_transform_matrix( - self.tf_buffer.lookup_transform( - self.child_frame, - self.image_frame, - rclpy.time.Time(), - timeout=Duration(seconds=0.0), - ) - ) - parent_to_lidar_transform = tf_message_to_transform_matrix( - self.tf_buffer.lookup_transform( - self.parent_frame, - self.lidar_frame, - rclpy.time.Time(), - timeout=Duration(seconds=0.0), - ) + if self.should_reverse_transform: + # image -> child -> parent -> lidar + child_to_image_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.child_frame, + self.image_frame, + rclpy.time.Time(), + timeout=Duration(seconds=0.0), ) - parent_to_child_transform = parent_to_lidar_transform @ np.linalg.inv( - child_to_image_transform @ image_to_lidar_transform - ) - return parent_to_child_transform - else: - # image -> parent -> child -> lidar - parent_to_image_transform = tf_message_to_transform_matrix( - self.tf_buffer.lookup_transform( - self.parent_frame, - self.image_frame, - rclpy.time.Time(), - timeout=Duration(seconds=0.0), - ) + ) + parent_to_lidar_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.parent_frame, + self.lidar_frame, + rclpy.time.Time(), + timeout=Duration(seconds=0.0), ) - lidar_to_child_transform = tf_message_to_transform_matrix( - self.tf_buffer.lookup_transform( - self.lidar_frame, - self.child_frame, - rclpy.time.Time(), - timeout=Duration(seconds=0.0), - ) + ) + parent_to_child_transform = parent_to_lidar_transform @ np.linalg.inv( + child_to_image_transform @ image_to_lidar_transform + ) + return parent_to_child_transform + else: + # image -> parent -> child -> lidar + parent_to_image_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.parent_frame, + self.image_frame, + 
rclpy.time.Time(), + timeout=Duration(seconds=0.0), ) - parent_to_child_transform = ( - parent_to_image_transform - @ image_to_lidar_transform - @ lidar_to_child_transform + ) + lidar_to_child_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.lidar_frame, + self.child_frame, + rclpy.time.Time(), + timeout=Duration(seconds=0.0), ) - return parent_to_child_transform - except TransformException: - return None + ) + parent_to_child_transform = ( + parent_to_image_transform @ image_to_lidar_transform @ lidar_to_child_transform + ) + return parent_to_child_transform def get_image_to_lidar_transform(self, parent_to_child_transform): - if ( - self.image_frame == "" - or self.lidar_frame == "" - or self.parent_frame == "" - or self.child_frame == "" - ): - return None + for frame_key, frame_name in [ + ("image_frame", self.image_frame), + ("lidar_frame", self.lidar_frame), + ("parent_frame", self.parent_frame), + ("child_frame", self.child_frame), + ]: + if frame_name == "": + raise ValueError(f"{frame_key} is not set") with self.lock: - try: - if self.should_reverse_transform: - # image -> child -> parent -> lidar - child_to_image_transform = tf_message_to_transform_matrix( - self.tf_buffer.lookup_transform( - self.child_frame, - self.image_frame, - rclpy.time.Time(), - timeout=Duration(seconds=0.0), - ) - ) - parent_to_lidar_transform = tf_message_to_transform_matrix( - self.tf_buffer.lookup_transform( - self.parent_frame, - self.lidar_frame, - rclpy.time.Time(), - timeout=Duration(seconds=0.0), - ) - ) - image_to_lidar_transform = ( - np.linalg.inv(parent_to_child_transform @ child_to_image_transform) - @ parent_to_lidar_transform + if self.should_reverse_transform: + # image -> child -> parent -> lidar + child_to_image_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.child_frame, + self.image_frame, + rclpy.time.Time(), + timeout=Duration(seconds=0.0), ) - return image_to_lidar_transform - else: - # image -> parent -> child -> lidar - image_to_parent_transform = tf_message_to_transform_matrix( - self.tf_buffer.lookup_transform( - self.image_frame, - self.parent_frame, - rclpy.time.Time(), - timeout=Duration(seconds=0.0), - ) + ) + parent_to_lidar_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.parent_frame, + self.lidar_frame, + rclpy.time.Time(), + timeout=Duration(seconds=0.0), ) - child_to_lidar_transform = tf_message_to_transform_matrix( - self.tf_buffer.lookup_transform( - self.child_frame, - self.lidar_frame, - rclpy.time.Time(), - timeout=Duration(seconds=0.0), - ) + ) + image_to_lidar_transform = ( + np.linalg.inv(parent_to_child_transform @ child_to_image_transform) + @ parent_to_lidar_transform + ) + return image_to_lidar_transform + else: + # image -> parent -> child -> lidar + image_to_parent_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.image_frame, + self.parent_frame, + rclpy.time.Time(), + timeout=Duration(seconds=0.0), ) - image_to_lidar_transform = ( - image_to_parent_transform - @ parent_to_child_transform - @ child_to_lidar_transform + ) + child_to_lidar_transform = tf_message_to_transform_matrix( + self.tf_buffer.lookup_transform( + self.child_frame, + self.lidar_frame, + rclpy.time.Time(), + timeout=Duration(seconds=0.0), ) - return image_to_lidar_transform - except TransformException: - return None + ) + image_to_lidar_transform = ( + image_to_parent_transform @ parent_to_child_transform @ child_to_lidar_transform + ) + 
return image_to_lidar_transform def set_sensor_data_callback(self, callback): with self.lock: self.sensor_data_callback = callback diff --git a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py index 3f774b05..ad37c938 100644 --- a/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py +++ b/common/tier4_calibration_views/tier4_calibration_views/image_view_ui.py @@ -537,7 +537,6 @@ def tf_source_callback(self, source): self.source_transform = self.ros_interface.get_image_to_lidar_transform(mat) except KeyError: # Fallback to non-postprocessed entry. - # If this fails, silently return. mat = load_transform( data, self.ros_interface.image_frame, self.ros_interface.lidar_frame ) @@ -569,27 +568,29 @@ def tf_source_callback(self, source): ) self.tf_source_status_text.setPlainText(status_text) - # postprocess the source transform - postprocessed_transform = self.ros_interface.get_parent_to_child_transform( - self.source_transform - ) - if postprocessed_transform is None: - return + try: + # postprocess the source transform + postprocessed_transform = self.ros_interface.get_parent_to_child_transform( + self.source_transform + ) - # update (append) status text - postprocessed_xyz = postprocessed_transform[0:3, 3] - postprocessed_rpy = transforms3d.euler.mat2euler(postprocessed_transform[0:3, 0:3]) - status_text += ( - f"\n{self.ros_interface.parent_frame}\n" - f"-> {self.ros_interface.child_frame}:\n" - f"x: {round(postprocessed_xyz[0], 6)}\n" - f"y: {round(postprocessed_xyz[1], 6)}\n" - f"z: {round(postprocessed_xyz[2], 6)}\n" - f"roll: {round(postprocessed_rpy[0], 6)}\n" - f"pitch: {round(postprocessed_rpy[1], 6)}\n" - f"yaw: {round(postprocessed_rpy[2], 6)}\n" - ) - self.tf_source_status_text.setPlainText(status_text) + # update (append) status text + postprocessed_xyz = postprocessed_transform[0:3, 3] + postprocessed_rpy = transforms3d.euler.mat2euler(postprocessed_transform[0:3, 0:3]) + status_text += ( + f"\n{self.ros_interface.parent_frame}\n" + f"-> {self.ros_interface.child_frame}:\n" + f"x: {round(postprocessed_xyz[0], 6)}\n" + f"y: {round(postprocessed_xyz[1], 6)}\n" + f"z: {round(postprocessed_xyz[2], 6)}\n" + f"roll: {round(postprocessed_rpy[0], 6)}\n" + f"pitch: {round(postprocessed_rpy[1], 6)}\n" + f"yaw: {round(postprocessed_rpy[2], 6)}\n" + ) + self.tf_source_status_text.setPlainText(status_text) + except Exception as ex: + self.ros_interface.get_logger().error(f"Could not postprocess the source TF. {ex}") + return def camera_info_source_callback(self, source): if source == "message":