diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml new file mode 100644 index 000000000..3de575d8e --- /dev/null +++ b/.github/workflows/python-app.yml @@ -0,0 +1,62 @@ +# This workflow will install Python dependencies, run tests and lint with a single version of Python +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python + +name: Python application + +on: + push: + branches: + - '**' + + +permissions: + contents: read + +jobs: + PEP-Guidelines: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.10 + uses: actions/setup-python@v3 + with: + python-version: "3.10" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install flake8 flake8-docstrings pep8-naming + - name: Lint with flake8 + run: | + # stop the build if there are Python syntax errors or undefined names + flake8 ./GEMstack --count --select=E9,F63,F7,F82 --show-source --statistics --exclude=__init__.py || exit 1 + # to enable more advanced checks on the repo, uncomment the lines below (There are around 3000 violations) + # flake8 ./GEMstack --ignore=D,C901,E402,E231 --count --max-complexity=10 --max-line-length=127 --statistics --exclude=__init__.py || exit 1 + # if we want to enable documentation checks, uncomment the line below + # flake8 ./GEMstack --ignore=E128,E402,E501,F401 --docstring-convention pep257 --max-line-length=120 --exclude=__init__.py || exit 1 + continue-on-error: false + + Documentation: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.10 + uses: actions/setup-python@v3 + with: + python-version: "3.10" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install sphinx sphinx-rtd-theme + - name: Generate Documentation + run: | + # stop the build if there are Python syntax errors or undefined names + sphinx-build -b html docs docs/build + - 
name: Save Documentation as Artifact + uses: actions/upload-artifact@v4 + with: + name: documentation + path: docs/build diff --git a/.gitignore b/.gitignore index 0c36d682f..5a0b72642 100644 --- a/.gitignore +++ b/.gitignore @@ -165,3 +165,20 @@ cython_debug/ # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ + +# ZED run files +**/*.run +.vscode/ +setup/zed_sdk.run +cuda/ +onedrive_config.json +zed_sdk.run +ZED_SDK_Ubuntu20_cuda11.8_v4.0.8.zstd.run +ros.asc + +homework/yolov8n.pt +homework/yolo11n.pt +GEMstack/knowledge/detection/yolov8n.pt +GEMstack/knowledge/detection/yolo11n.pt +yolov8n.pt +yolo11n.pt diff --git a/GEMstack/knowledge/calibration/extrinsics.npz b/GEMstack/knowledge/calibration/extrinsics.npz new file mode 100644 index 000000000..663fe14bd Binary files /dev/null and b/GEMstack/knowledge/calibration/extrinsics.npz differ diff --git a/GEMstack/knowledge/calibration/gem_e4.yaml b/GEMstack/knowledge/calibration/gem_e4.yaml index d0954dc06..146dac1be 100644 --- a/GEMstack/knowledge/calibration/gem_e4.yaml +++ b/GEMstack/knowledge/calibration/gem_e4.yaml @@ -1,7 +1,63 @@ -calibration_date: "2024-03-05" # Date of calibration YYYY-MM-DD -reference: rear_axle_center # rear axle center -rear_axle_height: 0.33 # height of rear axle center above flat ground -gnss_location: [1.10,0,1.62] # meters, taken from https://github.com/hangcui1201/POLARIS_GEM_e2_Real/blob/main/vehicle_drivers/gem_gnss_control/scripts/gem_gnss_tracker_stanley_rtk.py. Note conflict with pure pursuit location? 
-gnss_yaw: 0.0 # radians +T_frontcamera_vehicle: +- - 0.04979815921110274 + - -0.02889308605338095 + - 0.998341277156963 + - 0.6874899022540225 +- - -0.9986898492450907 + - 0.010343220052680617 + - 0.05011489006575621 + - -0.006750728511462573 +- - -0.011774038000135968 + - -0.9995289850272167 + - -0.02834016107658961 + - -0.07694233627246522 +- - 0.0 + - 0.0 + - 0.0 + - 1.0 +T_toplidar_frontcamera: +- - 0.029096143490519977 + - -0.9995463314295703 + - -0.007781115579835407 + - -0.045768971801312515 +- - -0.005777780139254174 + - 0.007616104089319731 + - -0.999954305063568 + - -0.3868171492765085 +- - 0.9995599190096819 + - 0.02913977151915098 + - -0.005553559684576381 + - -0.6975440651652165 +- - 0.0 + - 0.0 + - 0.0 + - 1.0 +T_toplidar_vehicle: +- - 0.9995177984237671 + - -0.020904183387756348 + - 0.022959932684898376 + - 0.0 +- - 0.020975051447749138 + - 0.9997758865356445 + - -0.0028501423075795174 + - 0.0 +- - -0.0228952094912529 + - 0.003330353880301118 + - 0.9997323155403137 + - 0.33000001311302185 +- - 0.0 + - 0.0 + - 0.0 + - 1.0 +date: '2025-02-25 22:31:02.057447' +calibration_type: absolute_vehicle_calibration +gnss_location: +- 1.1 +- 0 +- 1.62 +gnss_yaw: 0.0 +rear_axle_height: 0.33 +reference: rear_axle_center +vehicle: gem_e4 top_lidar: !include "gem_e4_ouster.yaml" front_camera: !include "gem_e4_oak.yaml" diff --git a/GEMstack/knowledge/calibration/intrinsic.json b/GEMstack/knowledge/calibration/intrinsic.json new file mode 100644 index 000000000..37e870fd6 --- /dev/null +++ b/GEMstack/knowledge/calibration/intrinsic.json @@ -0,0 +1,7 @@ +{ + "fx": 684.8333129882812, + "fy": 684.6096801757812, + "cx": 573.37109375, + "cy": 363.700927734375 + } + diff --git a/GEMstack/knowledge/defaults/computation_graph.yaml b/GEMstack/knowledge/defaults/computation_graph.yaml index d7a606326..6f468163d 100644 --- a/GEMstack/knowledge/defaults/computation_graph.yaml +++ b/GEMstack/knowledge/defaults/computation_graph.yaml @@ -44,11 +44,23 @@ components: inputs: 
[vehicle, roadgraph, mission] outputs: route - driving_logic: - inputs: all + inputs: outputs: intent + - parking_component: # one way + inputs: all + outputs: mission_plan + - summon_component: # one way + inputs: all + outputs: route + - route_planning_component: # one way + inputs: all + outputs: route - motion_planning: inputs: all outputs: trajectory - trajectory_tracking: inputs: [vehicle, trajectory] outputs: + - signaling: + inputs: [intent] + outputs: diff --git a/GEMstack/knowledge/defaults/current.yaml b/GEMstack/knowledge/defaults/current.yaml index c886288be..a72c7256a 100644 --- a/GEMstack/knowledge/defaults/current.yaml +++ b/GEMstack/knowledge/defaults/current.yaml @@ -14,10 +14,10 @@ control: pure_pursuit: lookahead: 2.0 lookahead_scale: 3.0 - crosstrack_gain: 1.0 + crosstrack_gain: 0.5 # default: 1 desired_speed: trajectory longitudinal_control: - pid_p: 1.0 + pid_p: 1.5 pid_i: 0.1 pid_d: 0.0 diff --git a/GEMstack/knowledge/vehicle/dynamics.py b/GEMstack/knowledge/vehicle/dynamics.py index 9bd33a92e..f2b3ff4da 100644 --- a/GEMstack/knowledge/vehicle/dynamics.py +++ b/GEMstack/knowledge/vehicle/dynamics.py @@ -18,6 +18,7 @@ def acceleration_to_pedal_positions(acceleration : float, velocity : float, pitc Returns tuple (accelerator_pedal_position, brake_pedal_position, desired_gear) """ model = settings.get('vehicle.dynamics.acceleration_model','hang_v1') + if model == 'hang_v1': if gear != 1: print("WARNING can't handle gears other than 1 yet") @@ -36,6 +37,13 @@ def acceleration_to_pedal_positions(acceleration : float, velocity : float, pitc else: brake_percent = brake_active_range[0] + -(acceleration+dry_decel)/max_brake * (brake_active_range[1]-brake_active_range[0]) throttle_percent = 0 + + print("accel : {:.3f}".format(acceleration)) + print("vel : {:.3f}".format(velocity)) + print("gear : {}".format(gear)) + print("gas : {:.2f}".format(throttle_percent)) + print("brake : {:.2f}".format(brake_percent)) + return 
(max(throttle_percent,0.0),max(brake_percent,0.0),1) elif model == 'kris_v1': diff --git a/GEMstack/knowledge/vehicle/gem_e4_dynamics.yaml b/GEMstack/knowledge/vehicle/gem_e4_dynamics.yaml index d20d2c357..fdc07f5c7 100644 --- a/GEMstack/knowledge/vehicle/gem_e4_dynamics.yaml +++ b/GEMstack/knowledge/vehicle/gem_e4_dynamics.yaml @@ -12,7 +12,7 @@ max_accelerator_power: #Watts. Power at max accelerator pedal, by gear - 10000.0 max_accelerator_power_reverse: 10000.0 #Watts. Power (backwards) in reverse gear -acceleration_model : kris_v1 +acceleration_model : hang_v1 # kris_v1 accelerator_active_range : [0.2, 1.0] #range of accelerator pedal where output acceleration is not flat brake_active_range : [0,1] #range of brake pedal where output deceleration is not flat diff --git a/GEMstack/mathutils/dubins.py b/GEMstack/mathutils/dubins.py index cdb4655ab..b840201d9 100644 --- a/GEMstack/mathutils/dubins.py +++ b/GEMstack/mathutils/dubins.py @@ -44,7 +44,7 @@ def derivative(self,x,u): right = [-fwd[1],fwd[0]] phi = u[1] d = u[0] - return np.array([fwd[0]*d,fwd[1]*d,phi]) + return np.array([fwd[0]*d,fwd[1]*d,phi*d]) class DubinsCarIntegrator(ControlSpace): diff --git a/GEMstack/mathutils/transforms.py b/GEMstack/mathutils/transforms.py index 833ecc80d..a29ec48e2 100644 --- a/GEMstack/mathutils/transforms.py +++ b/GEMstack/mathutils/transforms.py @@ -36,6 +36,14 @@ def vector_dist(v1, v2) -> float: """Euclidean distance between two vectors""" return vo.distance(v1,v2) +def vector_dot(v1, v2) -> float: + """Dot product between two vectors""" + return vo.dot(v1,v2) + +def vector_cross(v1, v2) -> float: + """Cross product between two 2D vectors""" + return vo.cross(v1,v2) + def vector2_angle(v1, v2 = None) -> float: """find the ccw angle bewtween two 2d vectors""" if v2 is None: @@ -123,3 +131,16 @@ def xy_to_lat_lon(x_east : float, y_north : float, lat_reference : float, lon_re # convert GNSS waypoints into local fixed frame reprented in x and y lat, lon = axy.xy2ll(x_east, 
y_north, lat_reference, lon_reference) return lat, lon + +def quaternion_to_euler(x : float, y : float, z : float, w : float): + t0 = +2.0 * (w * x + y * z) + t1 = +1.0 - 2.0 * (x * x + y * y) + roll = np.arctan2(t0, t1) + t2 = +2.0 * (w * y - z * x) + t2 = +1.0 if t2 > +1.0 else t2 + t2 = -1.0 if t2 < -1.0 else t2 + pitch = np.arcsin(t2) + t3 = +2.0 * (w * z + x * y) + t4 = +1.0 - 2.0 * (y * y + z * z) + yaw = np.arctan2(t3, t4) + return [roll, pitch, yaw] diff --git a/GEMstack/offboard/calibration/ReadMe.md b/GEMstack/offboard/calibration/ReadMe.md new file mode 100644 index 000000000..6f7f09ef4 --- /dev/null +++ b/GEMstack/offboard/calibration/ReadMe.md @@ -0,0 +1,130 @@ + + +The vehicle frame is defined as: +- **Origin:** Rear axle center +- **X-axis:** Forward +- **Y-axis:** Left +- **Z-axis:** Up + + +## Assignment Overview + +- **Capture Paired Data:** + Acquire synchronized LiDAR scans and camera images using your ROS-based capture scripts. + +- **Extract Camera Intrinsics:** + Use the `/oak/rgb/camera_info` topic to extract intrinsic data (`fx`, `fy`, `cx`, `cy`) and save them into a JSON file (e.g., `camera_intrinsics.json`). + +- **Extrinsic Calibration:** + Using a calibration target (e.g., a checkerboard, foam cubes, or a calibration box), select corresponding points in the LiDAR and image data in order to compute the transform $$ T_{\text{toplidar}}^{\text{frontcamera}} $$ with scripts like **calibrate_extrinsics.py**. + +- **Vehicle Frame Alignment:** + Determine the yaw offset between the LiDAR frame and the vehicle frame: + - Use **compute_yaw_offset.py** to manually select calibration objects (e.g., foam cubes) from the LiDAR scan and provide their corresponding vehicle frame positions (x forward, y left). + - The computed yaw offset is then incorporated with any other measurements in your full calibration pipeline. 
+ +- **Sensor Fusion Visualization:** + Finally, use the **sensor_fusion.py** script to project the LiDAR points—transformed using the computed extrinsics and vehicle alignment—onto a camera image, verifying that the sensors have been accurately calibrated. + +--- +## Dependencies and Setup + +### Required Packages + +Install the following Python packages: +```bash +pip install numpy open3d opencv-python matplotlib argparse pyyaml +``` + +### ROS Environment + +Ensure you have a working ROS (e.g., Noetic) setup with the following topics: +- `/oak/rgb/camera_info` +- `/oak/rgb/image_raw` +- `/ouster/points` + +--- + +## Scripts and Their Functions + +### capture_ouster_oak.py +- **Purpose:** Captures paired LiDAR scans and camera images. +- **Output:** Stores images as `.png` and LiDAR scans as `.npz`. + +### pick_lidar_points.py +- **Purpose:** Allows manual selection of feature points from the LiDAR scan for extrinsic calibration. + +### calibrate_extrinsics.py +- **Purpose:** Computes the sensor-to-sensor transform $$ T_{\text{toplidar}}^{\text{frontcamera}} $$ using corresponding points between LiDAR and camera images. + +### compute_yaw_offset.py +- **Purpose:** + - First, lets you manually crop or select calibration objects (typically foam cubes along the vehicle centerline) in the LiDAR scan. + - Then, prompts you to enter the corresponding vehicle frame (x, y) positions. + - Finally, computes the yaw offset (in degrees) between the LiDAR frame and the vehicle frame. + +### sensor_fusion.py +- **Purpose:** + - Loads the extrinsic calibration (from **calibrate_extrinsics.py**) and the camera intrinsics. + - Transforms LiDAR points into the camera frame. + - Projects these points onto a camera image. + - Visualizes the result to verify sensor alignment. 
+ +--- + +## How to Run + +### Step 1: Capture the Calibration Dataset +- Run **capture_ouster_oak.py**: + ```bash + python3 capture_ouster_oak.py --output_dir /path/to/calibration_01 + ``` + This will save paired LiDAR and image data. + +### Step 2: Extract Camera Intrinsics +- Use your ROS system to extract the intrinsic parameters (`fx`, `fy`, `cx`, `cy`) from the `/oak/rgb/camera_info` topic and save them as `camera_intrinsics.json`. + +### Step 3: Select LiDAR Calibration Points +- Run **pick_lidar_points.py** to manually select key points from your LiDAR scan: + ```bash + python3 pick_lidar_points.py --lidar_npz /path/to/lidar_0.npz --output /path/to/lidar_cal_points.npy + ``` + +### Step 4: Compute LiDAR-to-Camera Extrinsics +- Run **calibrate_extrinsics.py**: + ```bash + python3 calibrate_extrinsics.py \ + --lidar_points /path/to/lidar_cal_points.npy \ + --image /path/to/image_0.png \ + --intrinsics /path/to/camera_intrinsics.json \ + --output /path/to/extrinsics.npz + ``` + Follow prompts to select corresponding points in the camera frame. + +### Step 5: Compute Yaw Offset (Manual Method) +- Run **compute_yaw_offset.py**: + ```bash + python3 compute_yaw_offset.py --lidar_scan /path/to/lidar_0.npz + ``` + - A window will open for manual selection ("Manual Selection"). + - Use SHIFT+LEFT CLICK to select the calibration objects (foam cubes) along the vehicle centerline. + - After quitting the window, you will be prompted to enter the corresponding vehicle frame (x, y) positions (e.g., `5.5, 0`). + - The script computes and prints the yaw offset. + +### Step 6: Visualize Sensor Fusion +- Run **sensor_fusion.py** to project LiDAR points onto a camera image: + ```bash + python3 sensor_fusion.py \ + --extrinsics /path/to/extrinsics.npz \ + --intrinsics /path/to/camera_intrinsics.json \ + --lidar_scan /path/to/lidar_cal_points.npy \ + --image /path/to/image_0.png + ``` + Verify that the projected points align well with the image features. 
+ +### Step 7: Vehicle Frame Calibration (Absolute Calibration) +- Once you have the yaw offset and other measurements (e.g., ground plane height, rear axle height), run your absolute calibration script (e.g., **absolute_calibration.py**) to compute: + - $$ T_{\text{toplidar}}^{\text{vehicle}} $$ + - $$ T_{\text{frontcamera}}^{\text{vehicle}} $$ +- Save these calibrations in a YAML file (e.g., `gem_e4.yaml` in `GEMstack/knowledge/calibration/`). + diff --git a/GEMstack/offboard/calibration/absolute_calibration.py b/GEMstack/offboard/calibration/absolute_calibration.py new file mode 100644 index 000000000..76010bca8 --- /dev/null +++ b/GEMstack/offboard/calibration/absolute_calibration.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python3 +""" +Absolute Calibration of Vehicle Geometry and Sensor Mounts + +This script computes: + • T_toplidar^vehicle: the homogeneous transformation from the top LiDAR frame to the vehicle frame, + where the vehicle frame is defined with its origin at the rear axle center, x pointing forward, + y pointing left, and z pointing up. + + The calibration uses: + - A LiDAR scan of the flat ground to determine pitch and roll (by plane fitting) and the height above ground. + - A measured yaw offset (from distinctive calibration objects placed along the vehicle centerline) to correct horizontal alignment. + - A manual measurement of rear axle height above the ground. + + Once T_toplidar^vehicle is determined, T_frontcamera^vehicle is computed via: + T_frontcamera^vehicle = T_toplidar^vehicle * inv(T_toplidar^frontcamera) + + The transform T_toplidar^frontcamera is assumed to be computed in an earlier calibration step (e.g., using calibrate_extrinsics.py). + +The resulting calibrations are saved into a YAML file (e.g., gem_e4.yaml) with metadata. 
+""" + +import numpy as np +import cv2 +import argparse +import datetime +import yaml + +# --------------------------- +# Utility Functions +# --------------------------- + +def load_transform(npz_file): + """ + Load a 4x4 homogeneous transform from an npz file. + Assumes the file contains keys 'R' and 't'. + """ + data = np.load(npz_file) + R = data['R'] + t = data['t'] + T = np.eye(4) + T[:3, :3] = R + T[:3, 3] = t.flatten() + return T + +def fit_ground_plane(points): + """ + Fit a plane to ground LiDAR points using SVD. + Returns the normalized plane normal and the centroid. + Here, points is an (N,3) NumPy array. + """ + centroid = np.mean(points, axis=0, dtype=np.float32) + + # SVD on the centered data + _, _, Vt = np.linalg.svd((points - centroid).astype(np.float32)) + + normal = Vt[-1, :] # The smallest singular value gives the normal direction + # Ensure the normal points upward (z>0 in vehicle frame) + if normal[2] < 0: + normal = -normal + normal /= np.linalg.norm(normal) + return normal, centroid + +def compute_R_ground(normal): + """ + Compute the rotation matrix that aligns the measured ground normal to the vehicle's + desired ground normal [0, 0, 1] (i.e., z upward). + This correction removes pitch and roll errors. 
+ """ + desired = np.array([0, 0, 1], dtype=np.float32) + # Compute axis of rotation and angle between the normals + axis = np.cross(normal, desired) + axis_norm = np.linalg.norm(axis) + if axis_norm < 1e-6: + # Already aligned; return identity + return np.eye(3, dtype=np.float32) + axis = axis / axis_norm + angle = np.arccos(np.clip(np.dot(normal, desired), -1.0, 1.0)) + # Rodrigues formula to convert axis-angle to rotation matrix + K = np.array([[0, -axis[2], axis[1]], + [axis[2], 0, -axis[0]], + [-axis[1], axis[0], 0]], dtype=np.float32) + R = np.eye(3, dtype=np.float32) + np.sin(angle) * K + (1 - np.cos(angle)) * (K @ K) + return R + +def create_T_toplidar_vehicle(ground_points, yaw_offset_deg, rear_axle_height): + """ + Using ground plane fit and measured yaw offset and rear axle height, compute the + transform T_toplidar^vehicle. + + Parameters: + - ground_points: (N,3) LiDAR scan points that belong to the ground (use a cropped region). + - yaw_offset_deg: Measured yaw offset (in degrees) between the LiDAR and vehicle centerline. + - rear_axle_height: Measured height of the rear axle center above the ground (in meters). + + Returns: + - A 4x4 homogeneous matrix T_toplidar_vehicle. + """ + # Fit the ground plane to obtain the measured ground normal. + normal, centroid = fit_ground_plane(ground_points) + # Compute rotation (pitch & roll correction) to align measured ground to [0,0,1]. + R_ground = compute_R_ground(normal) + # Yaw correction: rotation about the z-axis (vehicle's vertical axis) + yaw_offset_rad = np.radians(yaw_offset_deg) + R_yaw = np.array([[np.cos(yaw_offset_rad), -np.sin(yaw_offset_rad), 0], + [np.sin(yaw_offset_rad), np.cos(yaw_offset_rad), 0], + [0, 0, 1]], dtype=np.float32) + # Combined sensor-to-vehicle rotation: first align pitch/roll, then adjust yaw + R_vehicle = R_yaw @ R_ground + # For translation: we determine the desired z offset as the measured rear axle height. 
+ # Assume x and y offsets between the sensor and the vehicle frame are negligible, + # or have been measured separately (set to 0 here). + t_vehicle = np.array([0, 0, rear_axle_height], dtype=np.float32) + # Build homogeneous transform + T = np.eye(4, dtype=np.float32) + T[:3, :3] = R_vehicle + T[:3, 3] = t_vehicle + return T + +def combine_transforms(T_toplidar_vehicle, T_toplidar_frontcamera): + """ + Given T_toplidar^vehicle and T_toplidar^frontcamera (from previous front-camera calibration), + compute T_frontcamera^vehicle: + T_frontcamera^vehicle = T_toplidar^vehicle * inv(T_toplidar^frontcamera) + """ + T_frontcamera_vehicle = T_toplidar_vehicle @ np.linalg.inv(T_toplidar_frontcamera) + return T_frontcamera_vehicle + +def save_calibration(filename, T_toplidar_vehicle, T_frontcamera_vehicle, vehicle, calibration_type): + """ + Save the calibration transforms and metadata to a YAML file. + """ + calibration_data = { + "vehicle": vehicle, + "calibration_type": calibration_type, + "date": str(datetime.datetime.now()), + "T_toplidar_vehicle": T_toplidar_vehicle.tolist(), + "T_frontcamera_vehicle": T_frontcamera_vehicle.tolist() + } + with open(filename, 'w') as f: + yaml.dump(calibration_data, f) + print("Calibration saved to", filename) + +# --------------------------- +# Main routine +# --------------------------- +def main(args): + # For ground calibration, load a LiDAR scan file that covers a flat floor. + # It is assumed that the file contains points that belong to the ground. + ground_points = np.load(args.ground_lidar_points).astype(np.float32) + + print("Loaded ground LiDAR points with shape:", ground_points.shape) + + # Compute T_toplidar^vehicle from ground plane and measurements. + T_toplidar_vehicle = create_T_toplidar_vehicle(ground_points, args.yaw_offset, args.rear_axle_height) + print("Computed T_toplidar^vehicle:") + print(T_toplidar_vehicle) + + # Load the previously computed sensor calibration T_toplidar^frontcamera. 
+ T_toplidar_frontcamera = load_transform(args.toplidar_frontcamera) + print("Loaded T_toplidar^frontcamera:") + print(T_toplidar_frontcamera) + + # Compute T_frontcamera^vehicle. + T_frontcamera_vehicle = combine_transforms(T_toplidar_vehicle, T_toplidar_frontcamera) + print("Computed T_frontcamera^vehicle:") + print(T_frontcamera_vehicle) + + # Save the calibration to a YAML file with metadata. + save_calibration(args.output_yaml, T_toplidar_vehicle, T_frontcamera_vehicle, args.vehicle, "absolute_vehicle_calibration") + +if __name__ == '__main__': + # Import datetime and yaml here + import datetime, yaml + parser = argparse.ArgumentParser( + description="Absolute Calibration of Vehicle Frame: Compute transforms T_toplidar^vehicle and T_frontcamera^vehicle." + ) + parser.add_argument("--ground_lidar_points", default = "./data/ground_points.npy", type=str, + help="Path to LiDAR scan file with ground points (e.g., ground_points.npy)") + parser.add_argument("--toplidar_frontcamera", default = "./data/extrinsics.npz", type=str, + help="Path to sensor calibration file for T_toplidar^frontcamera (e.g., extrinsics.npz)") + parser.add_argument("--rear_axle_height", default = 0.33, type=float, + help="Measured rear axle height above the ground (in meters)") + parser.add_argument("--yaw_offset",default = 0.12, type=float, + help="Measured yaw offset (in degrees) from calibration objects on the vehicle centerline") + parser.add_argument("--vehicle", default = "gem_e4", type=str, + help="Vehicle identifier (e.g., gem_e4)") + parser.add_argument("--output_yaml", default = "gem_e4.yaml",type=str, + help="Output YAML file path (e.g., gem_e4.yaml)") + args = parser.parse_args() + main(args) diff --git a/GEMstack/offboard/calibration/add2yaml.py b/GEMstack/offboard/calibration/add2yaml.py new file mode 100644 index 000000000..c291f6f1d --- /dev/null +++ b/GEMstack/offboard/calibration/add2yaml.py @@ -0,0 +1,32 @@ +import yaml +import numpy as np + +# Load the transformation data 
from the NPZ file +npz_file = "./data/extrinsics.npz" +extrinsics_data = np.load(npz_file) + +# Extract rotation (3x3) and translation (3x1) +R = extrinsics_data["R"] # Shape: (3, 3) +t = extrinsics_data["t"].reshape(3, 1) # Ensure shape: (3, 1) + +# Create a 4x4 transformation matrix +T = np.eye(4) # Initialize as identity matrix +T[:3, :3] = R # Set rotation part +T[:3, 3] = t.flatten() # Set translation part + +# Convert NumPy array to list for YAML compatibility +transformation_matrix = T.tolist() + +# Load existing YAML file +yaml_file = "./gem_e4.yaml" +with open(yaml_file, "r") as file: + data = yaml.safe_load(file) + +# Add the transformation matrix to the YAML data +data["T_toplidar_frontcamera"] = transformation_matrix # Modify key as needed + +# Save the updated YAML file +with open(yaml_file, "w") as file: + yaml.dump(data, file, default_flow_style=False) + +print(f"Transformation matrix added to {yaml_file}") diff --git a/GEMstack/offboard/calibration/calibrate_extrinsics.py b/GEMstack/offboard/calibration/calibrate_extrinsics.py new file mode 100644 index 000000000..bfa27dff9 --- /dev/null +++ b/GEMstack/offboard/calibration/calibrate_extrinsics.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python3 +import numpy as np +import cv2 +import matplotlib.pyplot as plt +import json +import argparse + +def pick_image_points(image): + """ + Display the image using matplotlib and let the user pick + calibration points interactively. 
+ """ + plt.imshow(image) + plt.title("Click to select corresponding calibration points.\nPress Enter when done.") + pts = plt.ginput(n=-1, timeout=0) + plt.close() + image_points = np.array(pts, dtype=np.float32) + return image_points + +def compute_reprojection_error(object_points, image_points, rvec, tvec, camera_matrix, dist_coeffs): + projected_points, _ = cv2.projectPoints(object_points, rvec, tvec, camera_matrix, dist_coeffs) + projected_points = projected_points.squeeze() + errors = np.linalg.norm(image_points - projected_points, axis=1) + return np.mean(errors) + +def main(args): + # Load LiDAR calibration points (3D points). These should be in the same coordinate ordering as your image points. + lidar_points = np.load(args.lidar_points) # e.g., lidar_cal_points.npy + print("Loaded LiDAR calibration points:") + print(lidar_points) + + # Load the calibration image. + image = cv2.imread(args.image) + if image is None: + print("Error: could not load image", args.image) + return + image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + + # Pick corresponding 2D image points. + print("Select corresponding feature points in the image, in the same order as the LiDAR points.") + image_points = pick_image_points(image_rgb) + + if image_points.shape[0] != lidar_points.shape[0]: + print("Error: number of image points and LiDAR points do not match.") + return + + # Load camera intrinsics. + with open(args.intrinsics, "r") as f: + intrinsics = json.load(f) + fx = intrinsics["fx"] + fy = intrinsics["fy"] + cx = intrinsics["cx"] + cy = intrinsics["cy"] + camera_matrix = np.array([[fx, 0, cx], + [0, fy, cy], + [0, 0, 1]], dtype=np.float32) + # Assume no lens distortion (or load them if available). + dist_coeffs = np.zeros((4,1), dtype=np.float32) + + # Use cv2.solvePnPRansac for a robust initial estimate. 
+ print("Computing initial solution using solvePnPRansac...") + retval, rvec, tvec, inliers = cv2.solvePnPRansac( + objectPoints=lidar_points, + imagePoints=image_points, + cameraMatrix=camera_matrix, + distCoeffs=dist_coeffs, + flags=cv2.SOLVEPNP_EPNP + ) + if not retval: + print("solvePnPRansac failed to find a solution.") + return + print("RANSAC output:") + print("Rotation vector (rvec):", rvec) + print("Translation vector (tvec):", tvec) + if inliers is not None: + print("Number of inliers:", len(inliers)) + + # Refine the solution with iterative optimization. + print("Refining solution using solvePnP (SOLVEPNP_ITERATIVE)...") + retval, rvec_refined, tvec_refined = cv2.solvePnP( + objectPoints=lidar_points, + imagePoints=image_points, + cameraMatrix=camera_matrix, + distCoeffs=dist_coeffs, + rvec=rvec, + tvec=tvec, + useExtrinsicGuess=True, + flags=cv2.SOLVEPNP_ITERATIVE + ) + if not retval: + print("Refinement with solvePnP failed.") + return + + # Compute reprojection error using the refined solution. + mean_error = compute_reprojection_error(lidar_points, image_points, rvec_refined, tvec_refined, camera_matrix, dist_coeffs) + print("Mean reprojection error (refined):", mean_error) + + # Convert the refined rotation vector to a rotation matrix. + R_refined, _ = cv2.Rodrigues(rvec_refined) + print("Estimated refined Rotation Matrix:\n", R_refined) + print("Estimated refined Translation Vector:\n", tvec_refined) + + # Visualize reprojection: project the LiDAR points onto the image using the refined extrinsics. + projected_img_points, _ = cv2.projectPoints(lidar_points, rvec_refined, tvec_refined, camera_matrix, dist_coeffs) + projected_img_points = projected_img_points.squeeze() + + for pt in projected_img_points: + cv2.circle(image, (int(pt[0]), int(pt[1])), 5, (0, 0, 255), -1) + + cv2.imshow("Reprojected LiDAR Points", image) + cv2.waitKey(0) + cv2.destroyAllWindows() + + # Save the refined extrinsics to disk. 
+ extrinsics = {"R": R_refined.tolist(), "t": tvec_refined.tolist(), "rvec": rvec_refined.tolist()} + np.savez(args.output, **extrinsics) + print("Extrinsics saved to:", args.output) + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="Robust Extrinsic Calibration: Compute LiDAR-to-Camera transformation using RANSAC and iterative refinement." + ) + parser.add_argument("--lidar_points", type=str, required=True, + help="Path to saved LiDAR calibration points file (e.g., lidar_cal_points.npy)") + parser.add_argument("--image", type=str, required=True, + help="Path to a calibration image (e.g., image_0.png)") + parser.add_argument("--intrinsics", type=str, required=True, + help="Path to camera intrinsics JSON file (e.g., camera_intrinsics.json)") + parser.add_argument("--output", type=str, default="extrinsics.npz", + help="Output file name for saving computed extrinsics") + args = parser.parse_args() + main(args) diff --git a/GEMstack/offboard/calibration/capture_lidar_zed.py b/GEMstack/offboard/calibration/capture_lidar_zed.py index 21814b793..f343ccd74 100644 --- a/GEMstack/offboard/calibration/capture_lidar_zed.py +++ b/GEMstack/offboard/calibration/capture_lidar_zed.py @@ -68,7 +68,7 @@ def save_scan(lidar_fn,color_fn,depth_fn): def main(folder='data',start_index=1): rospy.init_node("capture_lidar_zed",disable_signals=True) - lidar_sub = rospy.Subscriber("/lidar1/velodyne_points", PointCloud2, lidar_callback) + lidar_sub = rospy.Subscriber("/ouster/points", PointCloud2, lidar_callback) camera_sub = rospy.Subscriber("/zed2/zed_node/rgb/image_rect_color", Image, camera_callback) depth_sub = rospy.Subscriber("/zed2/zed_node/depth/depth_registered", Image, depth_callback) index = start_index @@ -94,12 +94,17 @@ def main(folder='data',start_index=1): files = [os.path.join(folder,'lidar{}.npz'.format(index)), os.path.join(folder,'color{}.png'.format(index)), os.path.join(folder,'depth{}.tif'.format(index))] - save_scan(*files) + #Lpath = 
"GEMstack/data/scans/lidar_scan/" + str(index) + ".npz" + #Cpath = "GEMstack/data/scans/color_image/" + str(index) + ".png" + #Dpath = "GEMstack/data/scans/depth_image/" + str(index) + ".tif" + save_scan(files[0],files[1],files[2]) index += 1 if __name__ == '__main__': import sys - folder = 'data' + folder = '/home/karteek/GEMstack/data/li_zed/scans/' + if not os.path.exists(folder): + os.makedirs(folder) start_index = 1 if len(sys.argv) >= 2: folder = sys.argv[1] diff --git a/GEMstack/offboard/calibration/capture_ouster_oak.py b/GEMstack/offboard/calibration/capture_ouster_oak.py new file mode 100644 index 000000000..acc1c6d84 --- /dev/null +++ b/GEMstack/offboard/calibration/capture_ouster_oak.py @@ -0,0 +1,63 @@ +import rospy +from sensor_msgs.msg import Image, PointCloud2, CameraInfo +import cv2 +from cv_bridge import CvBridge +import numpy as np +import os +import sensor_msgs.point_cloud2 as pc2 + +# Global variables +bridge = CvBridge() +lidar_points = None +camera_image = None + +# Callback for LiDAR data +def lidar_callback(msg): + global lidar_points + lidar_points = msg + +# Callback for camera image +def camera_callback(msg): + global camera_image + camera_image = msg + +# Convert PointCloud2 to numpy array +def pc2_to_numpy(pc2_msg): + return np.array(list(pc2.read_points(pc2_msg, skip_nans=True)), dtype=np.float32)[:, :3] + +# Save data +def save_data(lidar_file, image_file): + global lidar_points, camera_image + if lidar_points and camera_image: + # Save LiDAR data + lidar_np = pc2_to_numpy(lidar_points) + np.savez(lidar_file, lidar_np) + # Save Image data + cv_image = bridge.imgmsg_to_cv2(camera_image, "bgr8") + cv2.imwrite(image_file, cv_image) + print(f"Saved: {lidar_file}, {image_file}") + +# Main function +def main(output_dir='home/manan/CS588/GEMstack/GEMstack/offboard/calibration'): + rospy.init_node('capture_ouster_oak', anonymous=True) + rospy.Subscriber('/oak/rgb/image_raw', Image, camera_callback) + rospy.Subscriber('/ouster/points', 
PointCloud2, lidar_callback) + + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + index = 0 + print("Press 's' to save data or 'q' to quit.") + + while not rospy.is_shutdown(): + if camera_image is not None: + cv2.imshow("Camera Image", bridge.imgmsg_to_cv2(camera_image, "bgr8")) + key = cv2.waitKey(30) + if key == ord('s'): # Save data on 's' key press + save_data(f"{output_dir}/lidar_{index}.npz", f"{output_dir}/image_{index}.png") + index += 1 + elif key == ord('q'): # Quit on 'q' key press + break + +if __name__ == "__main__": + main() diff --git a/GEMstack/offboard/calibration/compute_yaw_offset.py b/GEMstack/offboard/calibration/compute_yaw_offset.py new file mode 100644 index 000000000..19ec01f2a --- /dev/null +++ b/GEMstack/offboard/calibration/compute_yaw_offset.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 +import numpy as np +import open3d as o3d +import argparse + +def load_lidar_scan(lidar_file): + """Load a LiDAR scan from an npz or npy file.""" + data = np.load(lidar_file, allow_pickle=True) + if isinstance(data, np.ndarray): + return data.astype(np.float32) + else: + key = list(data.keys())[0] + return data[key].astype(np.float32) + +def manual_crop(pcd): + """Interactively select a polygon region to crop the LiDAR point cloud.""" + print("\nCrop the LiDAR scan:") + print("1. Press 'K' to lock the screen and enter selection mode.") + print("2. Use Ctrl + Left Click to draw a polygon around the region of interest.") + print("3. 
Press 'C' to crop the selection, then 'F' to exit.") + vis = o3d.visualization.VisualizerWithEditing() + vis.create_window(window_name="Crop Point Cloud", width=800, height=600) + vis.add_geometry(pcd) + vis.run() # User draws polygon and crops + vis.destroy_window() + # Extract cropped points + cropped_pcd = vis.get_cropped_geometry() + return cropped_pcd + +def manual_select_objects(pcd): + """Interactively select calibration objects in the cropped point cloud.""" + print("\nSelect calibration objects:") + print("Use SHIFT+LEFT CLICK to pick points on each calibration object.") + print("Press 'Q' when done.") + vis = o3d.visualization.VisualizerWithEditing() + vis.create_window(window_name="Select Calibration Objects", width=800, height=600) + vis.add_geometry(pcd) + vis.run() # User selects points + vis.destroy_window() + picked_indices = vis.get_picked_points() + if not picked_indices: + print("No points selected. Exiting.") + exit(1) + selected = np.asarray(pcd.points)[picked_indices, :] + return selected + +def get_vehicle_reference_points(num_points): + """Prompt for vehicle frame coordinates of each selected object.""" + vehicle_points = [] + for i in range(num_points): + inp = input(f"Enter vehicle (x,y) for object {i+1} (e.g., 5.5,0): ") + try: + x, y = map(float, inp.split(',')) + vehicle_points.append([x, y]) + except: + print("Invalid input. 
Use comma-separated values (e.g., 5.5,0).") + exit(1) + return np.array(vehicle_points) + +def compute_yaw_offset(lidar_xy, vehicle_xy): + """Compute yaw offset (degrees) between LiDAR and vehicle frames.""" + A = lidar_xy - np.mean(lidar_xy, axis=0) + B = vehicle_xy - np.mean(vehicle_xy, axis=0) + H = A.T @ B + U, _, Vt = np.linalg.svd(H) + R = Vt.T @ U.T + yaw_rad = np.arctan2(R[1, 0], R[0, 0]) + return np.degrees(yaw_rad) + +def main(args): + # Step 1: Load LiDAR scan + points = load_lidar_scan(args.lidar_scan) + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(points) + print(f"Loaded LiDAR scan with {len(points)} points.") + + # Step 2: Crop the point cloud interactively + cropped_pcd = manual_crop(pcd) + print(f"Cropped to {len(cropped_pcd.points)} points.") + + # Step 3: Select calibration objects in the cropped cloud + selected_points = manual_select_objects(cropped_pcd) + print(f"Selected {len(selected_points)} calibration objects.") + + # Step 4: Get vehicle frame coordinates for these objects + vehicle_xy = get_vehicle_reference_points(len(selected_points)) + + # Step 5: Compute yaw offset + lidar_xy = selected_points[:, :2] + yaw_offset_deg = compute_yaw_offset(lidar_xy, vehicle_xy) + print(f"Yaw offset (LiDAR -> Vehicle): {yaw_offset_deg:.2f}°") + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Crop LiDAR scan and compute yaw offset.") + parser.add_argument("--lidar_scan", type=str, required=True, + help="Path to LiDAR scan file (e.g., lidar_0.npz)") + args = parser.parse_args() + main(args) diff --git a/GEMstack/offboard/calibration/data/camera_intrinsics.json b/GEMstack/offboard/calibration/data/camera_intrinsics.json new file mode 100644 index 000000000..74318680c --- /dev/null +++ b/GEMstack/offboard/calibration/data/camera_intrinsics.json @@ -0,0 +1,7 @@ +{ + "fx": 684.8333129882812, + "fy": 684.6096801757812, + "cx": 573.37109375, + "cy": 363.700927734375 + } + \ No newline at end of file diff 
--git a/GEMstack/offboard/calibration/data/extrinsics.npz b/GEMstack/offboard/calibration/data/extrinsics.npz new file mode 100644 index 000000000..663fe14bd Binary files /dev/null and b/GEMstack/offboard/calibration/data/extrinsics.npz differ diff --git a/GEMstack/offboard/calibration/data/gem_e4.yaml b/GEMstack/offboard/calibration/data/gem_e4.yaml new file mode 100644 index 000000000..517340a03 --- /dev/null +++ b/GEMstack/offboard/calibration/data/gem_e4.yaml @@ -0,0 +1,54 @@ +T_frontcamera_vehicle: +- - 0.04979815921110274 + - -0.02889308605338095 + - 0.998341277156963 + - 0.6874899022540225 +- - -0.9986898492450907 + - 0.010343220052680617 + - 0.05011489006575621 + - -0.006750728511462573 +- - -0.011774038000135968 + - -0.9995289850272167 + - -0.02834016107658961 + - -0.07694233627246522 +- - 0.0 + - 0.0 + - 0.0 + - 1.0 +T_toplidar_frontcamera: +- - 0.029096143490519977 + - -0.9995463314295703 + - -0.007781115579835407 + - -0.045768971801312515 +- - -0.005777780139254174 + - 0.007616104089319731 + - -0.999954305063568 + - -0.3868171492765085 +- - 0.9995599190096819 + - 0.02913977151915098 + - -0.005553559684576381 + - -0.6975440651652165 +- - 0.0 + - 0.0 + - 0.0 + - 1.0 +T_toplidar_vehicle: +- - 0.9995177984237671 + - -0.020904183387756348 + - 0.022959932684898376 + - 0.0 +- - 0.020975051447749138 + - 0.9997758865356445 + - -0.0028501423075795174 + - 0.0 +- - -0.0228952094912529 + - 0.003330353880301118 + - 0.9997323155403137 + - 0.33000001311302185 +- - 0.0 + - 0.0 + - 0.0 + - 1.0 +calibration_type: absolute_vehicle_calibration +date: '2025-02-25 22:31:02.057447' +vehicle: gem_e4 diff --git a/GEMstack/offboard/calibration/data/ground_points.npy b/GEMstack/offboard/calibration/data/ground_points.npy new file mode 100644 index 000000000..143d327d8 Binary files /dev/null and b/GEMstack/offboard/calibration/data/ground_points.npy differ diff --git a/GEMstack/offboard/calibration/data/lidar_cal_points.npy b/GEMstack/offboard/calibration/data/lidar_cal_points.npy 
new file mode 100644 index 000000000..749ba02c4 Binary files /dev/null and b/GEMstack/offboard/calibration/data/lidar_cal_points.npy differ diff --git a/GEMstack/offboard/calibration/extract_ground_points.py b/GEMstack/offboard/calibration/extract_ground_points.py new file mode 100644 index 000000000..c1c89b0d8 --- /dev/null +++ b/GEMstack/offboard/calibration/extract_ground_points.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python3 +import open3d as o3d +import numpy as np +import argparse +import os +import cv2 + +def load_lidar_scan(npz_path): + """ + Load a LiDAR point cloud from a file. + Assumes the file stores one array of shape (N, 3), either in + an npz container or as a direct npy file. + """ + if not os.path.isfile(npz_path): + raise FileNotFoundError(f"Could not find LiDAR file: {npz_path}") + data = np.load(npz_path, allow_pickle=True) + # If the file is saved as an npy array directly + if isinstance(data, np.ndarray): + return data.astype(np.float32) + else: + # Else, assume it's an npz file and get the first stored array. + key = list(data.keys())[0] + return data[key].astype(np.float32) + +def crop_ground_points(pcd): + """ + Use Open3D's interactive VisualizerWithEditing to let you select + vertices that define a polygon bounding the flat, level ground. + This routine then selects all points that fall within the + defined polygon (based on the x-y coordinates). + """ + print("Crop ground region:") + print(" Use shift+click to select vertices that enclose the flat ground area (e.g., a parking lot).") + print(" Press 'q' (or close the window) when finished.") + + vis = o3d.visualization.VisualizerWithEditing() + vis.create_window(window_name="Crop Ground", width=800, height=600) + vis.add_geometry(pcd) + vis.run() # Wait for user interaction. 
+ vis.destroy_window() + + picked_indices = vis.get_picked_points() + print("DEBUG: Picked indices for cropping:", picked_indices) + if not picked_indices: + print("No cropping vertices selected; returning the original point cloud.") + return pcd + + # Get the (x,y) coordinates of the picked vertices. + picked_points = np.asarray(pcd.points)[picked_indices, :] + polygon = picked_points[:, :2] + + # Prepare the polygon for cv2.pointPolygonTest: + contour = polygon.reshape((-1, 1, 2)).astype(np.float32) + + all_points = np.asarray(pcd.points) + indices_inside = [] + for i, pt in enumerate(all_points): + # Test using the (x,y) coordinates. + if cv2.pointPolygonTest(contour, (pt[0], pt[1]), False) >= 0: + indices_inside.append(i) + + print("DEBUG: Number of points inside polygon:", len(indices_inside)) + cropped_pcd = pcd.select_by_index(indices_inside) + return cropped_pcd + +def main(args): + # Load the LiDAR scan (ideally a full scan that includes the ground) + lidar_points = load_lidar_scan(args.lidar_scan) + print("Loaded LiDAR scan with {} points".format(lidar_points.shape[0])) + + # Create an Open3D point cloud from the data. + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(lidar_points) + + # Ask user if they want to crop to isolate the ground. + crop_response = input("Do you want to crop the point cloud to the ground region? (y/n): ").strip().lower() + if crop_response == 'y': + pcd = crop_ground_points(pcd) + print("After cropping, {} points remain.".format(np.asarray(pcd.points).shape[0])) + o3d.visualization.draw_geometries([pcd], window_name="Cropped Ground") + + # Save the cropped (ground only) points to a .npy file. 
+ np.save(args.output, np.asarray(pcd.points)) + print("Ground points saved to", args.output) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Extract ground (flat) LiDAR points for calibration.") + parser.add_argument("--lidar_scan", type=str, required=True, + help="Path to the LiDAR scan file (e.g., a full scan in .npz or .npy format)") + parser.add_argument("--output", type=str, required=True, + help="Output file name for the ground points (e.g., ground_points.npy)") + args = parser.parse_args() + main(args) diff --git a/GEMstack/offboard/calibration/gem_e4.yaml b/GEMstack/offboard/calibration/gem_e4.yaml new file mode 100644 index 000000000..146dac1be --- /dev/null +++ b/GEMstack/offboard/calibration/gem_e4.yaml @@ -0,0 +1,63 @@ +T_frontcamera_vehicle: +- - 0.04979815921110274 + - -0.02889308605338095 + - 0.998341277156963 + - 0.6874899022540225 +- - -0.9986898492450907 + - 0.010343220052680617 + - 0.05011489006575621 + - -0.006750728511462573 +- - -0.011774038000135968 + - -0.9995289850272167 + - -0.02834016107658961 + - -0.07694233627246522 +- - 0.0 + - 0.0 + - 0.0 + - 1.0 +T_toplidar_frontcamera: +- - 0.029096143490519977 + - -0.9995463314295703 + - -0.007781115579835407 + - -0.045768971801312515 +- - -0.005777780139254174 + - 0.007616104089319731 + - -0.999954305063568 + - -0.3868171492765085 +- - 0.9995599190096819 + - 0.02913977151915098 + - -0.005553559684576381 + - -0.6975440651652165 +- - 0.0 + - 0.0 + - 0.0 + - 1.0 +T_toplidar_vehicle: +- - 0.9995177984237671 + - -0.020904183387756348 + - 0.022959932684898376 + - 0.0 +- - 0.020975051447749138 + - 0.9997758865356445 + - -0.0028501423075795174 + - 0.0 +- - -0.0228952094912529 + - 0.003330353880301118 + - 0.9997323155403137 + - 0.33000001311302185 +- - 0.0 + - 0.0 + - 0.0 + - 1.0 +date: '2025-02-25 22:31:02.057447' +calibration_type: absolute_vehicle_calibration +gnss_location: +- 1.1 +- 0 +- 1.62 +gnss_yaw: 0.0 +rear_axle_height: 0.33 +reference: rear_axle_center +vehicle: 
gem_e4 +top_lidar: !include "gem_e4_ouster.yaml" +front_camera: !include "gem_e4_oak.yaml" diff --git a/GEMstack/offboard/calibration/image_snaps/image_0.png b/GEMstack/offboard/calibration/image_snaps/image_0.png new file mode 100644 index 000000000..856240b97 Binary files /dev/null and b/GEMstack/offboard/calibration/image_snaps/image_0.png differ diff --git a/GEMstack/offboard/calibration/image_snaps/image_1.png b/GEMstack/offboard/calibration/image_snaps/image_1.png new file mode 100644 index 000000000..987b1325f Binary files /dev/null and b/GEMstack/offboard/calibration/image_snaps/image_1.png differ diff --git a/GEMstack/offboard/calibration/image_snaps/image_2.png b/GEMstack/offboard/calibration/image_snaps/image_2.png new file mode 100644 index 000000000..f907c677c Binary files /dev/null and b/GEMstack/offboard/calibration/image_snaps/image_2.png differ diff --git a/GEMstack/offboard/calibration/image_snaps/image_3.png b/GEMstack/offboard/calibration/image_snaps/image_3.png new file mode 100644 index 000000000..6fa4c3dd0 Binary files /dev/null and b/GEMstack/offboard/calibration/image_snaps/image_3.png differ diff --git a/GEMstack/offboard/calibration/image_snaps/image_4.png b/GEMstack/offboard/calibration/image_snaps/image_4.png new file mode 100644 index 000000000..a314082e1 Binary files /dev/null and b/GEMstack/offboard/calibration/image_snaps/image_4.png differ diff --git a/GEMstack/offboard/calibration/image_snaps/image_5.png b/GEMstack/offboard/calibration/image_snaps/image_5.png new file mode 100644 index 000000000..f33420140 Binary files /dev/null and b/GEMstack/offboard/calibration/image_snaps/image_5.png differ diff --git a/GEMstack/offboard/calibration/image_snaps/image_6.png b/GEMstack/offboard/calibration/image_snaps/image_6.png new file mode 100644 index 000000000..43d396faf Binary files /dev/null and b/GEMstack/offboard/calibration/image_snaps/image_6.png differ diff --git a/GEMstack/offboard/calibration/lidar_scan/lidar_0.npz 
b/GEMstack/offboard/calibration/lidar_scan/lidar_0.npz new file mode 100644 index 000000000..5b4af5a34 Binary files /dev/null and b/GEMstack/offboard/calibration/lidar_scan/lidar_0.npz differ diff --git a/GEMstack/offboard/calibration/lidar_scan/lidar_1.npz b/GEMstack/offboard/calibration/lidar_scan/lidar_1.npz new file mode 100644 index 000000000..9d06280c7 Binary files /dev/null and b/GEMstack/offboard/calibration/lidar_scan/lidar_1.npz differ diff --git a/GEMstack/offboard/calibration/lidar_scan/lidar_2.npz b/GEMstack/offboard/calibration/lidar_scan/lidar_2.npz new file mode 100644 index 000000000..f483aa4b9 Binary files /dev/null and b/GEMstack/offboard/calibration/lidar_scan/lidar_2.npz differ diff --git a/GEMstack/offboard/calibration/lidar_scan/lidar_3.npz b/GEMstack/offboard/calibration/lidar_scan/lidar_3.npz new file mode 100644 index 000000000..1f8ee007e Binary files /dev/null and b/GEMstack/offboard/calibration/lidar_scan/lidar_3.npz differ diff --git a/GEMstack/offboard/calibration/lidar_scan/lidar_4.npz b/GEMstack/offboard/calibration/lidar_scan/lidar_4.npz new file mode 100644 index 000000000..20ed5e82a Binary files /dev/null and b/GEMstack/offboard/calibration/lidar_scan/lidar_4.npz differ diff --git a/GEMstack/offboard/calibration/lidar_scan/lidar_5.npz b/GEMstack/offboard/calibration/lidar_scan/lidar_5.npz new file mode 100644 index 000000000..22f495657 Binary files /dev/null and b/GEMstack/offboard/calibration/lidar_scan/lidar_5.npz differ diff --git a/GEMstack/offboard/calibration/lidar_scan/lidar_6.npz b/GEMstack/offboard/calibration/lidar_scan/lidar_6.npz new file mode 100644 index 000000000..74c7d9656 Binary files /dev/null and b/GEMstack/offboard/calibration/lidar_scan/lidar_6.npz differ diff --git a/GEMstack/offboard/calibration/pick_lidar_points.py b/GEMstack/offboard/calibration/pick_lidar_points.py new file mode 100644 index 000000000..093a33c4e --- /dev/null +++ b/GEMstack/offboard/calibration/pick_lidar_points.py @@ -0,0 +1,128 @@ 
+#!/usr/bin/env python3 +import open3d as o3d +import numpy as np +import argparse +import os +import cv2 + +def load_lidar_scan(npz_path): + """ + Load the LiDAR point cloud from the npz file. + Assumes the file stores one array with shape (N, 3). + """ + if not os.path.isfile(npz_path): + raise FileNotFoundError(f"Could not find LiDAR file: {npz_path}") + data = np.load(npz_path) + # Extract the first stored array (assumed to be the whole point cloud). + lidar_points = data[list(data.keys())[0]] + return lidar_points.astype(np.float32) + +def crop_point_cloud(pcd): + """ + Use Open3D's VisualizerWithEditing to let the user pick polygon vertices. + Then, using these vertices (projected in the x-y plane), select all points + from the point cloud that lie within the polygon. + + This method enables you to crop a dense region rather than only returning the selected vertices. + """ + print("Cropping step:") + print(" Use shift+click to select vertices that define the boundary of the region of interest (e.g., the face of the box).") + print(" After clicking the desired vertices (e.g., 4 points), press 'q' or close the window.") + + vis = o3d.visualization.VisualizerWithEditing() + vis.create_window(window_name="Crop ROI", width=800, height=600) + vis.add_geometry(pcd) + vis.run() # User interacts and then closes. + vis.destroy_window() + + picked_indices = vis.get_picked_points() + print("DEBUG: Cropping picked indices:", picked_indices) + if not picked_indices: + print("No cropping indices selected. Returning original point cloud.") + return pcd + + # Get the points you clicked. + picked_points = np.asarray(pcd.points)[picked_indices, :] + # Use the first two coordinates (x,y) to define the polygon. + poly_2d = picked_points[:, :2] + + # Prepare the polygon contour for cv2.pointPolygonTest. + # It expects an array of shape (n, 1, 2) of type float32. 
+ contour = poly_2d.reshape((-1, 1, 2)).astype(np.float32) + + # Now, iterate over all points in the cloud and test if they fall inside the polygon. + all_points = np.asarray(pcd.points) + indices_inside = [] + for i, pt in enumerate(all_points): + # Test using (x, y) of the current point. + if cv2.pointPolygonTest(contour, (pt[0], pt[1]), False) >= 0: + indices_inside.append(i) + + print("DEBUG: Number of points inside cropping polygon:", len(indices_inside)) + cropped_pcd = pcd.select_by_index(indices_inside) + return cropped_pcd + +def pick_calibration_points(pcd): + """ + Use Open3D's VisualizerWithEditing to interactively pick calibration points + from the (possibly cropped) point cloud. + + The user should select the key feature points (e.g., corners of the calibration target). + """ + print("Calibration point selection:") + print(" Use shift+click to pick the feature points (e.g., corners) in the point cloud.") + print(" Then press 'q' or close the window.") + + vis = o3d.visualization.VisualizerWithEditing() + vis.create_window(window_name="Pick Calibration Points", width=800, height=600) + vis.add_geometry(pcd) + vis.run() + vis.destroy_window() + + picked_indices = vis.get_picked_points() + print("DEBUG: Calibration picked indices:", picked_indices) + if not picked_indices: + print("No calibration points were selected. Exiting.") + exit(1) + selected_points = np.asarray(pcd.points)[picked_indices, :] + return selected_points + +def main(args): + # Load LiDAR scan from file. + lidar_points = load_lidar_scan(args.lidar_npz) + print(f"Loaded LiDAR scan with {lidar_points.shape[0]} points from {args.lidar_npz}") + + # Create an Open3D point cloud. + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(lidar_points) + + # Optional cropping: if the full scan is large, crop to the region containing your target. + crop_response = input("Do you want to crop the point cloud? 
(y/n): ").strip().lower() + if crop_response == 'y': + pcd = crop_point_cloud(pcd) + print(f"After cropping, point cloud has {np.asarray(pcd.points).shape[0]} points.") + o3d.visualization.draw_geometries([pcd], window_name="Cropped Point Cloud") + + # Pick calibration points from the (possibly cropped) point cloud. + cal_points = pick_calibration_points(pcd) + if cal_points.shape[0] < 3: + print("Error: You must select at least 3 points for calibration.") + exit(1) + print("Selected LiDAR calibration points:") + print(cal_points) + + # Save selected calibration points to file. + output_path = args.output + np.save(output_path, cal_points) + print(f"Calibration points saved to {output_path}") + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Interactively pick LiDAR calibration points (with optional cropping) from a captured LiDAR scan." + ) + parser.add_argument("--lidar_npz", type=str, required=True, + help="Path to the LiDAR npz file (from the paired capture)") + parser.add_argument("--output", type=str, default="lidar_cal_points.npy", + help="Output filename for saving the picked calibration points") + args = parser.parse_args() + main(args) diff --git a/GEMstack/offboard/calibration/sensor_fusion.py b/GEMstack/offboard/calibration/sensor_fusion.py new file mode 100644 index 000000000..216f1b162 --- /dev/null +++ b/GEMstack/offboard/calibration/sensor_fusion.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python3 +import cv2 +import numpy as np +import json +import argparse + +def load_extrinsics(extrinsics_file): + """ + Load calibrated extrinsics from a .npz file. + Assumes the file contains keys 'R' and 't'. + """ + data = np.load(extrinsics_file) + R = data['R'] + t = data['t'] + return R, t + +def load_intrinsics(intrinsics_file): + """ + Load camera intrinsics from a JSON file. + Expects keys: 'fx', 'fy', 'cx', and 'cy'. 
+ """ + with open(intrinsics_file, 'r') as f: + intrinsics = json.load(f) + fx = intrinsics["fx"] + fy = intrinsics["fy"] + cx = intrinsics["cx"] + cy = intrinsics["cy"] + K = np.array([[fx, 0, cx], + [0, fy, cy], + [0, 0, 1]], dtype=np.float32) + return K + +def load_lidar_scan(lidar_file): + """ + Load a LiDAR scan from a file. + This function handles both npy (returns a NumPy array) and npz files. + """ + data = np.load(lidar_file, allow_pickle=True) + if isinstance(data, np.ndarray): + return data.astype(np.float32) + else: + key = list(data.keys())[0] + return data[key].astype(np.float32) + +def transform_lidar_points(lidar_points, R, t): + """ + Transform LiDAR points from the LiDAR frame into the camera frame. + p_cam = R * p_lidar + t. + """ + P_cam = (R @ lidar_points.T + t.reshape(3,1)).T + return P_cam + +def project_points(points_3d, K): + """ + Project 3D points (in the camera frame) into 2D image coordinates using the camera matrix K. + Only projects points with z > 0. + """ + proj_points = [] + for pt in points_3d: + if pt[2] > 0: # only project points in front of the camera + u = K[0, 0] * (pt[0] / pt[2]) + K[0, 2] + v = K[1, 1] * (pt[1] / pt[2]) + K[1, 2] + proj_points.append((int(u), int(v))) + return proj_points + +def main(args): + # Load extrinsics (R and t) + R, t = load_extrinsics(args.extrinsics) + print("Loaded extrinsics") + + # Load camera intrinsics. + K = load_intrinsics(args.intrinsics) + print("Loaded intrinsics") + + # Load the LiDAR scan (e.g., calibration points or full scan) + lidar_points = load_lidar_scan(args.lidar_scan) + print("Loaded LiDAR scan with {} points".format(lidar_points.shape[0])) + + # Transform LiDAR points into the camera coordinate frame. + lidar_in_camera = transform_lidar_points(lidar_points, R, t) + + # Project the transformed points into the image plane. + projected_pts = project_points(lidar_in_camera, K) + + # Load the camera image. 
+ image = cv2.imread(args.image) + if image is None: + print("Error: could not load image", args.image) + return + + # Draw projected LiDAR points on the image. + for pt in projected_pts: + cv2.circle(image, pt, 2, (0, 0, 255), -1) + + # Display the resulting image. + cv2.imshow("LiDAR Points Projected onto Image", image) + cv2.waitKey(0) + cv2.destroyAllWindows() + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description="Sensor Fusion: Project LiDAR points into the camera image using extrinsic calibration.") + parser.add_argument("--extrinsics", type=str, required=True, + help="Path to extrinsics file (e.g., extrinsics.npz)") + parser.add_argument("--intrinsics", type=str, required=True, + help="Path to camera intrinsics JSON file (e.g., camera_intrinsics.json)") + parser.add_argument("--lidar_scan", type=str, required=True, + help="Path to the LiDAR scan file (e.g., lidar_cal_points.npy)") + parser.add_argument("--image", type=str, required=True, + help="Path to the camera image file (e.g., image_0.png)") + args = parser.parse_args() + main(args) diff --git a/GEMstack/onboard/execution/entrypoint.py b/GEMstack/onboard/execution/entrypoint.py index b1e7a8d10..39606ff91 100644 --- a/GEMstack/onboard/execution/entrypoint.py +++ b/GEMstack/onboard/execution/entrypoint.py @@ -98,6 +98,7 @@ def caution_callback(k,variant): logmeta = config.load_config_recursive(os.path.join(logfolder,'meta.yaml')) replay_topics = replay_settings.get('ros_topics',[]) + mission_executor.replay_topics(replay_topics,logfolder) #TODO: launch a roslog replay of the topics in ros_topics, disable in the vehicle interface for (name,s) in pipeline_settings.items(): diff --git a/GEMstack/onboard/execution/execution.py b/GEMstack/onboard/execution/execution.py index a65c2acc5..161138ec9 100644 --- a/GEMstack/onboard/execution/execution.py +++ b/GEMstack/onboard/execution/execution.py @@ -163,7 +163,7 @@ def validate_components(components : Dict[str,ComponentExecutor], provided : 
Lis else: assert provided_all or i in provided, "Component {} input {} is not provided by previous components".format(k,i) if i not in state: - executor_debug_print("Component {} input {} does not exist in AllState object",k,i) + executor_debug_print(0,"Component {} input {} does not exist in AllState object",k,i) if possible_inputs != ['all']: assert i in possible_inputs, "Component {} is not supposed to receive input {}".format(k,i) outputs = c.c.state_outputs() @@ -176,7 +176,7 @@ def validate_components(components : Dict[str,ComponentExecutor], provided : Lis if 'all' != o: provided.add(o) if o not in state: - executor_debug_print("Component {} output {} does not exist in AllState object",k,o) + executor_debug_print(0,"Component {} output {} does not exist in AllState object",k,o) else: provided_all = True for k,c in components.items(): @@ -389,7 +389,7 @@ def make_component(self, config_info, component_name, parent_module=None, extra_ raise if not isinstance(component,Component): raise RuntimeError("Component {} is not a subclass of Component".format(component_name)) - replacement = self.logging_manager.component_replayer(component_name, component) + replacement = self.logging_manager.component_replayer(self.vehicle_interface, component_name, component) if replacement is not None: executor_debug_print(1,"Replaying component {} from long {} with outputs {}",component_name,replacement.logfn,component.state_outputs()) component = replacement @@ -455,6 +455,14 @@ def replay_components(self, replayed_components : list, replay_folder : str): LogReplay objects. """ self.logging_manager.replay_components(replayed_components,replay_folder) + + def replay_topics(self, replayed_topics : list, replay_folder : str): + """Declare that the given components should be replayed from a log folder. + + Further make_component calls to this component will be replaced with + LogReplay objects. 
+ """ + self.logging_manager.replay_topics(replayed_topics,replay_folder) def event(self,event_description : str, event_print_string : str = None): """Logs an event to the metadata and prints a message to the console.""" @@ -494,7 +502,7 @@ def run(self): #start running components for k,c in self.all_components.items(): c.start() - + #start running mission self.state = AllState.zero() self.state.mission.type = MissionEnum.IDLE @@ -559,7 +567,7 @@ def run(self): for k,c in self.all_components.items(): executor_debug_print(2,"Stopping",k) c.stop() - + self.logging_manager.close() executor_debug_print(0,"Done with execution loop") @@ -630,7 +638,7 @@ def run_until_switch(self): """Runs a pipeline until a switch is requested.""" if self.current_pipeline == 'recovery': self.state.mission.type = MissionEnum.RECOVERY_STOP - + (perception_components,planning_components,other_components) = self.pipelines[self.current_pipeline] components = list(perception_components.values()) + list(planning_components.values()) + list(other_components.values()) + list(self.always_run_components.values()) dt_min = min([c.dt for c in components if c.dt != 0.0]) @@ -639,6 +647,9 @@ def run_until_switch(self): self.state.t = self.vehicle_interface.time() self.logging_manager.set_vehicle_time(self.state.t) self.last_loop_time = time.time() + #publish ros topics + if(self.logging_manager.rosbag_player): + self.logging_manager.rosbag_player.update_topics(self.state.t) #check for vehicle faults self.check_for_hardware_faults() diff --git a/GEMstack/onboard/execution/logging.py b/GEMstack/onboard/execution/logging.py index 31ebd962b..99c107be5 100644 --- a/GEMstack/onboard/execution/logging.py +++ b/GEMstack/onboard/execution/logging.py @@ -8,6 +8,11 @@ import subprocess import numpy as np import cv2 +import requests +from msal import PublicClientApplication +import json +import rosbag +import rospy class LoggingManager: """A top level manager of the logging process. 
This is responsible for @@ -16,7 +21,12 @@ class LoggingManager: def __init__(self): self.log_folder = None # type: Optional[str] self.replayed_components = dict() # type Dict[str,str] + self.replayed_topics = dict() # type Dict[str,str] + self.rosbag_player = None + self.logged_components = set() # type: Set[str] + self.logged_topics = set() # type: Set[str] + self.component_output_loggers = dict() # type: Dict[str,list] self.behavior_log = None self.rosbag_process = None @@ -27,6 +37,7 @@ def __init__(self): self.vehicle_time = None self.start_vehicle_time = None self.debug_messages = {} + self.onedrive_manager = None def logging(self) -> bool: return self.log_folder is not None @@ -74,16 +85,40 @@ def replay_components(self, replayed_components : list, replay_folder : str): if c not in logged_components: raise ValueError("Replay component",c,"was not logged in",replay_folder,"(see settings.yaml)") self.replayed_components[c] = replay_folder + + def replay_topics(self, replayed_topics : list, replay_folder : str): + """Declare that the given components should be replayed from a log folder. + + Further make_component calls to this component will be replaced with + BagReplay objects. + """ + #sanity check: was this item logged? 
+ settings = config.load_config_recursive(os.path.join(replay_folder,'settings.yaml')) + try: + logged_topics = settings['run']['log']['ros_topics'] + except KeyError: + logged_topics = [] + for c in replayed_topics: + if c not in logged_topics: + raise ValueError("Replay topic",c,"was not logged in",replay_folder,"'s vehicle.bag file (see settings.yaml)") + self.replayed_topics[c] = replay_folder + if(not self.rosbag_player): + self.rosbag_player = RosbagPlayer(replay_folder, self.replayed_topics) + + + + + - def component_replayer(self, component_name : str, component : Component) -> Optional[LogReplay]: + def component_replayer(self, vehicle_interface, component_name : str, component : Component) -> Optional[LogReplay]: if component_name in self.replayed_components: #replace behavior of class with the LogReplay class replay_folder = self.replayed_components[component_name] outputs = component.state_outputs() rate = component.rate() assert rate is not None and rate > 0, "Replayed component {} must have a positive rate".format(component_name) - return LogReplay(outputs, - os.path.join(replay_folder,'behavior_log.json'), + return LogReplay(vehicle_interface, outputs, + os.path.join(replay_folder,'behavior.json'), rate=rate) return None @@ -207,7 +242,7 @@ def dump_debug(self): else: isevent[col] = True f.write(','.join(columns)+'\n') - nrows = max(len(v[col]) for col in v) + nrows = max((len(v[col]) for col in v), default=0) for i in range(nrows): row = [] for col,vals in v.items(): @@ -261,8 +296,10 @@ def log_component_stderr(self, component : str, msg : List[str]) -> None: timestr = datetime.datetime.fromtimestamp(self.vehicle_time).strftime("%H:%M:%S.%f")[:-3] for l in msg: self.component_output_loggers[component][1].write(timestr + ': ' + l + '\n') - + def close(self): + + self.dump_debug() self.debug_messages = {} if self.rosbag_process is not None: @@ -276,10 +313,73 @@ def close(self): print('Log file size in MegaBytes is {}'.format(loginfo.st_size / (1024 
* 1024))) print('-------------------------------------------') self.rosbag_process = None + + record_bag = input("Do you want to upload this Rosbag? Y/N (default: Y): ") or "Y" + if(record_bag not in ["N", "no", "n", "No"]): + self.onedrive_manager = OneDriveManager() + self.onedrive_manager.upload_to_onedrive(self.log_folder) + + def __del__(self): self.close() +class OneDriveManager(): + def __init__(self): + self.config_found = False + try: + with open("onedrive_config.json", "r") as f: + config = json.load(f) + self.CLIENT_ID = config.get("CLIENT_ID") + self.TENANT_ID = config.get("TENANT_ID") + self.DRIVE_ID = config.get("DRIVE_ID") + self.ITEM_ID = config.get("ITEM_ID") + self.credentials = True + + except Exception as e: + print("No Onedrive config file found") + + + def upload_to_onedrive(self, log_folder): + + + AUTHORITY = f'https://login.microsoftonline.com/{self.TENANT_ID}' + SCOPES = ['Files.ReadWrite.All'] + + app = PublicClientApplication(self.CLIENT_ID, authority=AUTHORITY) + accounts = app.get_accounts() + + print("Opening Authentication Window") + + if accounts: + result = app.acquire_token_silent(SCOPES, account=accounts[0]) + else: + + result = app.acquire_token_interactive(SCOPES) + + if 'access_token' in result: + access_token = result['access_token'] + headers = { + 'Authorization': f'Bearer {access_token}', + 'Content-Type': 'application/octet-stream' + } + file_path = os.path.join(log_folder, 'vehicle.bag') + file_name = log_folder[5:]+ "_" + os.path.basename(file_path) + upload_url = ( + f'https://graph.microsoft.com/v1.0/drives/{self.DRIVE_ID}/items/' + f'{self.ITEM_ID}:/{file_name}:/content' + ) + + with open(file_path, 'rb') as file: + response = requests.put(upload_url, headers=headers, data=file) + + if response.status_code == 201 or response.status_code == 200: + print(f"✅ Successfully uploaded '{file_name}' to OneDrive.") + else: + print(f"❌ Upload failed: {response.status_code} - {response.text}") + else: + print("❌ Authentication 
failed.") + class LogReplay(Component): """Substitutes the output of a component with replayed data from a log file. @@ -334,6 +434,49 @@ def cleanup(self): self.logfile.close() +class RosbagPlayer: + ''' + Class which manages Ros bag replay. Note that this does not work unless the executor is running + ''' + def __init__(self, bag_path, topics): + self.bag = rosbag.Bag(os.path.join(bag_path, 'vehicle.bag'), 'r') + self.current_time = None + self.offset = -1 + + self.publishers = {} + for topic, msg, _ in self.bag.read_messages(): + if topic in topics and topic not in self.publishers: + msg_type = type(msg) + self.publishers[topic] = rospy.Publisher(topic, msg_type, queue_size=10) + rospy.loginfo(f"Created publisher for topic: {topic}") + print(f"Created publisher for topic: {topic}") + + + def update_topics(self, target_timestamp): + """ + Plays from the current position in the bag to the target timestamp. + :param target_timestamp: gem stack time to play until. + Will remember the currenttime and only play from current time to the target timestamp + """ + if self.offset <0: + self.offset = target_timestamp - self.bag.get_start_time() + if self.current_time is None: + self.current_time = self.bag.get_start_time() + + first_message = True + for topic, msg, t in self.bag.read_messages(start_time=rospy.Time(self.current_time )): + if t.to_sec() + self.offset > target_timestamp: + break # Stop when reaching the target time + if first_message and t.to_sec() == self.current_time: + first_message = False + continue + if topic in self.publishers: + self.publishers[topic].publish(msg) + + self.current_time = t.to_sec() + + def close(self): + self.bag.close() class VehicleBehaviorLogger(Component): def __init__(self,behavior_log, vehicle_interface): diff --git a/GEMstack/onboard/interface/gem_hardware.py b/GEMstack/onboard/interface/gem_hardware.py index e13ff817e..6a326a28f 100644 --- a/GEMstack/onboard/interface/gem_hardware.py +++ 
b/GEMstack/onboard/interface/gem_hardware.py @@ -1,11 +1,13 @@ from .gem import * from ...utils import settings import math +import time # ROS Headers import rospy from std_msgs.msg import String, Bool, Float32, Float64 from sensor_msgs.msg import Image,PointCloud2 +import message_filters try: from novatel_gps_msgs.msg import NovatelPosition, NovatelXYZ, Inspva except ImportError: @@ -26,6 +28,8 @@ import numpy as np from ...utils import conversions +import time + class GEMHardwareInterface(GEMInterface): """Interface for connnecting to the physical GEM e2 vehicle.""" def __init__(self): @@ -55,6 +59,7 @@ def __init__(self): self.front_depth_sub = None self.top_lidar_sub = None self.stereo_sub = None + self.sync = None self.faults = [] # -------------------- PACMod setup -------------------- @@ -151,6 +156,7 @@ def subscribe_sensor(self, name, callback, type = None): if name == 'gnss': topic = self.ros_sensor_topics[name] if topic.endswith('inspva'): + #GEM e2 uses Novatel GNSS if type is not None and (type is not Inspva and type is not GNSSReading): raise ValueError("GEMHardwareInterface GEM e2 only supports Inspva/GNSSReading for GNSS") if type is Inspva: @@ -169,7 +175,7 @@ def callback_with_gnss_reading(inspva_msg: Inspva): callback(GNSSReading(pose,speed,inspva_msg.status)) self.gnss_sub = rospy.Subscriber(topic, Inspva, callback_with_gnss_reading) else: - #assume it's septentrio + #assume it's septentrio on GEM e4 if type is not None and (type is not INSNavGeod and type is not GNSSReading): raise ValueError("GEMHardwareInterface GEM e4 only supports INSNavGeod/GNSSReading for GNSS") if type is INSNavGeod: @@ -177,8 +183,9 @@ def callback_with_gnss_reading(inspva_msg: Inspva): else: def callback_with_gnss_reading(msg: INSNavGeod): pose = ObjectPose(ObjectFrameEnum.GLOBAL, - x=msg.longitude, - y=msg.latitude, + t=self.time(), + x=math.degrees(msg.longitude), #Septentrio GNSS uses radians rather than degrees + y=math.degrees(msg.latitude), z=msg.height, 
yaw=math.radians(msg.heading), #heading from north in degrees (TODO: maybe?? check this) roll=math.radians(msg.roll), @@ -227,6 +234,40 @@ def callback_with_cv2(msg : Image): cv_image = conversions.ros_Image_to_cv2(msg, desired_encoding="passthrough") callback(cv_image) self.front_depth_sub = rospy.Subscriber(topic, Image, callback_with_cv2) + elif name == 'sensor_fusion_Lidar_Camera_GNSS': + def callback_with_cv2_numpy_gnss(rgb_image_msg: Image, lidar_pc2_msg: PointCloud2, gnss_msg: INSNavGeod): + points = conversions.ros_PointCloud2_to_numpy(lidar_pc2_msg, want_rgb=False) + cv_image = conversions.ros_Image_to_cv2(rgb_image_msg, desired_encoding="bgr8") + pose = ObjectPose(ObjectFrameEnum.GLOBAL, + t = 0, + x=gnss_msg.longitude, + y=gnss_msg.latitude, + z=gnss_msg.height, + yaw=math.radians(gnss_msg.heading), #heading from north in degrees (TODO: maybe?? check this) + roll=math.radians(gnss_msg.roll), + pitch=math.radians(gnss_msg.pitch), + ) + speed = np.sqrt(gnss_msg.ve**2 + gnss_msg.vn**2) + callback(cv_image,points,GNSSReading(pose,speed,('error' if gnss_msg.error else 'ok'))) + topic_camera = self.ros_sensor_topics['front_camera'] + topic_lidar = self.ros_sensor_topics['top_lidar'] + topic_gnss = self.ros_sensor_topics['gnss'] + self.front_camera_sub = message_filters.Subscriber(topic_camera, Image) + self.top_lidar_sub = message_filters.Subscriber(topic_lidar, PointCloud2) + self.gnss_sub = message_filters.Subscriber(topic_gnss, INSNavGeod) + self.sync = message_filters.ApproximateTimeSynchronizer([self.front_camera_sub, self.top_lidar_sub,self.gnss_sub], queue_size=10, slop=0.1) + self.sync.registerCallback(callback_with_cv2_numpy_gnss) + elif name == 'sensor_fusion_Lidar_Camera': + def callback_with_cv2_numpy(rgb_image_msg: Image, lidar_pc2_msg: PointCloud2): + points = conversions.ros_PointCloud2_to_numpy(lidar_pc2_msg, want_rgb=False) + cv_image = conversions.ros_Image_to_cv2(rgb_image_msg, desired_encoding="bgr8") + callback(cv_image,points) + 
topic_camera = self.ros_sensor_topics['front_camera'] + topic_lidar = self.ros_sensor_topics['top_lidar'] + self.front_camera_sub = message_filters.Subscriber(topic_camera, Image) + self.top_lidar_sub = message_filters.Subscriber(topic_lidar, PointCloud2) + self.sync = message_filters.ApproximateTimeSynchronizer([self.front_camera_sub, self.top_lidar_sub], queue_size=10, slop=0.1) + self.sync.registerCallback(callback_with_cv2_numpy) # PACMod enable callback function diff --git a/GEMstack/onboard/perception/IdTracker.py b/GEMstack/onboard/perception/IdTracker.py new file mode 100644 index 000000000..15641fa00 --- /dev/null +++ b/GEMstack/onboard/perception/IdTracker.py @@ -0,0 +1,12 @@ +class IdTracker(): + """Abstracts out id tracking + """ + def __init__(self): + self.__id = 0 + + def get_new_id(self) -> int: + """Returns a unique agent id + """ + assigned_id = self.__id + self.__id += 1 # id will intentionally overflow to get back to 0 + return assigned_id diff --git a/GEMstack/onboard/perception/agent_detection.py b/GEMstack/onboard/perception/agent_detection.py index 5d600f792..4b17d10f3 100644 --- a/GEMstack/onboard/perception/agent_detection.py +++ b/GEMstack/onboard/perception/agent_detection.py @@ -5,29 +5,52 @@ import threading import copy +import numpy as np + class OmniscientAgentDetector(Component): """Obtains agent detections from a simulator""" - def __init__(self,vehicle_interface : GEMInterface): + + def __init__(self, vehicle_interface: GEMInterface): self.vehicle_interface = vehicle_interface self.agents = {} self.lock = threading.Lock() + self.start_pose = None + def rate(self): return 4.0 - + def state_inputs(self): - return [] - + return ['vehicle'] + def state_outputs(self): return ['agents'] def initialize(self): - self.vehicle_interface.subscribe_sensor('agent_detector',self.agent_callback, AgentState) - - def agent_callback(self, name : str, agent : AgentState): + self.vehicle_interface.subscribe_sensor('agent_detector', 
self.agent_callback, AgentState) + + def agent_callback(self, name: str, agent: AgentState): with self.lock: self.agents[name] = agent - def update(self) -> Dict[str,AgentState]: + def update(self, vehicle : VehicleState) -> Dict[str, AgentState]: with self.lock: - return copy.deepcopy(self.agents) + res = {} + ped_num = 0 + + if self.start_pose is None: + self.start_pose = vehicle.pose + # print("\nVehicle start state self.start_pose:", self.start_pose) + # print("\nVehicle current state:", vehicle.pose, vehicle.v) + + for n, a in self.agents.items(): + # print("\nBefore to_frame: Agent:", n, a.pose, a.velocity) + a = a.to_frame(ObjectFrameEnum.START, current_pose = a.pose, start_pose_abs = self.start_pose) + # print("\nAfter to_frame START: Agent:", n, a.pose, a.velocity) + # print('==============', a.pose.frame==ObjectFrameEnum.START) + res[n] = a + if a.type == AgentEnum.PEDESTRIAN: + ped_num += 1 + if ped_num > 0: + print("\nDetected", ped_num, "pedestrians") + return res diff --git a/GEMstack/onboard/perception/calibration/camera_intrinsics.json b/GEMstack/onboard/perception/calibration/camera_intrinsics.json new file mode 100644 index 000000000..74318680c --- /dev/null +++ b/GEMstack/onboard/perception/calibration/camera_intrinsics.json @@ -0,0 +1,7 @@ +{ + "fx": 684.8333129882812, + "fy": 684.6096801757812, + "cx": 573.37109375, + "cy": 363.700927734375 + } + \ No newline at end of file diff --git a/GEMstack/onboard/perception/calibration/extrinsics/R.npy b/GEMstack/onboard/perception/calibration/extrinsics/R.npy new file mode 100644 index 000000000..7d44aa76d Binary files /dev/null and b/GEMstack/onboard/perception/calibration/extrinsics/R.npy differ diff --git a/GEMstack/onboard/perception/calibration/extrinsics/rvec.npy b/GEMstack/onboard/perception/calibration/extrinsics/rvec.npy new file mode 100644 index 000000000..62d1809f1 Binary files /dev/null and b/GEMstack/onboard/perception/calibration/extrinsics/rvec.npy differ diff --git 
a/GEMstack/onboard/perception/calibration/extrinsics/t.npy b/GEMstack/onboard/perception/calibration/extrinsics/t.npy new file mode 100644 index 000000000..b133fa55f Binary files /dev/null and b/GEMstack/onboard/perception/calibration/extrinsics/t.npy differ diff --git a/GEMstack/onboard/perception/pedestrian_detection.py b/GEMstack/onboard/perception/pedestrian_detection.py new file mode 100644 index 000000000..8a3125ecd --- /dev/null +++ b/GEMstack/onboard/perception/pedestrian_detection.py @@ -0,0 +1,627 @@ +""" +Top ouster lidar + Oak front camera fusion, object detection +""" +""" +Terminal 1: +--------------- +source /opt/ros/noetic/setup.bash && source /catkin_ws/devel/setup.bash +roscore + +Terminal 2: +--------------- +source /opt/ros/noetic/setup.bash && source /catkin_ws/devel/setup.bash +python3 GEMstack/onboard/perception/transform.py + +Terminal 3: +--------------- +source /opt/ros/noetic/setup.bash && source /catkin_ws/devel/setup.bash +rosbag play -l ~/rosbags/moving_vehicle_stopping_pedestrian.bag --topics /oak/rgb/camera_info /oak/rgb/image_raw /ouster/points /septentrio_gnss/imu /septentrio_gnss/insnavgeod + +Terminal 4: +--------------- +source /opt/ros/noetic/setup.bash && source /catkin_ws/devel/setup.bash +cd GEMStack +python3 main.py --variant=detector_only launch/pedestrian_detection.yaml + +Terminal 5: +--------------- +source /opt/ros/noetic/setup.bash && source /catkin_ws/devel/setup.bash +rviz +""" + +# Python +import os +from typing import List, Dict +from collections import defaultdict +# from datetime import datetime +from copy import deepcopy +# ROS, CV +import rospy +import tf +from cv_bridge import CvBridge +from sensor_msgs.msg import Image, PointCloud2 +from visualization_msgs.msg import MarkerArray +# YOLO +from ultralytics import YOLO +from ultralytics.engine.results import Results, Boxes +# GEMStack +from ...state import AllState,VehicleState,ObjectPose,ObjectFrameEnum,AgentState,AgentEnum,AgentActivityEnum,ObjectFrameEnum 
+from .pedestrian_detection_utils import * +from ..interface.gem import GEMInterface, GNSSReading +from ..component import Component +from .IdTracker import IdTracker +from scipy.spatial.transform import Rotation as R + + +def box_to_fake_agent(box): + """Creates a fake agent state from an (x,y,w,h) bounding box. + + The location and size are pretty much meaningless since this is just giving a 2D location. + """ + x,y,w,h = box + pose = ObjectPose(t=0,x=x+w/2,y=y+h/2,z=0,yaw=0,pitch=0,roll=0,frame=ObjectFrameEnum.CURRENT) + dims = (w,h,0) + return AgentState(pose=pose,dimensions=dims,outline=None,type=AgentEnum.PEDESTRIAN,activity=AgentActivityEnum.MOVING,velocity=(0,0,0),yaw_rate=0) + + +class PedestrianDetector2D(Component): + # TODO: Pull params into a JSON/yaml + # TODO: Convert some lists into np.arrays, vectorize calculations + # TODO: Implement logging instead of print, cleanup comments + # TODO: Cleanup funcs + split into separate classes + # TODO: Decide if we want to name dets "peds" or "objs"/"agents" + # Maybe peds for now and Agents in agent_detection.py? + def __init__(self, vehicle_interface : GEMInterface) -> None: + self.vehicle_interface = vehicle_interface + # Publish debug/viz rostopics if true + self.debug = True + # Setup variables + self.bridge = CvBridge() + # TODO: Wrap detector into GEMDetector? + self.detector = YOLO(os.getcwd()+'/GEMstack/knowledge/detection/yolov8n.pt') + # track_id: AgentState + self.prev_agents = dict() + self.current_agents = dict() + self.current_agent_obj_dims = dict() + # TODO Implement time + self.confidence = 0.7 + self.classes_to_detect = 0 + self.ground_threshold = -2.0 + self.max_human_depth = 0.9 + self.vehicle_frame = True # Indicate whether pedestrians centroids and point clouds are in the vehicle frame + + # Load calibration data + # TODO: Maybe lets add one word or link what R t K are? 
+ self.R_lidar_to_oak = load_extrinsics(os.getcwd() + '/GEMstack/onboard/perception/calibration/extrinsics/R.npy') + self.t_lidar_to_oak = load_extrinsics(os.getcwd() + '/GEMstack/onboard/perception/calibration/extrinsics/t.npy') + self.K = load_intrinsics(os.getcwd() + '/GEMstack/onboard/perception/calibration/camera_intrinsics.json') + + self.R_lidar_to_vehicle = np.array([[0.9995121, 0.02132941, 0.02281911], + [-0.02124771, 0.99976695, -0.00381707], + [-0.02289521, 0.00333035, 0.9997323]]) + self.t_lidar_to_vehicle = np.array([[0.0], [0.0], [0.35]]) + + # To calculate vehicle to start frame data + self.t_start_to_world = None + self.t_vehicle_to_world = None + self.t_vehicle_to_start = None + + # GEMStack Subscribers and sychronizers + # LIDAR Camera fusion + self.vehicle_interface.subscribe_sensor('sensor_fusion_Lidar_Camera',self.ouster_oak_callback) + + # TF listener to get transformation from LiDAR to Camera + self.tf_listener = tf.TransformListener() + + if self.debug: self.init_debug() + + self.prev_time = None # Time in seconds since last scan for basic velocity calculation + self.curr_time = None # Time in seconds of current scan for basic velocity calculation + self.id_tracker = IdTracker() + + # Update function variables + self.t_start = None # Estimated start frame time + self.start_pose_abs = None # Get this from GNSS (GLOBAL frame) + self.current_vehicle_state = None # Current vehicle state from GNSS in GLOBAL frame + self.previous_vehicle_state = None # Previous vehicle state from GNSS in GLOBAL frame + + def init_debug(self,) -> None: + # Debug Publishers + self.pub_pedestrians_pc2 = rospy.Publisher("/point_cloud/pedestrians", PointCloud2, queue_size=10) + self.pub_obj_centers_pc2 = rospy.Publisher("/point_cloud/obj_centers", PointCloud2, queue_size=10) + self.pub_bboxes_markers = rospy.Publisher("/markers/bboxes", MarkerArray, queue_size=10) + self.pub_image = rospy.Publisher("/camera/image_detection", Image, queue_size=1) + + def update(self, 
vehicle : VehicleState) -> Dict[str,AgentState]: + # Initial vehicle pose data + if(self.current_vehicle_state == None and self.previous_vehicle_state == None): + self.current_vehicle_state = vehicle + # We get vehicle state from GNSS in global state + # Storing initial pose + if (self.start_pose_abs == None): + self.start_pose_abs = vehicle.pose + if self.t_start == None: + self.t_start = vehicle.pose.t + return self.current_agents + else: + self.previous_vehicle_state = self.current_vehicle_state + self.current_vehicle_state = vehicle + + # Update times for basic velocity calculation + self.prev_time = self.curr_time + self.curr_time = self.current_vehicle_state.pose.t + + # Edge Cases: + + # edge case to handle no pedestrian data + if(self.current_agent_obj_dims == {}): + return {} + + # edge case to handle empty lists: + if(len(self.current_agent_obj_dims['pose']) == 0 or len(self.current_agent_obj_dims['dims']) == 0): + return {} + + # Edge case to handle differently sized lists: + # TODO: Handle different lengths properly + if len(self.current_agent_obj_dims['pose']) != len(self.current_agent_obj_dims['dims']): + raise Exception( f"Length of extracted poses ({len(self.current_agent_obj_dims['pose'])}) and dimensions ({len(self.current_agent_obj_dims['dims'])}) are not equal") + + # (f"Global state : {vehicle}") + + # Convert pose to start state. 
Need to use previous_vehicle state as pedestrian info is delayed + vehicle_start_pose = vehicle.pose.to_frame(ObjectFrameEnum.START,self.previous_vehicle_state.pose,self.start_pose_abs) + + # converting to start frame + # for dim, pose in zip(self.current_agent_obj_dims['dims'], self.current_agent_obj_dims['pose']): + # print("DIM: ", type(dim), dim) + # print("POSE: ", type(pose), pose) + self.current_agent_obj_dims['pose'] = [np.array(vehicle_start_pose.apply(pose)) for pose in self.current_agent_obj_dims['pose']] + temp_dims = list() + for dim in self.current_agent_obj_dims['dims']: + temp_dims.append(np.array([vehicle_start_pose.apply(corner) for corner in dim])) + self.current_agent_obj_dims['dims'] = temp_dims + + # Prepare variables for find_vels_and_ids + self.prev_agents = deepcopy(self.current_agents) + self.current_agents.clear() + # Note this below function will probably throw a type error. I think we'll need to index the + # tuples by index 0 in the lists but I'm not sure: + agents = self.create_agent_states(obj_centers=self.current_agent_obj_dims['pose'], obj_dims=self.current_agent_obj_dims['dims']) + + # Calculating the velocity of agents and tracking their ids + backOrder = self.find_vels_and_ids(agents=agents) + + # Create a dictionary of vehicle frame agents: + vehicle_agents = self.create_vehicle_dict(agents, backOrder) + # Create a list containing the ids of the new agents in the vehicle frame agent's order + + return vehicle_agents + + def create_vehicle_dict(self, agents: List[AgentState], backOrder: List[int]): + vehicle_agents = {} + + for idx in backOrder: + vehicle_agents[backOrder[idx]] = agents[idx] + vehicle_agents[backOrder[idx]].activity = self.current_agents[backOrder[idx]].activity + vehicle_agents[backOrder[idx]].velocity = self.current_agents[backOrder[idx]].velocity + + return vehicle_agents + + # TODO: Improve Algo Knn, ransac, etc. 
+ def find_centers(self, clusters: List[List[np.ndarray]]) -> List[np.ndarray]: + clusters = [np.array(clust) for clust in clusters] + centers = [np.array(()) if clust.size == 0 else np.mean(clust, axis=0) for clust in clusters] + return centers + + # Beware: AgentState(PhysicalObject) builds bbox from + # dims [-l/2,l/2] x [-w/2,w/2] x [0,h], not + # [-l/2,l/2] x [-w/2,w/2] x [-h/2,h/2] + def find_dims(self, clusters: List[List[np.ndarray]]) -> List[np.ndarray]: + # Add some small constant to height to compensate for + # objects distance to ground we filtered from lidar, + # other heuristics to imrpove stability for find_ funcs ? + clusters = [np.array(clust) for clust in clusters] + # x, y, z 1 value + dims = [np.array(()) if clust.size == 0 else np.max(clust, axis= 0) - np.min(clust, axis= 0) for clust in clusters] + for i in range(len(dims)): + if dims[i].size == 0: continue + else: + # -dims[i]/2, dims[i]/2 + # 8 point bbox + dims[i] = np.vstack([[-dims[i][0]/2.0, -dims[i][1]/2.0, -dims[i][2]/2.0], + [-dims[i][0]/2.0, -dims[i][1]/2.0, dims[i][2]/2.0], + [-dims[i][0]/2.0, dims[i][1]/2.0, -dims[i][2]/2.0], + [-dims[i][0]/2.0, dims[i][1]/2.0, dims[i][2]/2.0], + [dims[i][0]/2.0, -dims[i][1]/2.0, -dims[i][2]/2.0], + [dims[i][0]/2.0, -dims[i][1]/2.0, dims[i][2]/2.0], + [dims[i][0]/2.0, dims[i][1]/2.0, -dims[i][2]/2.0], + [dims[i][0]/2.0, dims[i][1]/2.0, dims[i][2]/2.0]] + ) + + # Dims are 2 points: [ [min_x, min_y, min_z], [max_x, max_y, max_z] ] + # Need that for transform + return dims + + # TODO: Separate debug/viz class, bbox and 2d 3d points funcs + def viz_object_states(self, cv_image, boxes, extracted_pts_all): + # Extract 3D pedestrians points in lidar frame + # ** These are camera frame after transform_lidar_points, right? + # ** It was in camera frame before. I fixed it. Now they are in lidar frame! 
+ pedestrians_3d_pts = [[] if len(extracted_pts) == 0 else list(extracted_pts[:, -3:]) for extracted_pts in extracted_pts_all] + + # Object center viz + obj_3d_obj_centers = list() + obj_3d_obj_dims = list() + for track_id, agent in self.current_agents.items(): + if agent.pose.x != None and agent.pose.y != None and agent.pose.z != None: + obj_3d_obj_centers.append((agent.pose.x, agent.pose.y, agent.pose.z)) # ** + if agent.dimensions != None and agent.dimensions[0] != None and agent.dimensions[1] != None and agent.dimensions[2] != None: + obj_3d_obj_dims.append(agent.dimensions) + + # Extract 2D pedestrians points and bbox in camera frame + extracted_2d_pts = [[] if len(extracted_pts) == 0 else list(extracted_pts[:, :2].astype(int)) for extracted_pts in extracted_pts_all] + flattened_pedestrians_2d_pts = list() + for pts in extracted_2d_pts: flattened_pedestrians_2d_pts.extend(pts) + + for ind, bbox in enumerate(boxes): + xywh = bbox.xywh[0].tolist() + cv_image = vis_2d_bbox(cv_image, xywh, bbox) + + flattened_pedestrians_3d_pts = list() + for pts in pedestrians_3d_pts: flattened_pedestrians_3d_pts.extend(pts) + + if len(flattened_pedestrians_3d_pts) > 0: + # Draw projected 2D LiDAR points on the image. + for pt in flattened_pedestrians_2d_pts: + cv2.circle(cv_image, pt, 2, (0, 0, 255), -1) + ros_img = self.bridge.cv2_to_imgmsg(cv_image, 'bgr8') + self.pub_image.publish(ros_img) + + # Draw 3D pedestrian pointclouds + if self.vehicle_frame: + # If in vehicle frame, tranform 3D pedestrians points to vehicle frame for better visualization. 
+ flattened_pedestrians_3d_pts_vehicle = transform_lidar_points(np.array(flattened_pedestrians_3d_pts), self.R_lidar_to_vehicle, self.t_lidar_to_vehicle) + flattened_pedestrians_3d_pts = flattened_pedestrians_3d_pts_vehicle + + + + # Create point cloud from extracted 3D points + ros_extracted_pedestrian_pc2 = create_point_cloud(flattened_pedestrians_3d_pts) + self.pub_pedestrians_pc2.publish(ros_extracted_pedestrian_pc2) + + # Draw 3D pedestrian centers and dimensions + if len(obj_3d_obj_centers) > 0 and len(obj_3d_obj_dims) > 0: + # Draw 3D pedestrian center pointclouds + ros_obj_3d_obj_centers_pc2 = create_point_cloud(obj_3d_obj_centers, color=(0, 128, 0)) + self.pub_obj_centers_pc2.publish(ros_obj_3d_obj_centers_pc2) + + # Draw 3D pedestrian bboxes markers + # Create bbox marker from pedestrain dimensions + ros_delete_bboxes_markers = delete_bbox_marker() + self.pub_bboxes_markers.publish(ros_delete_bboxes_markers) + ros_pedestrians_bboxes_markers = create_bbox_marker(obj_3d_obj_centers, obj_3d_obj_dims) + self.pub_bboxes_markers.publish(ros_pedestrians_bboxes_markers) + + + def update_object_states(self, track_result: List[Results], extracted_pts_all: List[np.ndarray]) -> None: + # self.prev_agents = self.current_agents.copy() + # self.current_agents.clear() + + # self.current_agent_obj_dims.clear() + # Return if no track results available + if track_result[0].boxes.id == None: + return + + # Change pedestrians_3d_pts to dicts matching track_ids + + # Extract 3D pedestrians points in lidar frame + # ** These are camera frame after transform_lidar_points, right? + # ** It was in camera frame before. I fixed it. Now they are in lidar frame! 
+ pedestrians_3d_pts = [[] if len(extracted_pts) == 0 else list(extracted_pts[:, -3:]) for extracted_pts in extracted_pts_all] + if len(pedestrians_3d_pts) != len(track_result[0].boxes): + raise Exception( f"Length of extracted pedestrian clusters ({len(pedestrians_3d_pts)}) and number of camera bboxes ({len(track_result[0].boxes)}) are not equal") + + + # TODO: Slower but cleaner to pass dicts of AgentState + # or at least {track_ids: centers/pts/etc} + # TODO: Combine funcs for efficiency in C. + # Separate numpy prob still faster for now + obj_centers = self.find_centers(pedestrians_3d_pts) # Centers are calculated in lidar frame here + obj_dims = self.find_dims(pedestrians_3d_pts) # 8 point dims of bounding box + # Pose is stored in vehicle frame and converted to start frame in the update function + obj_centers_vehicle = [] + obj_dims_vehicle = [] + if self.vehicle_frame: + for obj_center in obj_centers: + if obj_center.size > 0: + obj_center = np.array([obj_center]) + obj_center_vehicle = transform_lidar_points(obj_center, self.R_lidar_to_vehicle, self.t_lidar_to_vehicle)[0] + obj_centers_vehicle.append(obj_center_vehicle) + else: + obj_centers_vehicle.append(np.array(())) + obj_centers = obj_centers_vehicle + + if(len(obj_center) != 0) and (len(obj_dims) != 0): + for obj_dim in obj_dims: + if len(obj_dim) > 0: + # obj_dim_min = np.array([obj_dim[0]]) + # obj_dim_max = np.array([obj_dim[1]]) + # obj_dim_min_vehicle = transform_lidar_points(obj_dim_min, self.R_lidar_to_vehicle, self.t_lidar_to_vehicle)[0] + # obj_dim_max_vehicle = transform_lidar_points(obj_dim_max, self.R_lidar_to_vehicle, self.t_lidar_to_vehicle)[0] + # # Dims are 2 points: [ [min_x, min_y, min_z], [max_x, max_y, max_z] ] + # obj_dims_vehicle.append(np.vstack([obj_dim_min_vehicle, obj_dim_max_vehicle])) + obj_dim_vehicle = transform_lidar_points(obj_dim, self.R_lidar_to_vehicle, self.t_lidar_to_vehicle) + obj_dims_vehicle.append(obj_dim_vehicle) + else: obj_dims_vehicle.append(np.array(())) + 
obj_dims = obj_dims_vehicle + + self.current_agent_obj_dims["pose"] = obj_centers + self.current_agent_obj_dims["dims"] = obj_dims + + + # TODO: Refactor to make more efficient + # TODO: Moving Average across last N iterations pos/vel? Less spurious vals + def find_vels_and_ids(self, agents: List[AgentState]): + # self.prev_agents was assigned a deepcopy of self.current_agents and then self.current_agents + # was cleared before this function was called + + # Create a list containing the ids of the new agents in the vehicle frame agent's order + backOrder = [] + for each in agents: + backOrder.append(None) + + # Nothing was scanned, erase current (for current output) and previous list (for next time through because nothing was scanned) + if (len(agents) == 0): + self.current_agents = {} + return + + assigned = False + num_agents = len(agents) + + for idx in range(num_agents): + assigned = False + + # Loop through previous agents backwards + for prev_id, prev_state in reversed(self.prev_agents.items()): + # If a scanned agent overlaps with a previous agent, assume that they're the same agent + if self.agents_overlap(agents[idx], prev_state): + assigned = True + + if self.prev_time == None: + # This will be triggered if the very first message has pedestrians in it + vel = [0, 0, 0] + else: + delta_t = self.curr_time - self.prev_time + vel = list((np.array([agents[idx].pose.x, agents[idx].pose.y, agents[idx].pose.z]) - np.array([prev_state.pose.x, prev_state.pose.y, prev_state.pose.z])) / delta_t) + # print("VELOCITY:") + # print(vel) + + # Fix start frame agent and store in this dict: + # TODO: Use np.isclose + agents[idx].activity = AgentActivityEnum.STOPPED if (np.all(vel == 0) or len(vel) == 0) else AgentActivityEnum.MOVING + agents[idx].velocity = (0, 0, 0) if len(vel) == 0 else tuple(vel) + + self.current_agents[prev_id] = agents[idx] + backOrder[idx] = prev_id + del self.prev_agents[prev_id] # Remove previous agent from previous agents so it doesn't get assigned 
multiple times on accident + break + + if not assigned: + # Set velocity to 0 and assign the new agent a new id with IdTracker + id = self.id_tracker.get_new_id() + + # Fix start frame agent and store in this dict: + agents[idx].activity = AgentActivityEnum.UNDETERMINED + agents[idx].velocity = (0, 0, 0) + self.current_agents[id] = agents[idx] + backOrder[idx] = id + return backOrder + + # Calculates whether 2 agents overlap. True if they do, false if not + def agents_overlap(self, curr_agent: AgentState, prev_agent: AgentState) -> bool: + # Calculate corners of AgentState + # Beware: AgentState(PhysicalObject) builds bbox from + # dims [-l/2,l/2] x [-w/2,w/2] x [0,h], not + # [-l/2,l/2] x [-w/2,w/2] x [-h/2,h/2] + # TODO: confirm (z -> l, x -> w, y -> h) + + # Extract the corners of the agents: + (x1_min, x1_max), (y1_min, y1_max), (z1_min, z1_max) = curr_agent.bounds() # Bounds of current agent + (x2_min, x2_max), (y2_min, y2_max), (z2_min, z2_max) = prev_agent.bounds() # Bounds of previous agent + + # True if they overlap, false if not + return ( + ( (x1_min <= x2_min and x2_min <= x1_max) or (x2_min <= x1_min and x1_min <= x2_max) ) and + ( (y1_min <= y2_min and y2_min <= y1_max) or (y2_min <= y1_min and y1_min <= y2_max) ) and + ( (z1_min <= z2_min and z2_min <= z1_max) or (z2_min <= z1_min and z1_min <= z2_max) ) + ) + + def create_agent_states(self, obj_centers: List[np.ndarray], obj_dims: List[np.ndarray]) -> List[AgentState]: + # Dims are 2 points: [ [min_x, min_y, min_z], [max_x, max_y, max_z] ] + + # Gate to check whether dims and centers are empty (will happen if no pedestrians are scanned): + for idx in range(len(obj_dims) -1, -1, -1): + # print("type:", type(obj_centers[idx]), type(obj_dims[idx])) + if obj_centers[idx].size == 0 or obj_dims[idx].size == 0: + del obj_centers[idx] + del obj_dims[idx] + if (len(obj_centers) != len(obj_dims)): + raise Exception(f"Length of object centers ({len(obj_centers)}) and dimensions ({len(obj_dims)}) are not 
equal") + + # Convert from 2 point to 1 point dims + # obj_dims = [np.abs( dim[0] - dim[1]) for dim in obj_dims] + # elif (obj_centers[idx].size != obj_dims[0].size): + # del obj_centers[idx] + # del obj_dims[idx] + # Create list of agent states in current vehicle frame: + + xyz_lengths_start = [np.max(obj_dim, axis=0) - np.min(obj_dim, axis=0) for obj_dim in obj_dims] + xyz_lengths_start = [xyz_length.astype(float) for xyz_length in xyz_lengths_start] + agents = [] + num_pairings = len(obj_centers) + for idx in range(num_pairings): + # Create agent in current frame: + agents.append(AgentState( + pose=ObjectPose(t=self.curr_time - self.t_start, x=obj_centers[idx][0], y=obj_centers[idx][1], z=obj_centers[idx][2] - xyz_lengths_start[idx][2]/2.0, yaw=0,pitch=0,roll=0,frame=ObjectFrameEnum.CURRENT), + # Beware: AgentState(PhysicalObject) builds bbox from + # dims [-l/2,l/2] x [-w/2,w/2] x [0,h], not + # [-l/2,l/2] x [-w/2,w/2] x [-h/2,h/2] + # (l, w, h) + # TODO: confirm (z -> l, x -> w, y -> h) + dimensions=(xyz_lengths_start[idx][0], xyz_lengths_start[idx][1], xyz_lengths_start[idx][2]), + outline=None, + type=AgentEnum.PEDESTRIAN, + activity=AgentActivityEnum.UNDETERMINED, # Temporary + velocity=None, # Temporary + yaw_rate=0, + )) + + return agents + + + # TODO : Generate Transformation matrix from vehicle to start + # Just keeping this here incase we need it, at this moment this function will not be used. 
+ # def generate_vehicle_start_frame(self) -> np.ndarray: + # ''' + # Creates T matrix for WORLD to START Frame and T matrix for WORLD to VEHICLE Frame + # Return T VEHICLE to START + # ''' + + # # T World to Start + # start_x, start_y, start_z = self.current_vehicle_state.pose.x, self.current_vehicle_state.pose.y, self.current_vehicle_state.pose.z + # start_yaw, start_roll, start_pitch = self.current_vehicle_state.pose.yaw, self.current_vehicle_state.pose.pitch, self.current_vehicle_state.pose.roll + # rotation_world_start = R.from_euler('xyz',[start_roll, start_pitch,start_yaw],degrees=False).as_matrix() + # self.t_start_to_world = np.eye(4) + # self.t_start_to_world[:3,:3] = rotation_world_start + # self.t_start_to_world[:3,3] = [start_x, start_y, start_z] + + + # # Converting vehicle_state from START to CURRENT + # self.current_vehicle_state.to_frame(frame=ObjectFrameEnum.CURRENT, current_pose=self.current_vehicle_state.pose, start_pose_abs=self.start_pose_abs) + + # # T World to Vehicle + # vehicle_x, vehicle_y, vehicle_z = self.current_vehicle_state.pose.x, self.current_vehicle_state.pose.y, self.current_vehicle_state.pose.z + # vehicle_yaw, vehicle_roll, vehicle_pitch = self.current_vehicle_state.pose.yaw, self.current_vehicle_state.pose.pitch, self.current_vehicle_state.pose.roll + # rotation_world_vehicle = R.from_euler('xyz',[vehicle_roll, vehicle_pitch, vehicle_yaw],degrees=False).as_matrix() + # self.t_vehicle_to_world = np.eye(4) + # self.t_vehicle_to_world[:3,:3] = rotation_world_vehicle + # self.t_vehicle_to_world[:3,3] = [vehicle_x, vehicle_y, vehicle_z] + + # return np.linalg.inv(self.t_start_to_world @ np.linalg.inv(self.t_vehicle_to_world)) + + + # TODO: Slower but cleaner to input self.current_agents dict + # TODO: Moving Average across last N iterations pos/vel? Less spurious vals + # TODO Akul: Fix velocity calculation to calculate in ObjectFrameEnum.START + # Work towards own tracking class instead of simple YOLO track? 
+ # Fix 1: division by time + # Fix 2: Put centers and dims in start frame for velocity calc + final agentstate in update_object_states + # ret: Dict[track_id: vel[x, y, z]] + # def find_vels(self, track_ids: List[int], obj_centers: List[np.ndarray], obj_dims: List[np.ndarray]) -> Dict[int, np.ndarray]: + # # Object not seen -> velocity = None + # track_id_center_map = dict(zip(track_ids, obj_centers)) + # vels = defaultdict(lambda: np.array(())) # None is faster, np.array matches other find_ methods. + + # for prev_track_id, prev_agent in self.prev_agents.items(): + # if prev_track_id in track_ids: + # # TODO: Add prev_agents to memory to avoid None velocity + # # We should only be missing prev pose on first sight of track_id Agent. + # # print("shape 1: ", track_id_center_map[prev_agent.track_id]) + # # print("shape 2: ", np.array([prev_agent.pose.x, prev_agent.pose.y, prev_agent.pose.z])) + # # prev can be 3 separate Nones, current is just empty array... make this symmetrical + # if prev_agent.pose.x and prev_agent.pose.y and prev_agent.pose.z and track_id_center_map[prev_agent.track_id].shape == 3: + # vels[prev_track_id] = (track_id_center_map[prev_track_id] - np.array([prev_agent.pose.x, prev_agent.pose.y, prev_agent.pose.z])) / (self.curr_time - self.prev_time) + # return vels + + def ouster_oak_callback(self, cv_image: cv2.Mat, lidar_points: np.ndarray): + self.cv_image = cv_image + self.lidar_points = lidar_points + + # Convert to cv2 image and run detector + # cv_image = self.bridge.imgmsg_to_cv2(rgb_image_msg, "bgr8") + track_result = self.detector.track(source=cv_image, classes=self.classes_to_detect, persist=True, conf=self.confidence) + + # Convert 1D PointCloud2 data to x, y, z coords + # lidar_points = convert_pointcloud2_to_xyz(lidar_pc2_msg) + # print("len lidar_points", len(lidar_points)) + + # Downsample xyz point clouds + downsampled_points = downsample_points(lidar_points) + + # Transform LiDAR points into the camera coordinate frame. 
+ lidar_in_camera = transform_lidar_points(downsampled_points, self.R_lidar_to_oak, self.t_lidar_to_oak) + + # Project the transformed points into the image plane. + projected_pts = project_points(lidar_in_camera, self.K, downsampled_points) + + # Process bboxes + boxes = track_result[0].boxes + + extracted_pts_all = list() + + for ind, bbox in enumerate(boxes): + xywh = bbox.xywh[0].tolist() + + # Extracting projected pts + x, y, w, h = xywh + left_bound = int(x - w / 2) + right_bound = int(x + w / 2) + top_bound = int(y - h / 2) + bottom_bound = int(y + h / 2) + + pts = np.array(projected_pts) + # Checking projected_pts length is very important + if len(projected_pts) > 0: + extracted_pts = pts[(pts[:, 0] > left_bound) & + (pts[:, 0] < right_bound) & + (pts[:, 1] > top_bound) & + (pts[:, 1] < bottom_bound) + ] + + # Apply ground and max distance filter to the extracted 5D points + extracted_pts = filter_ground_points(extracted_pts, self.ground_threshold) + extracted_pts = filter_depth_points(extracted_pts, self.max_human_depth) + extracted_pts_all.append(extracted_pts) + else: extracted_pts_all.append(np.array(())) + + # Generate Transformation matrix for vehicle to start + self.update_object_states(track_result, extracted_pts_all) + if self.debug: self.viz_object_states(cv_image, boxes, extracted_pts_all) + + + def rate(self): + return 4.0 + + def state_inputs(self): + return ['vehicle'] + + def state_outputs(self): + return ['agents'] + + +class FakePedestrianDetector2D(Component): + """Triggers a pedestrian detection at some random time ranges""" + def __init__(self,vehicle_interface : GEMInterface): + self.vehicle_interface = vehicle_interface + self.times = [(5.0,20.0),(30.0,35.0)] + self.t_start = None + + def rate(self): + return 4.0 + + def state_inputs(self): + return ['vehicle'] + + def state_outputs(self): + return ['agents'] + + def update(self, vehicle : VehicleState) -> Dict[str,AgentState]: + if self.t_start is None: + self.t_start = 
self.vehicle_interface.time() + t = self.vehicle_interface.time() - self.t_start + res = {} + for times in self.times: + if t >= times[0] and t <= times[1]: + res['pedestrian0'] = box_to_fake_agent((0,0,0,0)) + print("Detected a pedestrian") + return res \ No newline at end of file diff --git a/GEMstack/onboard/perception/pedestrian_detection_utils.py b/GEMstack/onboard/perception/pedestrian_detection_utils.py new file mode 100644 index 000000000..788ee2e54 --- /dev/null +++ b/GEMstack/onboard/perception/pedestrian_detection_utils.py @@ -0,0 +1,244 @@ +from sensor_msgs.msg import PointCloud2, PointField +from visualization_msgs.msg import Marker, MarkerArray +import numpy as np +import sensor_msgs.point_cloud2 as pc2 +import open3d as o3d +import cv2 +import json +import rospy +import numpy as np +import struct + + +def convert_pointcloud2_to_xyz(lidar_pc2_msg: PointCloud2): + """ Convert 1D PointCloud2 data to x, y, z coords """ + return np.array(list(pc2.read_points(lidar_pc2_msg, skip_nans=True)), dtype=np.float32)[:, :3] + + +def downsample_points(lidar_points): + """ Downsample point clouds """ + # Convert numpy array to Open3D point cloud + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(lidar_points) + + # Apply voxel grid downsampling + voxel_size = 0.1 # Adjust for desired resolution + downsampled_pcd = pcd.voxel_down_sample(voxel_size=voxel_size) + + # Convert back to numpy array + transformed_points = np.asarray(downsampled_pcd.points) + return transformed_points + + +def filter_ground_points(lidar_points, ground_threshold = 0): + """ Filter points given an elevation of ground threshold """ + filtered_array = lidar_points[lidar_points[:, 4] > ground_threshold] + return filtered_array + + +def filter_depth_points(lidar_points, max_human_depth=0.9): + """ Filter points beyond a max possible human depth in a point cluster """ + if lidar_points.shape[0] == 0: return lidar_points + lidar_points_dist = lidar_points[:, 2] + min_dist = 
np.min(lidar_points_dist) + max_dist = np.max(lidar_points_dist) + max_possible_dist = min_dist + max_human_depth + actual_dist = min(max_dist, max_possible_dist) + filtered_array = lidar_points[lidar_points_dist < actual_dist] + return filtered_array + + +# Credits: The following lines of codes (from 33 to 92) are adapted from the Calibration Team B +def load_extrinsics(extrinsics_file): + """ + Load calibrated extrinsics from a .npz file. + Assumes the file contains keys 'R' and 't'. + """ + data = np.load(extrinsics_file) + return data + + +def load_intrinsics(intrinsics_file): + """ + Load camera intrinsics from a JSON file. + Expects keys: 'fx', 'fy', 'cx', and 'cy'. + """ + with open(intrinsics_file, 'r') as f: + intrinsics = json.load(f) + fx = intrinsics["fx"] + fy = intrinsics["fy"] + cx = intrinsics["cx"] + cy = intrinsics["cy"] + K = np.array([[fx, 0, cx], + [0, fy, cy], + [0, 0, 1]], dtype=np.float32) + return K + + +def load_lidar_scan(lidar_file): + """ + Load a LiDAR scan from a file. + This function handles both npy (returns a NumPy array) and npz files. + """ + data = np.load(lidar_file, allow_pickle=True) + if isinstance(data, np.ndarray): + return data.astype(np.float32) + else: + key = list(data.keys())[0] + return data[key].astype(np.float32) + + +def transform_lidar_points(lidar_points, R, t): + """ + Transform LiDAR points from the LiDAR frame into the camera frame. + p_cam = R * p_lidar + t. + """ + P_cam = (R @ lidar_points.T + t.reshape(3,1)).T + return P_cam + + +def project_points(points_3d, K, lidar_points): + """ + Project 3D points (in the camera frame) into 2D image coordinates using the camera matrix K. + Only projects points with z > 0. 
+ """ + proj_points = [] + for pt, l_pt in zip(points_3d, lidar_points): + if pt[2] > 0: # only project points in front of the camera + u = K[0, 0] * (pt[0] / pt[2]) + K[0, 2] + v = K[1, 1] * (pt[1] / pt[2]) + K[1, 2] + # 5D data point + proj_points.append((int(u), int(v), l_pt[0], l_pt[1], l_pt[2])) + return np.array(proj_points) + + +def vis_2d_bbox(image, xywh, box): + # Setup + label_text = "Pedestrian " + font = cv2.FONT_HERSHEY_SIMPLEX + font_scale = 0.5 + font_color = (255, 255, 255) + line_type = 1 + text_thickness = 2 + + x, y, w, h = xywh + + if box.id is not None: + id = box.id.item() + else: + id = 0 + + # Draw bounding box + cv2.rectangle(image, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h / 2)), (255, 0, 255), 3) + + # Define text label + x = int(x - w / 2) + y = int(y - h / 2) + label = label_text + str(id) + " : " + str(round(box.conf.item(), 2)) + + # Get text size + text_size, baseline = cv2.getTextSize(label, font, font_scale, line_type) + text_w, text_h = text_size + + # Position text above the bounding box + text_x = x + text_y = y - 10 if y - 10 > 10 else y + h + text_h + + # Draw main text on top of the outline + cv2.putText(image, label, (text_x, text_y - baseline), font, font_scale, font_color, text_thickness) + return image + + +def create_point_cloud(points, color=(255, 0, 0)): + """ + Converts a list of (x, y, z) points into a PointCloud2 message. 
+ """ + header = rospy.Header() + header.stamp = rospy.Time.now() + header.frame_id = "os_sensor" # Change to your TF frame + + fields = [ + PointField(name="x", offset=0, datatype=PointField.FLOAT32, count=1), + PointField(name="y", offset=4, datatype=PointField.FLOAT32, count=1), + PointField(name="z", offset=8, datatype=PointField.FLOAT32, count=1), + PointField(name="rgb", offset=12, datatype=PointField.FLOAT32, count=1), + ] + + # Convert RGB color to packed float32 + r, g, b = color + packed_color = struct.unpack('f', struct.pack('I', (r << 16) | (g << 8) | b))[0] + + point_cloud_data = [(x, y, z, packed_color) for x, y, z in points] + + return pc2.create_cloud(header, fields, point_cloud_data) + + +def create_bbox_marker(centroids, dimensions): + """ + Create 3D bbox markers from centroids and dimensions + """ + marker_array = MarkerArray() + + for i, (centroid, dimension) in enumerate(zip(centroids, dimensions)): + # Skip if no centroid or dimension + if (centroid == None) or (dimension == None): + continue + + marker = Marker() + marker.header.frame_id = "os_sensor" # Reference frame + marker.header.stamp = rospy.Time.now() + marker.ns = "bounding_boxes" + marker.id = i # Unique ID for each marker + marker.type = Marker.CUBE # Cube for bounding box + marker.action = Marker.ADD + + # Position (center of the bounding box) + c_x, c_y, c_z = centroid + if (not isinstance(c_x, float)) or (not isinstance(c_y, float)) or (not isinstance(c_z, float)): + continue + + marker.pose.position.x = c_x + marker.pose.position.y = c_y + marker.pose.position.z = c_z + + # Orientation (default, no rotation) + marker.pose.orientation.x = 0.0 + marker.pose.orientation.y = 0.0 + marker.pose.orientation.z = 0.0 + marker.pose.orientation.w = 1.0 + + # Bounding box dimensions + d_x, d_y, d_z = dimension + if (not isinstance(d_x, float)) or (not isinstance(d_y, float)) or (not isinstance(d_z, float)): + continue + + marker.scale.x = d_x + marker.scale.y = d_y + marker.scale.z = d_z 
+ + # Random colors for each bounding box + marker.color.r = 0.0 # Varying colors + marker.color.g = 1.0 + marker.color.b = 1.5 + marker.color.a = 0.2 # Transparency + + marker.lifetime = rospy.Duration() # Persistent + marker_array.markers.append(marker) + return marker_array + + +def delete_bbox_marker(): + """ + Delete 3D bbox markers given ID ranges + """ + marker_array = MarkerArray() + for i in range(6): + marker = Marker() + marker.ns = "bounding_boxes" + marker.id = i + marker.action = Marker.DELETE + marker_array.markers.append(marker) + return marker_array + + + diff --git a/GEMstack/onboard/perception/state_estimation.py b/GEMstack/onboard/perception/state_estimation.py index 4aef25659..edd19f1c6 100644 --- a/GEMstack/onboard/perception/state_estimation.py +++ b/GEMstack/onboard/perception/state_estimation.py @@ -37,7 +37,8 @@ def state_outputs(self) -> List[str]: return ['vehicle'] def healthy(self): - return self.gnss_pose is not None + return True + # return self.gnss_pose is not None def update(self) -> VehicleState: if self.gnss_pose is None: diff --git a/GEMstack/onboard/perception/transform.py b/GEMstack/onboard/perception/transform.py new file mode 100644 index 000000000..8032752b1 --- /dev/null +++ b/GEMstack/onboard/perception/transform.py @@ -0,0 +1,21 @@ +import rospy +import tf +import numpy as np + +def publish_tf(): + rospy.init_node('pointcloud_tf_broadcaster') + br = tf.TransformBroadcaster() + rate = rospy.Rate(10) # 10 Hz + + while not rospy.is_shutdown(): + br.sendTransform( + (0, 0, 0), # (x, y, z) translation + tf.transformations.quaternion_from_euler(0, 0, 0), # (roll, pitch, yaw) + rospy.Time.now(), + "os_sensor", # Child frame (sensor) + "map" # Parent frame (world) + ) + rate.sleep() + +if __name__ == "__main__": + publish_tf() \ No newline at end of file diff --git a/GEMstack/onboard/planning/blink_component.py b/GEMstack/onboard/planning/blink_component.py new file mode 100644 index 000000000..b8fd1c21b --- /dev/null +++ 
b/GEMstack/onboard/planning/blink_component.py @@ -0,0 +1,85 @@ +from ..component import Component +from ..interface.gem import GEMInterface,GEMVehicleCommand,GEMVehicleReading +from ..interface.gem_hardware import GEMHardwareInterface +from typing import List + +import time + +class BlinkDistress(Component): + """Your control loop code should go here. You will use GEMVehicleCommand + to communicate with drive-by-wire system to control the vehicle's turn signals. + """ + def __init__(self, vehicle_interface : GEMHardwareInterface): + self.vehicle_interface = vehicle_interface + self.command = GEMVehicleCommand(gear=0, + accelerator_pedal_position=0, + brake_pedal_position=0, + steering_wheel_angle=0, + accelerator_pedal_speed=0, + brake_pedal_speed = 0, + steering_wheel_speed = 0) + + self.prev_time = time.time() + self.curr_time = time.time() + + + def rate(self): + """Requested update frequency, in Hz""" + return 0.5 + + def initialize(self): + """Run first""" + pass + + def cleanup(self): + """Run last""" + pass + + def state_inputs(self) -> List[str]: + """Returns the list of AllState inputs this component requires.""" + return ['intent'] + # return [] + + def update(self, *args): + """Run in a loop""" + # we need to set up a GEMVehicleCommand which encapsulates all commands that will be + # sent to the drive-by-wire system, simultaneously. To avoid doing arbitrary things + # to the vehicle, let's maintain the current values (e.g., accelerator, brake pedal, + # steering angle) from its current readings. 
+ # command = self.vehicle_interface.command_from_reading() + command = self.command + # print(command.left_turn_signal, command) + # TODO: alter command to execute turn signals, then uncomment line below to send + # the command to vehicle + # self.vehicle_interface.send_command(command) + self.curr_time = time.time() + + intent = args[0] + + if(intent == 2): + if(self.curr_time - self.prev_time > 2): + if command.left_turn_signal is True: + # print("code goes if") + command.left_turn_signal = False + command.right_turn_signal = True + + elif command.right_turn_signal is True: + # print("code goes elif") + command.left_turn_signal = False + command.right_turn_signal = False + + else: + # print("code goes else") + command.left_turn_signal = True + command.right_turn_signal = False + + self.prev_time = self.curr_time + self.command = command + + self.vehicle_interface.send_command(command) + # print(command.left_turn_signal, command.right_turn_signal) + + def healthy(self): + """Returns True if the element is in a stable state.""" + return True + diff --git a/GEMstack/onboard/planning/driving_logic_component.py b/GEMstack/onboard/planning/driving_logic_component.py new file mode 100644 index 000000000..9793c7246 --- /dev/null +++ b/GEMstack/onboard/planning/driving_logic_component.py @@ -0,0 +1,13 @@ +from ..component import Component +from typing import List + +class Driving_Logic(Component): + def __init__(self): + self.intent = 2 # halting + + def state_outputs(self) -> List[str]: + """Returns the list of AllState outputs this component generates.""" + return ['intent'] + def update(self, *args, **kwargs): + """Update the component.""" + return self.intent \ No newline at end of file diff --git a/GEMstack/onboard/planning/longitudinal_planning.py b/GEMstack/onboard/planning/longitudinal_planning.py new file mode 100644 index 000000000..f740ebaf0 --- /dev/null +++ b/GEMstack/onboard/planning/longitudinal_planning.py @@ -0,0 +1,486 @@ +# File: 
GEMstack/onboard/planning/longitudinal_planning.py +from typing import List, Tuple +import math +from ..component import Component +from ...state import AllState, VehicleState, EntityRelation, EntityRelationEnum, Path, Trajectory, Route, \ + ObjectFrameEnum +from ...utils import serialization +from ...mathutils import transforms +import numpy as np + +DEBUG = False # Set to False to disable debug output + + +def generate_dense_points(points: List[Tuple[float, float]], density: int = 10) -> List[Tuple[float, float]]: + if not points: + return [] + if len(points) == 1: + return points.copy() + + dense_points = [points[0]] + for i in range(len(points) - 1): + p0 = points[i] + p1 = points[i + 1] + dx = p1[0] - p0[0] + dy = p1[1] - p0[1] + seg_length = math.hypot(dx, dy) + + n_interp = int(round(seg_length * density)) + + for j in range(1, n_interp + 1): + fraction = j / (n_interp + 1) + x_interp = p0[0] + fraction * dx + y_interp = p0[1] + fraction * dy + dense_points.append((x_interp, y_interp)) + + dense_points.append(p1) + + return dense_points + + +def compute_cumulative_distances(points: List[List[float]]) -> List[float]: + s_vals = [0.0] + for i in range(1, len(points)): + dx = points[i][0] - points[i - 1][0] + dy = points[i][1] - points[i - 1][1] + ds = math.hypot(dx, dy) + s_vals.append(s_vals[-1] + ds) + + if DEBUG: + print("[DEBUG] compute_cumulative_distances: s_vals =", s_vals) + + return s_vals + + +def longitudinal_plan(path, acceleration, deceleration, max_speed, current_speed): + path_normalized = path.arc_length_parameterize() + points = list(path_normalized.points) + dense_points = generate_dense_points(points) + s_vals = compute_cumulative_distances(dense_points) + L = s_vals[-1] # Total path length + stopping_distance = (current_speed ** 2) / (2 * deceleration) + + if DEBUG: + print("[DEBUG] compute_cumulative_distances: s_vals =", s_vals) + print("[DEBUG] longitudinal_plan: Total path length L =", L) + print("[DEBUG] longitudinal_plan: Braking 
distance needed =", stopping_distance) + + if stopping_distance > L: # Case where there is not enough stopping distance to stop before path ends (calls emergency brake) + return longitudinal_brake(path, deceleration, current_speed) + + if current_speed > max_speed: # Case where car is exceeding the max speed so we need to slow down (do initial slowdown) + if DEBUG: + print(f"[DEBUG] Handling case where current_speed ({current_speed:.2f}) > max_speed ({max_speed:.2f})") + + # Initial deceleration phase to reach max_speed + initial_decel_distance = (current_speed ** 2 - max_speed ** 2) / (2 * deceleration) + initial_decel_time = (current_speed - max_speed) / deceleration + remaining_distance = L - initial_decel_distance + + if DEBUG: + print( + f"[DEBUG] Phase 1 - Initial Decel: distance = {initial_decel_distance:.2f}, time = {initial_decel_time:.2f}") + print(f"[DEBUG] Remaining distance after reaching max_speed: {remaining_distance:.2f}") + + # Calculate final deceleration distance needed to stop from max_speed + final_decel_distance = (max_speed ** 2) / (2 * deceleration) + cruise_distance = remaining_distance - final_decel_distance + + if DEBUG: + print(f"[DEBUG] Phase 2 - Cruise: distance = {cruise_distance:.2f}") + print(f"[DEBUG] Phase 3 - Final Decel: distance = {final_decel_distance:.2f}") + + times = [] + for s in s_vals: + if s <= initial_decel_distance: # Phase 1: Initial deceleration to max_speed + v = math.sqrt(current_speed ** 2 - 2 * deceleration * s) + t = (current_speed - v) / deceleration + if DEBUG: # Print every 10m + print(f"[DEBUG] Initial Decel: s = {s:.2f}, v = {v:.2f}, t = {t:.2f}") + + elif s <= initial_decel_distance + cruise_distance: # Phase 2: Cruise at max_speed + s_in_cruise = s - initial_decel_distance + t = initial_decel_time + s_in_cruise / max_speed + if DEBUG: # Print every 10m + print(f"[DEBUG] Cruise: s = {s:.2f}, v = {max_speed:.2f}, t = {t:.2f}") + + else: # Phase 3: Final deceleration to stop + s_in_final_decel = s - 
(initial_decel_distance + cruise_distance) + v = math.sqrt(max(max_speed ** 2 - 2 * deceleration * s_in_final_decel, 0.0)) + t = initial_decel_time + cruise_distance / max_speed + (max_speed - v) / deceleration + if DEBUG: # Print every 10m + print(f"[DEBUG] Final Decel: s = {s:.2f}, v = {v:.2f}, t = {t:.2f}") + + times.append(t) + + if DEBUG: + print("[DEBUG] Trajectory complete: Three phases executed") + print(f"[DEBUG] Total time: {times[-1]:.2f}") + + return Trajectory(frame=path.frame, points=dense_points, times=times) + + if acceleration <= 0: + if DEBUG: + print(f"[DEBUG] No acceleration allowed. Current speed: {current_speed:.2f}") + + # Pure deceleration phase + s_decel = (current_speed ** 2) / (2 * deceleration) + T_decel = current_speed / deceleration + + if DEBUG: + print(f"[DEBUG] Will maintain speed until s_decel: {s_decel:.2f}") + print(f"[DEBUG] Total deceleration time will be: {T_decel:.2f}") + + times = [] + for s in s_vals: + if s <= L - s_decel: # Maintain current speed until deceleration point + t_point = s / current_speed + if DEBUG: + print(f"[DEBUG] Constant Speed Phase: s = {s:.2f}, v = {current_speed:.2f}, t = {t_point:.2f}") + else: # Deceleration phase + s_in_decel = s - (L - s_decel) + v = math.sqrt(max(current_speed ** 2 - 2 * deceleration * s_in_decel, 0.0)) + t_point = (L - s_decel) / current_speed + (current_speed - v) / deceleration + if DEBUG: + print(f"[DEBUG] Deceleration Phase: s = {s:.2f}, v = {v:.2f}, t = {t_point:.2f}") + + times.append(t_point) + + return Trajectory(frame=path.frame, points=dense_points, times=times) + + # Determine max possible peak speed given distance + v_peak_possible = math.sqrt( + (2 * acceleration * deceleration * L + deceleration * current_speed ** 2) / (acceleration + deceleration)) + v_target = min(max_speed, v_peak_possible) + + if DEBUG: + print("[DEBUG] longitudinal_plan: v_peak_possible =", v_peak_possible, "v_target =", v_target) + + # Compute acceleration phase + s_accel = max(0.0, (v_target 
** 2 - current_speed ** 2) / (2 * acceleration)) + t_accel = max(0.0, (v_target - current_speed) / acceleration) + + # Compute deceleration phase + s_decel = max(0.0, (v_target ** 2) / (2 * deceleration)) + t_decel = max(0.0, v_target / deceleration) + + # Compute cruise phase + s_cruise = max(0.0, L - s_accel - s_decel) + t_cruise = s_cruise / v_target if v_target > 0 else 0.0 + + if DEBUG: + print("[DEBUG] longitudinal_plan: s_accel =", s_accel, "t_accel =", t_accel) + print("[DEBUG] longitudinal_plan: s_decel =", s_decel, "t_decel =", t_decel) + print("[DEBUG] longitudinal_plan: s_cruise =", s_cruise, "t_cruise =", t_cruise) + + times = [] + for s in s_vals: + if s <= s_accel: # Acceleration phase + v = math.sqrt(current_speed ** 2 + 2 * acceleration * s) + t_point = (v - current_speed) / acceleration + + if DEBUG: + print(f"[DEBUG] Acceleration Phase: s = {s:.2f}, v = {v:.2f}, t = {t_point:.2f}") + + elif s <= s_accel + s_cruise: # Cruise phase + t_point = t_accel + (s - s_accel) / v_target + + if DEBUG: + print(f"[DEBUG] Cruise Phase: s = {s:.2f}, t = {t_point:.2f}") + + else: # Deceleration phase + s_decel_phase = s - s_accel - s_cruise + v_decel = math.sqrt(max(v_target ** 2 - 2 * deceleration * s_decel_phase, 0.0)) + t_point = t_accel + t_cruise + (v_target - v_decel) / deceleration + + if t_point < times[-1]: # Ensure time always increases + t_point = times[-1] + 0.01 # Small time correction step + + if DEBUG: + print(f"[DEBUG] Deceleration Phase: s = {s:.2f}, v = {v_decel:.2f}, t = {t_point:.2f}") + + times.append(t_point) + + if DEBUG: + print("[DEBUG] longitudinal_plan: Final times =", times) + + return Trajectory(frame=path.frame, points=dense_points, times=times) + + +def longitudinal_brake(path: Path, deceleration: float, current_speed: float, + emergency_decel: float = 8.0) -> Trajectory: + # Vehicle already stopped - maintain position + if current_speed <= 0: + print("[DEBUG] longitudinal_brake: Zero velocity case! 
", [path.points[0]] * len(path.points)) + return Trajectory( + frame=path.frame, + points=[path.points[0]] * len(path.points), + times=[float(i) for i in range(len(path.points))] + ) + + # Get total path length + path_length = sum( + np.linalg.norm(np.array(path.points[i + 1]) - np.array(path.points[i])) + for i in range(len(path.points) - 1) + ) + + # Calculate stopping distance with normal deceleration + T_stop_normal = current_speed / deceleration + s_stop_normal = current_speed * T_stop_normal - 0.5 * deceleration * (T_stop_normal ** 2) + + # Check if emergency braking is needed + if s_stop_normal > path_length: + if DEBUG: + print("[DEBUG] longitudinal_brake: Emergency braking needed!") + print(f"[DEBUG] longitudinal_brake: Normal stopping distance: {s_stop_normal:.2f}m") + print(f"[DEBUG] longitudinal_brake: Available distance: {path_length:.2f}m") + + # Calculate emergency braking parameters + T_stop = current_speed / emergency_decel + s_stop = current_speed * T_stop - 0.5 * emergency_decel * (T_stop ** 2) + + if DEBUG: + print(f"[DEBUG] longitudinal_brake: Emergency stopping distance: {s_stop:.2f}m") + print(f"[DEBUG] longitudinal_brake: Emergency stopping time: {T_stop:.2f}s") + + decel_to_use = emergency_decel + + else: + if DEBUG: + print("[DEBUG] longitudinal_brake: Normal braking sufficient") + T_stop = T_stop_normal + decel_to_use = deceleration + + # Generate time points (use more points for smoother trajectory) + num_points = max(len(path.points), 50) + times = np.linspace(0, T_stop, num_points) + + # Calculate distances at each time point using physics equation + distances = current_speed * times - 0.5 * decel_to_use * (times ** 2) + + # Generate points along the path + points = [] + for d in distances: + if d <= path_length: + points.append(path.eval(d)) + else: + points.append(path.eval(d)) + + if DEBUG: + print(f"[DEBUG] longitudinal_brake: Using deceleration of {decel_to_use:.2f} m/s²") + print(f"[DEBUG] longitudinal_brake: Final stopping time: 
{T_stop:.2f}s") + + return Trajectory(frame=path.frame, points=points, times=times.tolist()) + + times = [] + for s in s_vals: + if s <= s_accel: # Acceleration phase + v = math.sqrt(current_speed ** 2 + 2 * acceleration * s) + t_point = (v - current_speed) / acceleration + + if DEBUG: + print(f"[DEBUG] Acceleration Phase: s = {s:.2f}, v = {v:.2f}, t = {t_point:.2f}") + + elif s <= s_accel + s_cruise: # Cruise phase + t_point = t_accel + (s - s_accel) / v_target + + if DEBUG: + print(f"[DEBUG] Cruise Phase: s = {s:.2f}, t = {t_point:.2f}") + + else: # Deceleration phase + s_decel_phase = s - s_accel - s_cruise + v_decel = math.sqrt(max(v_target ** 2 - 2 * deceleration * s_decel_phase, 0.0)) + t_point = t_accel + t_cruise + (v_target - v_decel) / deceleration + + if t_point < times[-1]: # Ensure time always increases + t_point = times[-1] + 0.01 # Small time correction step + + if DEBUG: + print(f"[DEBUG] Deceleration Phase: s = {s:.2f}, v = {v_decel:.2f}, t = {t_point:.2f}") + + times.append(t_point) + + if DEBUG: + print("[DEBUG] longitudinal_plan: Final times =", times) + + return Trajectory(frame=path.frame, points=dense_points, times=times) + +def longitudinal_brake(path: Path, deceleration: float, current_speed: float, emergency_decel: float = 8.0) -> Trajectory: + # Vehicle already stopped - maintain position + if current_speed <= 0: + print("[DEBUG] longitudinal_brake: Zero velocity case! 
", [path.points[0]] * len(path.points)) + return Trajectory( + frame=path.frame, + points=[path.points[0]] * len(path.points), + times=[float(i) for i in range(len(path.points))] + ) + + # Get total path length + path_length = sum( + np.linalg.norm(np.array(path.points[i+1]) - np.array(path.points[i])) + for i in range(len(path.points)-1) + ) + + # Calculate stopping distance with normal deceleration + T_stop_normal = current_speed / deceleration + s_stop_normal = current_speed * T_stop_normal - 0.5 * deceleration * (T_stop_normal ** 2) + + # Check if emergency braking is needed + if s_stop_normal > path_length: + if DEBUG: + print("[DEBUG] longitudinal_brake: Emergency braking needed!") + print(f"[DEBUG] longitudinal_brake: Normal stopping distance: {s_stop_normal:.2f}m") + print(f"[DEBUG] longitudinal_brake: Available distance: {path_length:.2f}m") + + # Calculate emergency braking parameters + T_stop = current_speed / emergency_decel + s_stop = current_speed * T_stop - 0.5 * emergency_decel * (T_stop ** 2) + + if DEBUG: + print(f"[DEBUG] longitudinal_brake: Emergency stopping distance: {s_stop:.2f}m") + print(f"[DEBUG] longitudinal_brake: Emergency stopping time: {T_stop:.2f}s") + + decel_to_use = emergency_decel + + else: + if DEBUG: + print("[DEBUG] longitudinal_brake: Normal braking sufficient") + T_stop = T_stop_normal + decel_to_use = deceleration + + # Generate time points (use more points for smoother trajectory) + num_points = max(len(path.points), 50) + times = np.linspace(0, T_stop, num_points) + + # Calculate distances at each time point using physics equation + distances = current_speed * times - 0.5 * decel_to_use * (times ** 2) + + # Generate points along the path + points = [] + for d in distances: + if d <= path_length: + points.append(path.eval(d)) + else: + points.append(path.eval(d)) + + if DEBUG: + print(f"[DEBUG] longitudinal_brake: Using deceleration of {decel_to_use:.2f} m/s²") + print(f"[DEBUG] longitudinal_brake: Final stopping time: 
{T_stop:.2f}s") + + return Trajectory(frame=path.frame, points=points, times=times.tolist()) + +class YieldTrajectoryPlanner(Component): + """Follows the given route. Brakes if the ego–vehicle must yield + (e.g. to a pedestrian) or if the end of the route is near; otherwise, + it accelerates (or cruises) toward a desired speed. + """ + + def __init__(self): + self.route_progress = None + self.t_last = None + self.acceleration = 5 + self.desired_speed = 2.0 + self.deceleration = 2.0 + self.emergency_brake = 8.0 + + def state_inputs(self): + return ['all'] + + def state_outputs(self) -> List[str]: + return ['trajectory'] + + def rate(self): + return 10.0 + + def update(self, state: AllState): + vehicle = state.vehicle # type: VehicleState + route = state.route # type: Route + t = state.t + + if DEBUG: + print("[DEBUG] YieldTrajectoryPlanner.update: t =", t) + + if self.t_last is None: + self.t_last = t + dt = t - self.t_last + if DEBUG: + print("[DEBUG] YieldTrajectoryPlanner.update: dt =", dt) + + curr_x = vehicle.pose.x + curr_y = vehicle.pose.y + curr_v = vehicle.v + if DEBUG: + print(f"[DEBUG] YieldTrajectoryPlanner.update: Vehicle position = ({curr_x}, {curr_y}), speed = {curr_v}, ") + + # Determine progress along the route. + if self.route_progress is None: + self.route_progress = 0.0 + _, closest_parameter = route.closest_point_local( + [curr_x, curr_y], + (self.route_progress - 5.0, self.route_progress + 5.0) + ) + if DEBUG: + print("[DEBUG] YieldTrajectoryPlanner.update: Closest parameter on route =", closest_parameter) + self.route_progress = closest_parameter + + # Extract a 10 m segment of the route for planning lookahead. + route_with_lookahead = route.trim(closest_parameter, closest_parameter + 10.0) + if DEBUG: + print("[DEBUG] YieldTrajectoryPlanner.update: Route Lookahead =", route_with_lookahead) + + print("[DEBUG] state", state.relations) + # Check whether any yield relations (e.g. due to pedestrians) require braking. 
+ stay_braking = False + pointSet = set() + for i in range(len(route_with_lookahead.points)): + if tuple(route_with_lookahead.points[i]) in pointSet: + stay_braking = True + break + pointSet.add(tuple(route_with_lookahead.points[i])) + + should_brake = any( + r.type == EntityRelationEnum.STOPPING_AT and r.obj1 == '' + for r in state.relations + ) + should_decelerate = any( + r.type == EntityRelationEnum.YIELDING and r.obj1 == '' + for r in state.relations + ) if should_brake == False else False + + should_accelerate = (not should_brake and not should_decelerate and curr_v < self.desired_speed) + + if DEBUG: + print("[DEBUG] YieldTrajectoryPlanner.update: stay_braking =", stay_braking) + print("[DEBUG] YieldTrajectoryPlanner.update: should_brake =", should_brake) + print("[DEBUG] YieldTrajectoryPlanner.update: should_accelerate =", should_accelerate) + print("[DEBUG] YieldTrajectoryPlanner.update: should_decelerate =", should_decelerate) + + if stay_braking: + traj = longitudinal_brake(route_with_lookahead, 0.0, 0.0, 0.0) + if DEBUG: + print("[DEBUG] YieldTrajectoryPlanner.update: Using longitudinal_brake (stay braking).") + elif should_brake: + traj = longitudinal_brake(route_with_lookahead, self.emergency_brake, curr_v) + if DEBUG: + print("[DEBUG] YieldTrajectoryPlanner.update: Using longitudinal_brake.") + elif should_decelerate: + traj = longitudinal_brake(route_with_lookahead, self.deceleration, curr_v) + if DEBUG: + print("[DEBUG] YieldTrajectoryPlanner.update: Using longitudinal_brake.") + elif should_accelerate: + traj = longitudinal_plan(route_with_lookahead, self.acceleration, + self.deceleration, self.desired_speed, curr_v) + if DEBUG: + print("[DEBUG] YieldTrajectoryPlanner.update: Using longitudinal_plan (accelerate).") + else: + # Maintain current speed if not accelerating or braking. 
+ traj = longitudinal_plan(route_with_lookahead, 0.0, self.deceleration, self.desired_speed, curr_v) + if DEBUG: + print( + "[DEBUG] YieldTrajectoryPlanner.update: Maintaining current speed with longitudinal_plan (0 accel).") + + self.t_last = t + if DEBUG: + print('[DEBUG] Current Velocity of the Car: LOOK!', curr_v, self.desired_speed) + print("[DEBUG] YieldTrajectoryPlanner.update: Returning trajectory with", len(traj.points), "points.") + return traj diff --git a/GEMstack/onboard/planning/parking_component.py b/GEMstack/onboard/planning/parking_component.py new file mode 100644 index 000000000..96200432b --- /dev/null +++ b/GEMstack/onboard/planning/parking_component.py @@ -0,0 +1,36 @@ +from typing import List +from ..component import Component +from ...utils import serialization +from ...state import Route,ObjectFrameEnum, AllState, PlannerEnum, MissionPlan +import os +import numpy as np +import time + +class ParkingSim(Component): + def __init__(self): + self.start_time = time.time() + pass + + def state_inputs(self): + return ["all"] + + def rate(self): + return 10.0 + + def state_outputs(self)-> List[str]: + return ["mission_plan"] + + def update(self, state: AllState): + # Calculate elapsed time since initialization. + elapsed_time = time.time() - self.start_time + + # After 4 seconds, change the mission plan to use PARKING. 
+ if elapsed_time >= 4.0: + print("Entering parking mode") + mission_plan = MissionPlan(1, 6, 0, PlannerEnum.PARKING) + else: + print("Entering RRT mode") + mission_plan = MissionPlan(1, 6, 0, PlannerEnum.RRT_STAR) + + print("ParkingSim update with state:",mission_plan) + return mission_plan \ No newline at end of file diff --git a/GEMstack/onboard/planning/pedestrian_yield_logic.py b/GEMstack/onboard/planning/pedestrian_yield_logic.py new file mode 100644 index 000000000..647c863ef --- /dev/null +++ b/GEMstack/onboard/planning/pedestrian_yield_logic.py @@ -0,0 +1,457 @@ +from ...state import AgentState, AgentEnum, EntityRelation, EntityRelationEnum, ObjectFrameEnum, VehicleState +from ..component import Component +from typing import List, Dict + +import numpy as np + + +DEBUG = True # Set to False to disable debug output + +""" Vehicle Configuration """ +GEM_MODEL = 'e4' # e2 or e4 +buffer_x, buffer_y = 3, 1 +if GEM_MODEL == 'e2': + # e2 axle dimensions, refer to GEMstack/knowledge/vehicle/model/gem_e2.urdf + size_x, size_y = 2.53, 1.35 # measured from base_link.STL + lx_f2r = 0.88 + 0.87 # distance between front axle and rear axle, from gem_e2.urdf wheel_links + ly_axle = 0.6 * 2 # length of axle + l_rear_axle_to_front = 1.28 + 0.87 # measured from base_link.STL + l_rear_axle_to_rear = size_x - l_rear_axle_to_front +elif GEM_MODEL == 'e4': + # e4 axle dimensions, refer to GEMstack/knowledge/vehicle/model/gem_e4.urdf + size_x, size_y = 3.35, 1.35 # measured from base_link.STL + lx_f2r = 1.2746 + 1.2904 # distance between front axle and rear axle, from gem_e4.urdf wheel_links + ly_axle = 0.6545 * 2 # length of front and rear axles, from gem_e4.urdf wheel_links + l_rear_axle_to_front = 1.68 + 1.29 # measured from base_link.STL + l_rear_axle_to_rear = size_x - l_rear_axle_to_front +# add buffer to outer dimensions, defined by half of the x, y buffer area dimensions +buffer_front = l_rear_axle_to_front + buffer_x +buffer_rear = -(l_rear_axle_to_rear + buffer_x) 
+buffer_left = size_y / 2 + buffer_y +buffer_right = -(size_y / 2 + buffer_y) +# comfortable deceleration for lookahead time calculation +comfort_decel = 2.0 + + +class PedestrianYielder(Component): + """Yields for all pedestrians in the scene. + + Result is stored in the relations graph. + """ + + def rate(self): + return None + + def state_inputs(self): + return ['agents', 'vehicle'] + + def state_outputs(self): + return ['relations'] + + def update(self, agents: Dict[str, AgentState], vehicle: VehicleState) -> List[EntityRelation]: + if DEBUG: + print("PedestrianYielder vehicle pose:", vehicle.pose, vehicle.v) + res = [] + for n, a in agents.items(): + if DEBUG: + print(f"[DEBUG] PedestrianYielder.update: Agent:", a.pose, a.velocity) + + """ collision estimation based on agent states in vehicle frame """ + if a.type == AgentEnum.PEDESTRIAN: + check, t_min, min_dist, pt_min = check_collision_in_vehicle_frame(a, vehicle) + if DEBUG: + print( + f"[DEBUG] ID {n}, relation:{check}, minimum distance:{min_dist}, time to min_dist: {t_min}, point of min_dist:{pt_min}") + # collision may occur, slow down + if check == 'YIELD': + res.append(EntityRelation(type=EntityRelationEnum.YIELDING, obj1='', obj2=n)) + # collision in a short time, emergency stop + elif check == 'STOP': + res.append(EntityRelation(type=EntityRelationEnum.STOPPING_AT, obj1='', obj2=n)) + + return res + + +""" Planning in vehicle frame without waypoints """ +def check_collision_in_vehicle_frame(agent: AgentState, vehicle: VehicleState): + xp, yp = agent.pose.x, agent.pose.y + vx, vy = agent.velocity[:2] + xv = vehicle.pose.x + yv = vehicle.pose.y + yaw = vehicle.pose.yaw + vel = vehicle.v + # time to stop from current velocity with comfortable deceleration + t_look = vel / comfort_decel + # calculate relative pedestrian position and velocity in vehicle frame + if agent.pose.frame == ObjectFrameEnum.CURRENT: + # xp, yp, vx, vy are already in vehicle frame + pass + elif agent.pose.frame == 
ObjectFrameEnum.START: + # convert xp, yp, vx, vy to vehicle frame + R = np.array([[np.cos(yaw), np.sin(yaw)], [-np.sin(yaw), np.cos(yaw)]], dtype=np.float32) + dx, dy = xp - xv, yp - yv + dvx, dvy = vx - vel * np.cos(yaw), vy - vel * np.sin(yaw) + # pedestrian pose and velocity in vehicle frame + xp, yp = np.dot(R, np.array([dx, dy])) + vx, vy = np.dot(R, np.array([dvx, dvy])) + + # If pedestrian already in buffer area + if buffer_rear <= xp <= buffer_front and buffer_right <= yp <= buffer_left: + return 'STOP', 0, 0, (xp, yp) + t_min, min_dist, pt_min = find_min_distance_and_time(xp, yp, vx, vy) + # if the minimum distance between the position and the buffer area is less than 0, then a collision is expected + if min_dist is not None: + if min_dist <= 0: + if t_min <= 0: + check = 'STOP' + elif t_min <= t_look: + check = 'YIELD' + else: + check = 'RUN' + else: + check = 'RUN' + else: + check = 'RUN' + return check, t_min, min_dist, pt_min + + +def find_min_distance_and_time(xp, yp, vx, vy): + # path function: Ax + By + C = vy * x - vx * y + (yp * vx - xp * vy) = 0 + vx = vx if vx != 0 else 1e-6 + vy = vy if vy != 0 else 1e-6 + A = vy + B = -vx + C = yp * vx - xp * vy + + def point_to_line(x0, y0, A, B, C): + # calculate the shortest distance from a point (x0, y0) to the line Ax + By + C = 0 + numerator = abs(A * x0 + B * y0 + C) + denominator = np.sqrt(A ** 2 + B ** 2) + x_foot = x0 - (A * (A * x0 + B * y0 + C)) / (A ** 2 + B ** 2) + y_foot = y0 - (B * (A * x0 + B * y0 + C)) / (A ** 2 + B ** 2) + dist = numerator / denominator if denominator != 0 else np.inf + return dist, (x_foot, y_foot) + + def point_dist(x1, y1, x2, y2): + return np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2) + + """ Compute intersections at the front, left and right edge """ + if xp >= buffer_front: + # front edge intersection: x = x_buff = xt = xp + vx * t_f + t_f = (buffer_front - xp) / vx + yt = yp + vy * t_f + if t_f < 0: # object moving away with higher speed than vehicle, start point has minimum 
distance + t_min = 0 + pt_min = xp, yp + if buffer_right <= yp <= buffer_left: + min_dist = xp - buffer_front # the distance to the front edge + elif yt > buffer_left: + min_dist = point_dist(xp, yp, buffer_front, buffer_left) # the distance to front left corner + else: + min_dist = point_dist(xp, yp, buffer_front, buffer_right) # the distance to front right corner + else: + if buffer_right <= yt <= buffer_left: # intersect at front edge, is collision + t_min = t_f + min_dist = 0 + pt_min = buffer_front, yt + elif yt > buffer_left: # intersect at front left + if yp <= yt: + min_dist, pt_min = point_to_line(buffer_front, buffer_left, A, B, C) + t_min = (pt_min[0] - xp) / vx + else: # left edge interaction: y = y_buff = yt = yp + vy * t_l + t_l = (buffer_left - yp) / vy + xt = xp + vx * t_l + if buffer_rear <= xt <= buffer_front: # intersect at left edge, is collision + t_min = t_l + min_dist = 0 + pt_min = xt, buffer_left + else: + min_dist, pt_min = point_to_line(buffer_rear, buffer_left, A, B, C) + t_min = (pt_min[0] - xp) / vx + else: # intersect at front right + if yp >= yt: + min_dist, pt_min = point_to_line(buffer_front, buffer_right, A, B, C) + t_min = (pt_min[0] - xp) / vx + else: # right edge interaction: y = y_buff = yt = yp + vy * t_l + t_r = (buffer_right - yp) / vy + xt = xp + vx * t_r + if buffer_rear <= xt <= buffer_front: # intersect at right edge, is collision + t_min = t_r + min_dist = 0 + pt_min = xt, buffer_right + else: + min_dist, pt_min = point_to_line(buffer_rear, buffer_right, A, B, C) + t_min = (pt_min[0] - xp) / vx + else: + if yp >= buffer_left: + # left edge interaction: y = y_buff = yt = yp + vy * t_l + t_l = (buffer_left - yp) / vy + xt = xp + vx * t_l + if t_l < 0: # object moving away, start point has minimum distance + t_min = 0 + pt_min = xp, yp + if buffer_rear <= xp <= buffer_front: + min_dist = yp - buffer_left # the distance to the left edge + else: + min_dist = point_dist(xp, yp, buffer_rear, buffer_left) # the distance to 
rear right corner + else: + if buffer_rear <= xt <= buffer_front: # intersect at left edge, is collision + t_min = t_l + min_dist = 0 + pt_min = xt, buffer_left + elif xt > buffer_front: + min_dist, pt_min = point_to_line(buffer_front, buffer_left, A, B, C) + t_min = (pt_min[0] - xp) / vx + else: + min_dist, pt_min = point_to_line(buffer_rear, buffer_left, A, B, C) + t_min = (pt_min[0] - xp) / vx + elif yp <= buffer_right: + # right edge interaction: y = -y_buff = yt = yp + vy * t_l + t_r = (buffer_right - yp) / vy + xt = xp + vx * t_r + if t_r < 0: # object moving away, start point has minimum distance + t_min = 0 + pt_min = xp, yp + if buffer_rear<= xp <= buffer_front: + min_dist = -yp - buffer_right # the distance to the right edge + else: + min_dist = point_dist(xp, yp, buffer_rear, buffer_right) # the distance to rear right corner + else: + if buffer_rear <= xt <= buffer_front: # intersect at left edge, is collision + t_min = t_r + min_dist = 0 + pt_min = xt, buffer_right + elif xt > buffer_front: + min_dist, pt_min = point_to_line(buffer_front, buffer_right, A, B, C) + t_min = (pt_min[0] - xp) / vx + else: + min_dist, pt_min = point_to_line(buffer_rear, buffer_right, A, B, C) + t_min = (pt_min[0] - xp) / vx + elif xp >= buffer_rear: + t_min = 0 + min_dist = -1 + pt_min = xp, yp + else: # rear position, should not be seen by the front camera + t_min = None + min_dist = None + pt_min = None + + return t_min, min_dist, pt_min + + +# def find_min_distance_and_time_by_optimizer(xp, yp, vx, vy, buffer, time_scale): +# from scipy.optimize import minimize_scalar +# def pos_t(t): +# xt, yt = xp + vx * t, yp + vy * t +# return shortest_distance_to_buffer_in_vehicle_frame((xt, yt), buffer) +# +# res = minimize_scalar(pos_t, bounds=(0, time_scale)) +# if res.success: +# t_min = res.x +# min_dist = res.fun +# return t_min, min_dist +# else: +# return None, None + + +""" Planning in start frame with waypoints """ +# # TODO: to get further unreached waypoints only +# def 
get_waypoint_arc_and_new_yaw(curr_x, curr_y, curr_yaw, waypoint): +# """ +# Input: +# curr_x, curr_y, curr_yaw: vehicle current pose +# waypoint: next waypoint +# Output: +# len_arc, radius, (x_c, y_c): length, radius and the center of the arc-shaped path +# next_yaw: vehicle yaw at the waypoint. +# """ +# # TODO: check if it works when the angles cross zero or delta greater than pi +# wp_x, wp_y = waypoint +# dist_curr2wp = np.sqrt((wp_x - curr_x) ** 2 + (wp_y - curr_y) ** 2) +# alpha = np.arctan2(wp_y - curr_y, wp_x - curr_x) - curr_yaw +# delta = 2 * alpha +# radius = dist_curr2wp / 2 / np.sin(alpha) +# len_arc = delta * radius +# x_c, y_c = curr_x - radius * np.sin(curr_yaw), curr_y + radius * np.cos(curr_yaw) +# next_yaw = curr_yaw + delta +# return len_arc, radius, (x_c, y_c), next_yaw +# +# +# def check_collision_with_waypoints(pose, velocity, yaw_rate, radius, center, expand, +# start_point, end_point, time_vehicle_in, time_vehicle_out): +# """ +# Given an arc as a path and a point with velocity, check if the point is crossing the path. 
+# Input: +# pose, velocity, yaw_rate: pedestrian pose, velocity and angular velocity +# radius, center, start_point, end_point: parameters of the path as an arc +# expand: enlarge the arc as the vehicle moving area +# start_point, end_point: start point and end point of the path +# time_vehicle_in, time_vehicle_out: the time that the vehicle is in the section of the path +# Output: +# is_crossing: boolean, if the pedestrian is crossing the path +# arc_dist_collision: distance from start_point to the closest pedestrian enter or exit point mapped to the path +# """ +# # TODO: consider pedestrian path with yaw_rate +# xp, yp = pose.x, pose.y +# vx, vy = velocity +# xc, yc = center +# x1, y1 = start_point +# x2, y2 = end_point +# theta1 = np.arctan2(y1 - yc, x1 - xc) +# theta2 = np.arctan2(y2 - yc, x2 - xc) +# r_inner = max(radius - expand, 0) +# r_outer = radius + expand +# +# def is_angle_between(pt, start_angle, end_angle): +# xp, yp = pt +# angle = np.arctan2(yp - yc, xp - xc) +# if start_angle < end_angle: +# return start_angle <= angle <= end_angle +# else: +# return end_angle <= angle <= start_angle +# +# def is_in_ring(pt, xc, yc, r_inner, r_outer): +# xp, yp = pt +# dist = np.sqrt((xp - xc) ** 2 + (yp - yc) ** 2) +# return r_inner <= dist <= r_outer +# +# def find_arc_intersection(xc, yc, xp, yp, vx, vy, r): +# # solve equations: (xt - xc)^2 + (yt - yc)^2 = (R+/-b)^2 +# A = vx ** 2 + vy ** 2 +# B = 2 * (vx * (xp - xc) + vy * (yp - yc)) +# C = (xp - xc) ** 2 + (yp - yc) ** 2 - r ** 2 +# root = B ** 2 - 4 * A * C +# t_list = [] # time +# pt_list = [] # intersection point +# if root < 0: +# return t_list, pt_list # no intersection +# else: +# t1 = (-B - np.sqrt(root)) / (2 * A) +# t2 = (-B + np.sqrt(root)) / (2 * A) +# # t should be larger than 0 +# if t1 > 0: +# pt1 = (xp + vx * t1, yp + vy * t1) +# if is_angle_between(pt1, theta1, theta2): +# t_list.append(t1) +# pt_list.append(pt1) +# if t2 > 0: +# pt2 = (xp + vx * t2, yp + vy * t2) +# if 
is_angle_between(pt2, theta1, theta2): +# t_list.append(t2) +# pt_list.append(pt2) +# return t_list, pt_list +# +# def find_edge_intersection(xc, yc, xp, yp, vx, vy, theta, r_inner, r_outer): +# x_inner, y_inner = xc + r_inner * np.cos(theta), yc + r_inner * np.sin(theta) +# x_outer, y_outer = xc + r_outer * np.cos(theta), yc + r_outer * np.sin(theta) +# t_list = [] # time +# pt_list = [] # intersection point +# +# dx = x_outer - x_inner +# dy = y_outer - y_inner +# # solve t from: xt = xp + vx * t, yt = xp + vy * t and dx * (yt - y_inner) = dy * (xt - x_inner) +# if dx * vy - dy * vx == 0: +# return t_list, pt_list # parallel, no intersection +# else: +# t = (dy * xp - dx * yp + dx * y_inner - dy * x_inner) / (dx * vy - dy * vx) +# pt = xp + vx * t, yp + vy * t +# if is_in_ring(pt, xc, yc, r_inner, r_outer): +# t_list.append(t) +# pt_list.append(pt) +# return t_list, pt_list +# +# # find the time and points a pedestrian in and out of the path +# t_list = [] +# # Case pedestrian in the path at the beginning +# if is_in_ring((xp, yp), xc, yc, r_inner, r_outer) and is_angle_between((xp, yp), theta1, theta2): +# t_list.append(0) +# # Case pedestrian cross the arcs and the start and end edges of the path section +# t_inner, _ = find_arc_intersection(xc, yc, xp, yp, vx, vy, r_inner) if r_inner > 0 else [], [] +# t_outer, _ = find_arc_intersection(xc, yc, xp, yp, vx, vy, r_outer) +# t_theta1, _ = find_edge_intersection(xc, yc, xp, yp, vx, vy, theta1, r_inner, r_outer) +# t_theta2, _ = find_edge_intersection(xc, yc, xp, yp, vx, vy, theta2, r_inner, r_outer) +# # Combine all points together, The elements in both lists correspond one by one in order +# t_list = t_list + t_inner + t_outer + t_theta1 + t_theta2 +# +# def arc_length_from_start_point(pt, center, radius, start_angle): +# """ +# Calculate the arc length between the point mapping to the arc and the start point as the distance along the path +# Assume angle difference of the arc is not greater than 180 degrees +# 
""" +# x, y = pt +# xc, yc = center +# angle = np.arctan2(y - yc, x - xc) +# delta_angle = abs(angle - start_angle) +# if delta_angle > np.pi: +# delta_angle = 2 * np.pi - delta_angle +# return radius * delta_angle +# +# is_collision = False +# arc_dist_collision = None +# # collision if there is intersection and the time is between time_vehicle_in and time_vehicle_out +# if min(t_list) <= time_vehicle_out or max(t_list) >= time_vehicle_in: +# is_collision = True +# t_min = max(time_vehicle_in, min(t_list)) +# t_max = min(time_vehicle_out, max(t_list)) +# # map the point to the arc of path for calculating distance from the vehicle follow the path +# pt_min = xp + vx * t_min, yp + vy * t_min +# pt_max = xp + vx * t_max, yp + vy * t_max +# arc_time_min = arc_length_from_start_point(pt_min, center, radius, theta1) +# arc_time_max = arc_length_from_start_point(pt_max, center, radius, theta2) +# arc_dist_collision = min(arc_time_min, arc_time_max) +# +# return is_collision, arc_dist_collision +# +# +# def yield_in_start_frame(path_further, state, agents: Dict[str,AgentState]): +# """ +# All calculations are in start frame, origin reference: rear_axle_center (refer to GEMstack/knowledge/calibration) +# """ +# # current states +# vehicle = state.vehicle +# curr_x = vehicle.pose.x +# curr_y = vehicle.pose.y +# curr_yaw = vehicle.pose.yaw +# curr_v = vehicle.v +# +# # determine lookahead distance using current velocity and a decent deceleration +# t_brake = curr_v / comfort_decel # time to brake down to zero +# dist_lookahead = curr_v * t_brake +# +# distance = 0 +# temp_x, temp_y, temp_yaw = curr_x, curr_y, curr_yaw +# is_collision = False +# dist_collision = None +# for waypoint in path_further: +# # the path to next waypoint and vehicle yaw at next waypoint +# len_arc, radius, center, next_yaw = get_waypoint_arc_and_new_yaw(temp_x, temp_y, temp_yaw, waypoint) +# +# # the time of vehicle go in and out of the section to next waypoint in current velocity +# time_vehicle_in 
= (distance - l_rear_axle_to_front) / curr_v # consider distance from the center of rear axle to the front +# time_vehicle_out = (distance + len_arc + l_rear_axle_to_rear) / curr_v # consider distance from the center of rear axle to the rear +# +# # check all the pedestrian, get their time and position if they are going to cross the path +# arc_dist_collision_list = [] +# for n, ped in agents.items(): +# if ped.type == AgentEnum.PEDESTRIAN: +# # check collision: pedestrian is in the section of the path between time_vehicle_in and time_vehicle_out +# is_collision, arc_dist_collision = check_collision_with_waypoints(ped.pose, ped.velocity,ped.yaw_rate, +# radius, center, buffer_y, +# (temp_x, temp_y), waypoint, +# time_vehicle_in, time_vehicle_out) +# if is_collision: +# arc_dist_collision_list.append(arc_dist_collision) +# +# if is_collision: +# # use the minimum collision distance to yield +# dist_collision = distance + min(arc_dist_collision_list) +# break +# # update total distance by add the arc length of the section to the next waypoint, update pose at next waypoint +# distance += len_arc +# temp_x, temp_y = waypoint +# temp_yaw = next_yaw +# # end the loop if total distance is larger than lookahead distance +# if distance > dist_lookahead: +# break +# +# return is_collision, dist_collision diff --git a/GEMstack/onboard/planning/pure_pursuit.py b/GEMstack/onboard/planning/pure_pursuit.py index e4b6f4f4e..846316bce 100644 --- a/GEMstack/onboard/planning/pure_pursuit.py +++ b/GEMstack/onboard/planning/pure_pursuit.py @@ -105,9 +105,13 @@ def compute(self, state : VehicleState, component : Component = None): else: desired_x,desired_y = self.path.eval(des_parameter) desired_yaw = np.arctan2(desired_y-curr_y,desired_x-curr_x) - #print("Desired point",(desired_x,desired_y)," with lookahead distance",self.look_ahead + self.look_ahead_scale * speed) - #print("Current yaw",curr_yaw,"desired yaw",desired_yaw) - + + print("Desired point",(desired_x,desired_y)," with 
lookahead distance",self.look_ahead + self.look_ahead_scale * speed) + print("Current x",curr_x,"Current y",curr_y) + print("Current yaw",curr_yaw,"Desired yaw",desired_yaw) + print("Parameter : ", self.current_path_parameter) + print("Last index : ", self.path.domain()[1]) + # distance between the desired point and the vehicle L = transforms.vector2_dist((desired_x,desired_y),(curr_x,curr_y)) @@ -142,6 +146,7 @@ def compute(self, state : VehicleState, component : Component = None): #past the end, just stop desired_speed = 0.0 feedforward_accel = -2.0 + f_delta = 0 else: if self.desired_speed_source=='path': current_trajectory_time = self.trajectory.parameter_to_time(self.current_path_parameter) @@ -165,6 +170,17 @@ def compute(self, state : VehicleState, component : Component = None): else: #decay speed when crosstrack error is high desired_speed *= np.exp(-abs(ct_error)*0.4) + + if len(self.trajectory.points) < 2 or self.current_path_parameter >= self.path.domain()[1]: + if component is not None: + component.debug_event('Past the end of trajectory') + #past the end, just stop + desired_speed = 0.0 + feedforward_accel = -2.0 + f_delta = 0 + + + if desired_speed > self.speed_limit: desired_speed = self.speed_limit output_accel = self.pid_speed.advance(e = desired_speed - speed, t = t, feedforward_term=feedforward_accel) diff --git a/GEMstack/onboard/planning/route_planning_component.py b/GEMstack/onboard/planning/route_planning_component.py new file mode 100644 index 000000000..01887fec4 --- /dev/null +++ b/GEMstack/onboard/planning/route_planning_component.py @@ -0,0 +1,48 @@ +import os +from typing import List + +import numpy as np +from GEMstack.onboard.component import Component +from GEMstack.state.all import AllState +from GEMstack.state.physical_object import ObjectFrameEnum +from GEMstack.state.route import PlannerEnum, Route + + + +class RoutePlanningComponent(Component): + """Reads a route from disk and returns it as the desired route.""" + def 
__init__(self): + print("Route Planning Component init") + + def state_inputs(self): + return ["all"] + + def state_outputs(self) -> List[str]: + return ['route'] + + def rate(self): + return 10.0 + + def update(self, state: AllState): + print("Route Planner's mission:", state.mission_plan) + print("Vehicle x:", state.vehicle.pose.x) + print("Vehicle y:", state.vehicle.pose.y) + print("Vehicle yaw:", state.vehicle.pose.yaw) + if state.mission_plan.planner_type == PlannerEnum.PARKING: + print("Parking mode") + elif state.mission_plan.planner_type == PlannerEnum.RRT_STAR: + print("RRT mode") + else: + print("Unknown mode") + + ## Return a route after doing some processing based on mission plan REMOVE ONCE OTHER PLANNERS ARE IMPLEMENTED + base_path = os.path.dirname(__file__) + file_path = os.path.join(base_path, "../../knowledge/routes/forward_15m.csv") + + waypoints = np.loadtxt(file_path, delimiter=',', dtype=float) + if waypoints.shape[1] == 3: + waypoints = waypoints[:,:2] + print("waypoints", waypoints) + route = Route(frame=ObjectFrameEnum.START,points=waypoints.tolist()) + print("route", route) + return route \ No newline at end of file diff --git a/GEMstack/onboard/planning/stanley.py b/GEMstack/onboard/planning/stanley.py new file mode 100644 index 000000000..60782712a --- /dev/null +++ b/GEMstack/onboard/planning/stanley.py @@ -0,0 +1,243 @@ +from ...mathutils.control import PID +from ...utils import settings +from ...mathutils import transforms +from ...knowledge.vehicle.dynamics import acceleration_to_pedal_positions +from ...state.vehicle import VehicleState,ObjectFrameEnum +from ...state.trajectory import Path,Trajectory,compute_headings +from ...knowledge.vehicle.geometry import front2steer +from ..interface.gem import GEMVehicleCommand +from ..component import Component +import numpy as np + +class PurePursuit(object): + """Implements a pure pursuit controller on a second-order Dubins vehicle.""" + def __init__(self, lookahead = None, lookahead_scale 
= None, crosstrack_gain = None, desired_speed = None): + self.look_ahead = lookahead if lookahead is not None else settings.get('control.pure_pursuit.lookahead',4.0) + self.look_ahead_scale = lookahead_scale if lookahead_scale is not None else settings.get('control.pure_pursuit.lookahead_scale',3.0) + self.crosstrack_gain = crosstrack_gain if crosstrack_gain is not None else settings.get('control.pure_pursuit.crosstrack_gain',0.41) + self.front_wheel_angle_scale = 3.0 + self.wheelbase = settings.get('vehicle.geometry.wheelbase') + self.wheel_angle_range = [settings.get('vehicle.geometry.min_wheel_angle'),settings.get('vehicle.geometry.max_wheel_angle')] + self.steering_angle_range = [settings.get('vehicle.geometry.min_steering_angle'),settings.get('vehicle.geometry.max_steering_angle')] + + if desired_speed is not None: + self.desired_speed_source = desired_speed + else: + self.desired_speed_source = settings.get('control.pure_pursuit.desired_speed',"path") + self.desired_speed = self.desired_speed_source if isinstance(self.desired_speed_source,(int,float)) else None + self.speed_limit = settings.get('vehicle.limits.max_speed') + self.max_accel = settings.get('vehicle.limits.max_acceleration') # m/s^2 + self.max_decel = settings.get('vehicle.limits.max_deceleration') # m/s^2 + self.pid_speed = PID(settings.get('control.longitudinal_control.pid_p',0.5), settings.get('control.longitudinal_control.pid_d',0.0), settings.get('control.longitudinal_control.pid_i',0.1), windup_limit=20) + + self.path_arg = None + self.path = None # type: Trajectory + #if trajectory = None, then only an untimed path was provided and we can't use the path velocity as the reference + self.trajectory = None # type: Trajectory + + self.current_path_parameter = 0.0 + self.current_traj_parameter = 0.0 + self.t_last = None + + def set_path(self, path : Path): + if path == self.path_arg: + return + self.path_arg = path + if len(path.points[0]) > 2: + path = path.get_dims([0,1]) + if not 
isinstance(path,Trajectory): + self.path = path.arc_length_parameterize() + self.trajectory = None + self.current_traj_parameter = 0.0 + if self.desired_speed_source in ['path','trajectory']: + raise ValueError("Can't provide an untimed path to PurePursuit and expect it to use the path velocity. Set control.pure_pursuit.desired_speed to a constant.") + else: + self.path = path.arc_length_parameterize() + self.trajectory = path + self.current_traj_parameter = self.trajectory.domain()[0] + self.current_path_parameter = 0.0 + + def compute(self, state : VehicleState, component : Component = None): + assert state.pose.frame != ObjectFrameEnum.CURRENT + t = state.pose.t + + if self.t_last is None: + self.t_last = t + dt = t - self.t_last + + curr_x = state.pose.x + curr_y = state.pose.y + curr_yaw = state.pose.yaw if state.pose.yaw is not None else 0.0 + speed = state.v + + if self.path is None: + #just stop + accel = self.pid_speed.advance(0.0, t) + # TODO + raise RuntimeError("Behavior without path not implemented") + + if self.path.frame != state.pose.frame: + print("Transforming path from",self.path.frame.name,"to",state.pose.frame.name) + self.path = self.path.to_frame(state.pose.frame, current_pose=state.pose) + if self.trajectory is not None: + if self.trajectory.frame != state.pose.frame: + print("Transforming trajectory from",self.trajectory.frame.name,"to",state.pose.frame.name) + self.trajectory = self.trajectory.to_frame(state.pose.frame, current_pose=state.pose) + + closest_dist,closest_parameter = self.path.closest_point_local((curr_x,curr_y),[self.current_path_parameter-5.0,self.current_path_parameter+5.0]) + self.current_path_parameter = closest_parameter + self.current_traj_parameter += dt + #TODO: calculate parameter that is look_ahead distance away from the closest point? 
+ #(rather than just advancing the parameter) + + #Stanley + des_parameter = closest_parameter + self.look_ahead + + #Pure pursuit + #des_parameter = closest_parameter + self.look_ahead + self.look_ahead_scale * speed + + print("Closest parameter: " + str(closest_parameter),"distance to path",closest_dist) + if closest_dist > 0.1: + print("Closest point",self.path.eval(closest_parameter),"vs",(curr_x,curr_y)) + if des_parameter >= self.path.domain()[1]: + #we're at the end of the path, calculate desired point by extrapolating from the end of the path + end_pt = self.path.points[-1] + if len(self.path.points) > 1: + end_dir = self.path.eval_tangent(self.path.domain()[1]) + else: + #path is just a single point, just look at current direction + end_dir = (np.cos(curr_yaw),np.sin(curr_yaw)) + desired_x,desired_y = transforms.vector_madd(end_pt,end_dir,(des_parameter-self.path.domain()[1])) + else: + desired_x,desired_y = self.path.eval(des_parameter) + desired_yaw = np.arctan2(desired_y-curr_y,desired_x-curr_x) + #print("Desired point",(desired_x,desired_y)," with lookahead distance",self.look_ahead + self.look_ahead_scale * speed) + #print("Current yaw",curr_yaw,"desired yaw",desired_yaw) + + + # distance between the desired point and the vehicle + L = transforms.vector2_dist((desired_x,desired_y),(curr_x,curr_y)) + + # find the curvature and the angle + alpha = transforms.normalize_angle(desired_yaw - curr_yaw) + if alpha > np.pi: + alpha -= np.pi*2 + + ## STANLEY + closest_x,closest_y = self.path.eval(closest_parameter) + + + e_numerator = (desired_x-closest_x) * (closest_y-curr_y) - (closest_x-curr_x) * (desired_y-closest_y) + + e_denominator = np.sqrt((desired_x-closest_x)**2 + (desired_y-closest_y)**2) + + e = e_numerator / e_denominator + + k = self.crosstrack_gain + + crosstrack_steering = np.arctan2(k * e, speed) + heading_error = alpha # change to (desired_to_closest_yaw - curr_yaw) + + angle_i = alpha + crosstrack_steering + angle = 
angle_i*self.front_wheel_angle_scale + + ## Pure pursuit + # ----------------- tuning this part as needed ----------------- + + # k = self.crosstrack_gain + # angle_i = np.arctan((k * 2 * self.wheelbase * np.sin(alpha)) / L) + # angle = angle_i*self.front_wheel_angle_scale + # ----------------- tuning this part as needed ----------------- + + f_delta = np.clip(angle, self.wheel_angle_range[0], self.wheel_angle_range[1]) + + #print("Closest point distance: " + str(L)) + print("Forward velocity: " + str(speed)) + ct_error = np.sin(alpha) * L + print("Crosstrack Error: " + str(round(ct_error,3))) + print("Front wheel angle: " + str(round(np.degrees(f_delta),2)) + " degrees") + steering_angle = np.clip(front2steer(f_delta), self.steering_angle_range[0], self.steering_angle_range[1]) + print("Steering wheel angle: " + str(round(np.degrees(steering_angle),2)) + " degrees" ) + + desired_speed = self.desired_speed + feedforward_accel = 0.0 + if self.desired_speed_source in ['path','trajectory']: + #determine desired speed from trajectory + if len(self.trajectory.points) < 2 or self.current_path_parameter >= self.path.domain()[1]: + if component is not None: + component.debug_event('Past the end of trajectory') + #past the end, just stop + desired_speed = 0.0 + feedforward_accel = -2.0 + else: + if self.desired_speed_source=='path': + current_trajectory_time = self.trajectory.parameter_to_time(self.current_path_parameter) + else: + current_trajectory_time = self.current_traj_parameter + deriv = self.trajectory.eval_derivative(current_trajectory_time) + desired_speed = min(np.linalg.norm(deriv),self.speed_limit) + difference_dt = 0.1 + if current_trajectory_time >= self.trajectory.domain()[1]: + prev_deriv = self.trajectory.eval_derivative(current_trajectory_time - difference_dt) + prev_desired_speed = min(np.linalg.norm(prev_deriv),self.speed_limit) + feedforward_accel = (desired_speed - prev_desired_speed)/difference_dt + print("Desired speed",desired_speed,"m/s",", from 
prior",prev_desired_speed,"m/s") + else: + next_deriv = self.trajectory.eval_derivative(current_trajectory_time + difference_dt) + next_desired_speed = min(np.linalg.norm(next_deriv),self.speed_limit) + feedforward_accel = (next_desired_speed - desired_speed)/difference_dt + print("Desired speed",desired_speed,"m/s",", trying to reach desired",next_desired_speed,"m/s") + feedforward_accel= np.clip(feedforward_accel, -self.max_decel, self.max_accel) + print("Feedforward accel: " + str(feedforward_accel) + " m/s^2") + else: + #decay speed when crosstrack error is high + desired_speed *= np.exp(-abs(ct_error)*0.4) + if desired_speed > self.speed_limit: + desired_speed = self.speed_limit + output_accel = self.pid_speed.advance(e = desired_speed - speed, t = t, feedforward_term=feedforward_accel) + if component is not None: + component.debug('curr pt',(curr_x,curr_y)) + component.debug('curr param',self.current_path_parameter) + component.debug('desired pt',(desired_x,desired_y)) + component.debug('desired yaw (rad)',desired_yaw) + component.debug('crosstrack error',ct_error) + component.debug('front wheel angle (rad)',f_delta) + component.debug('desired speed (m/s)',desired_speed) + component.debug('feedforward accel (m/s^2)',feedforward_accel) + component.debug('output accel (m/s^2)',output_accel) + print("Output accel: " + str(output_accel) + " m/s^2") + + if output_accel > self.max_accel: + output_accel = self.max_accel + + if output_accel < -self.max_decel: + output_accel = -self.max_decel + + self.t_last = t + return (output_accel, f_delta) + + +class PurePursuitTrajectoryTracker(Component): + def __init__(self,vehicle_interface=None, **args): + self.pure_pursuit = PurePursuit(**args) + self.vehicle_interface = vehicle_interface + + def rate(self): + return 50.0 + + def state_inputs(self): + return ['vehicle','trajectory'] + + def state_outputs(self): + return [] + + def update(self, vehicle : VehicleState, trajectory: Trajectory): + 
class SummonSim(Component):
    """Simulates a summoning team component that receives an AllState object and outputs a route object.

    The route is a fixed forward path read from
    ``GEMstack/knowledge/routes/forward_15m.csv``. Since the file is static,
    it is loaded and converted to a Route exactly once, then the cached
    Route is returned on every subsequent update (the component runs at
    10 Hz, so re-reading the file each tick was wasted disk I/O).
    """

    def __init__(self):
        # Cached Route built lazily on the first update() call.
        self._route = None

    def state_inputs(self):
        return ["all"]

    def state_outputs(self) -> List[str]:
        return ["route"]

    def rate(self):
        return 10.0

    def update(self, state: AllState):
        """Return the fixed summon route in the START frame.

        The incoming state is currently unused; a real summoning component
        would plan toward a goal extracted from it.
        """
        if self._route is None:
            base_path = os.path.dirname(__file__)
            file_path = os.path.join(base_path, "../../knowledge/routes/forward_15m.csv")

            # atleast_2d guards against a one-waypoint file, where np.loadtxt
            # returns a 1-D array and waypoints.shape[1] would raise IndexError.
            waypoints = np.atleast_2d(np.loadtxt(file_path, delimiter=',', dtype=float))
            if waypoints.shape[1] == 3:
                # Drop the third (z / heading) column; Route points are 2-D.
                waypoints = waypoints[:, :2]
            self._route = Route(frame=ObjectFrameEnum.START, points=waypoints.tolist())
        return self._route
a/GEMstack/onboard/visualization/mpl_visualization.py +++ b/GEMstack/onboard/visualization/mpl_visualization.py @@ -5,6 +5,8 @@ import matplotlib.animation as animation import time from collections import deque +from ...state.agent import AgentEnum +import numpy as np class MPLVisualization(Component): """Runs a matplotlib visualization at 10Hz. @@ -21,8 +23,12 @@ def __init__(self, rate : float = 10.0, save_as : str = None): self.axs = None self.tstart = 0 self.plot_t_range = 10 - self.plot_values = {} - self.plot_events = {} + + # Separate vehicle and pedestrian tracking + self.vehicle_plot_values = {} + self.pedestrian_plot_values = {} + self.vehicle_plot_events = {} + self.pedestrian_plot_events = {} def rate(self) -> float: return self._rate @@ -41,7 +47,7 @@ def initialize(self): self.writer.setup(plt.gcf(), self.save_as, dpi=100) plt.ion() # to run GUI event loop - self.fig,self.axs = plt.subplots(1,2,figsize=(12,6)) + self.fig,self.axs = plt.subplots(1,3,figsize=(18,6)) self.fig.canvas.mpl_connect('close_event', self.on_close) plt.show(block=False) self.tstart = time.time() @@ -53,30 +59,48 @@ def on_close(self,event): def debug(self, source, item, value): t = time.time() - self.tstart item = source+'.'+item - if item not in self.plot_values: - self.plot_values[item] = deque() - plot = self.plot_values[item] - self.plot_values[item].append((t,value)) + # Determine which plot dict to use based on source + if source.startswith('ped_'): + target_dict = self.pedestrian_plot_values + else: + target_dict = self.vehicle_plot_values + + if item not in target_dict: + target_dict[item] = deque() + plot = target_dict[item] + plot.append((t,value)) while t - plot[0][0] > self.plot_t_range: plot.popleft() def debug_event(self, source, event): t = time.time() - self.tstart event = source+'.'+event - if event not in self.plot_events: - self.plot_events[event] = deque() - plot = self.plot_events[event] + target_dict = self.pedestrian_plot_events if 
source.startswith('ped_') else self.vehicle_plot_events + + if event not in target_dict: + target_dict[event] = deque() + plot = target_dict[event] plot.append(t) while t - plot[0] > self.plot_t_range: plot.popleft() def update(self, state): if not plt.fignum_exists(self.fig.number): - #plot closed return self.num_updates += 1 - self.debug("vehicle","velocity",state.vehicle.v) - self.debug("vehicle","front wheel angle",state.vehicle.front_wheel_angle) + + # Vehicle metrics + self.debug("vehicle", "velocity", state.vehicle.v) + self.debug("vehicle", "front_wheel_angle", state.vehicle.front_wheel_angle) + + # Pedestrian metrics + for agent_id, agent in state.agents.items(): + if agent.type == AgentEnum.PEDESTRIAN: + # self.debug(f"ped_{agent_id}", "x", agent.pose.x) + # self.debug(f"ped_{agent_id}", "y", agent.pose.y) + self.debug(f"ped_{agent_id}", "velocity", np.linalg.norm(agent.velocity)) # Magnitude of resultant velocity + self.debug(f"ped_{agent_id}", "yaw_rate", agent.yaw_rate) + time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(state.t)) #frame=ObjectFrameEnum.CURRENT #state = state.to_frame(frame) @@ -84,18 +108,27 @@ def update(self, state): yrange = [state.vehicle.pose.y - 10, state.vehicle.pose.y + 10] #plot main visualization mpl_visualization.plot(state,title="Scene %d at %s"%(self.num_updates,time_str),xrange=xrange,yrange=yrange,show=False,ax=self.axs[0]) - #plot figure on axs[1] + + # Vehicle plot (axs[1]) self.axs[1].clear() - for k,v in self.plot_values.items(): + for k,v in self.vehicle_plot_values.items(): t = [x[0] for x in v] y = [x[1] for x in v] self.axs[1].plot(t,y,label=k) - for i,(k,v) in enumerate(self.plot_events.items()): - for t in v: - self.axs[1].axvline(x=t,linestyle='--',color='C'+str(i),label=k) + self.axs[1].set_title('Vehicle Metrics') self.axs[1].set_xlabel('Time (s)') self.axs[1].legend() + # Pedestrian plot (axs[2]) + self.axs[2].clear() + for k,v in self.pedestrian_plot_values.items(): + t = [x[0] for x in v] + y 
= [x[1] for x in v] + self.axs[2].plot(t,y,label=k) + self.axs[2].set_title('Pedestrian Metrics') + self.axs[2].set_xlabel('Time (s)') + self.axs[2].legend() + self.fig.canvas.draw_idle() self.fig.canvas.flush_events() diff --git a/GEMstack/state/__init__.py b/GEMstack/state/__init__.py index 8ddc0c5b0..e031def93 100644 --- a/GEMstack/state/__init__.py +++ b/GEMstack/state/__init__.py @@ -15,7 +15,8 @@ 'VehicleIntent','VehicleIntentEnum', 'AgentIntent', 'EntityRelationEnum','EntityRelation','EntityRelationGraph', - 'MissionEnum','MissionObjective', + 'MissionEnum','MissionObjective', 'MissionPlan', + 'PlannerEnum', 'Route', 'PredicateValues', 'AllState'] @@ -32,6 +33,7 @@ from .agent_intent import AgentIntent from .relations import EntityRelation, EntityRelationEnum, EntityRelationGraph from .mission import MissionEnum,MissionObjective -from .route import Route +from .route import Route, PlannerEnum +from .mission_plan import MissionPlan from .predicates import PredicateValues from .all import AllState diff --git a/GEMstack/state/all.py b/GEMstack/state/all.py index fc976fe24..eb5586687 100644 --- a/GEMstack/state/all.py +++ b/GEMstack/state/all.py @@ -7,6 +7,7 @@ from .agent_intent import AgentIntent,AgentIntentMixture from .relations import EntityRelation from .mission import MissionObjective +from .mission_plan import MissionPlan from .route import Route from .trajectory import Trajectory from .predicates import PredicateValues @@ -26,7 +27,9 @@ class AllState(SceneState): # planner-output state mission : MissionObjective = field(default_factory=MissionObjective) + mission_plan: MissionPlan = None intent : VehicleIntent = field(default_factory=VehicleIntent) + # planner_type : Optional[PlannerEnum] = None route : Optional[Route] = None trajectory : Optional[Trajectory] = None diff --git a/GEMstack/state/mission_plan.py b/GEMstack/state/mission_plan.py new file mode 100644 index 000000000..cd869095b --- /dev/null +++ b/GEMstack/state/mission_plan.py @@ -0,0 +1,15 
@register
class PlannerEnum(Enum):
    """Motion planner selectable for a mission.

    NOTE: the previous version also applied ``@dataclass`` to this Enum.
    That is a bug: dataclass injects a field-less ``__eq__`` (and
    ``__repr__``) into the class, which makes every pair of members compare
    equal -- e.g. ``PlannerEnum.RRT_STAR == PlannerEnum.PARKING`` would be
    True -- silently breaking any planner-type dispatch. Enums must not be
    dataclasses.
    """
    RRT_STAR = 0       # sampling-based RRT* planner
    HYBRID_A_STAR = 1  # Hybrid A* lattice planner
    PARKING = 2        # dedicated parking maneuver planner


@dataclass
@register
class MissionPlan:
    """Goal specification handed to the motion planner.

    Attributes:
        goal_x: goal x coordinate.
        goal_y: goal y coordinate.
        goal_orientation: goal yaw at the target pose, in radians.
        planner_type: which planner should execute this mission
            (defaults to RRT*).
    """
    goal_x: float
    goal_y: float
    goal_orientation: float
    planner_type: PlannerEnum = PlannerEnum.RRT_STAR
    # other mission-specific parameters can be added here
Related to v and front_wheel_angle by a fixed transform - gear : VehicleGearEnum #the current gear + gear : int #the current gear left_turn_indicator : bool = False #whether left turn indicator is on right_turn_indicator : bool = False #whether right turn indicator is on headlights_on : bool = False #whether headlights are on diff --git a/GEMstack/utils/klampt_visualization.py b/GEMstack/utils/klampt_visualization.py index 0e9219e76..741e968e8 100644 --- a/GEMstack/utils/klampt_visualization.py +++ b/GEMstack/utils/klampt_visualization.py @@ -6,6 +6,10 @@ from . import settings from ..state import ObjectFrameEnum,ObjectPose,PhysicalObject,VehicleState,VehicleGearEnum,Path,Obstacle,AgentState,AgentEnum,Roadgraph,RoadgraphLane,RoadgraphLaneEnum,RoadgraphCurve,RoadgraphCurveEnum,RoadgraphRegion,RoadgraphRegionEnum,RoadgraphSurfaceEnum,Trajectory,Route,SceneState,AllState +#KH: there is a bug on some system where the visualization crashes with an OpenGL error when drawing curves +#this is a workaround. We really should find the source of the bug! Change to 30 if still not working +MAX_POINTS_IN_CURVE = 50 + OBJECT_COLORS = { AgentEnum.CAR : (1,1,0,1), AgentEnum.PEDESTRIAN : (0,1,0,1), @@ -198,7 +202,10 @@ def plot_vehicle(vehicle : VehicleState, vehicle_model=None, axis_len=1.0): vehicle_model.link('rear_left_stop_light_link').appearance().setColor(0.3,0,0,1) def plot_path(name : str, path : Path, color=(0,0,0), width=1): - vis.add(name,[list(p) for p in path.points],color=color,width=width) + if len(path.points) > MAX_POINTS_IN_CURVE: # downsample due to OpenGL error? 
+ vis.add(name,[list(p) for p in path.points[::len(path.points)//MAX_POINTS_IN_CURVE]],color=color,width=width) + else: + vis.add(name,[list(p) for p in path.points],color=color,width=width) def plot_curve(name : str, curve : RoadgraphCurve, color=None, width=None): style = CURVE_TO_STYLE.get(curve.type,CURVE_TO_STYLE[None]) @@ -211,7 +218,10 @@ def plot_curve(name : str, curve : RoadgraphCurve, color=None, width=None): if width is not None: style['width'] = width for i,seg in enumerate(curve.segments): - vis.add(name+"_%d" % i,seg,**style) + if len(seg) > MAX_POINTS_IN_CURVE: # downsample due to OpenGL error? + vis.add(name+"_%d" % i,seg[::len(seg)//MAX_POINTS_IN_CURVE],**style) + else: + vis.add(name+"_%d" % i,seg,**style) def plot_lane(name : str, lane : RoadgraphLane, on_route=False): if lane.surface != RoadgraphSurfaceEnum.PAVEMENT: @@ -284,6 +294,6 @@ def plot_scene(scene : SceneState, ground_truth_vehicle=None, vehicle_model = No def plot(state : AllState, ground_truth_vehicle = None, vehicle_model=None, title=None, show=True): plot_scene(state, ground_truth_vehicle=ground_truth_vehicle, vehicle_model=vehicle_model, title=title, show=show) if state.route is not None: - plot_path("route",state.route,color=(1,0.5,0,1)) + plot_path("route",state.route,color=(1,0.5,0,1),width=2) if state.trajectory is not None: - plot_path("trajectory",state.trajectory,color=(1,0,0,1),width=2) + plot_path("trajectory",state.trajectory,color=(1,0,0,1),width=3) diff --git a/GEMstack/utils/logging.py b/GEMstack/utils/logging.py index 0cda2e55e..62ecf84e6 100644 --- a/GEMstack/utils/logging.py +++ b/GEMstack/utils/logging.py @@ -1,5 +1,4 @@ from .serialization import deserialize,serialize,deserialize_collection,serialize_collection -import json from typing import Union,Tuple class Logfile: @@ -146,7 +145,7 @@ def read(self, else: raise ValueError("Need to provide a time to advance to") msgs = [] - while self.next_item_time < next_t: + while self.next_item_time <= next_t: 
self.last_read_time = self.next_item_time msgs.append(self.next_item) try: diff --git a/HOMEWORK.md b/HOMEWORK.md new file mode 100644 index 000000000..144c57282 --- /dev/null +++ b/HOMEWORK.md @@ -0,0 +1,194 @@ +# CS 588 Phase 1 Homework + +Due date: 2/24 + +**Overall objective**: work together in large teams to create a nontrivial integrated behavior on the vehicle. The target behavior will be to drive along a predefined track, and slowing down for pedestrians that would otherwise be hit if the vehicle continues at normal speed. The vehicle should stop when necessary. + +**Skills**: Familiarization with GEMstack repo, Git branches and pull requests, lab and equipment logistics, inter-team communication. + +**On-board development**: If you are working on the GEM vehicle, your work will go into the `cs588_groupX/` folder. We recommend that before you start, you +a) Ensure that your GEMstack repository is on the correct branch. cd into `cs588_groupX/GEMstack` and run `git status`. It should show that you are on the `s2025_groupX` branch. +b) Get the latest updates to the course stack. To do so, run `git pull` and `git merge s2025`. + +When you are done, commit your development of this component and launch files to your group's branch. **DO NOT** set the global git authentication on the vehicle to your account. The easiest way to do this is: +a) Log onto Github under your account name. [Create a Github Personal Access Token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token-classic). To reduce the chance of mischief, choose the "Only select repositories" option and set the access token just to push to this repository. Open the "Repository permissions" and change the "Contents" access to "Read and write" +b) When your token is generated, copy the token and keep it somewhere safe. +c) Commit your changes to your branch. +d) Run "git push". 
In "Username" put your Github username, and in "Password" use the token that you copied in step b). This should now be reflected in your branch. + +When you are creating a pull request to the `s2025_teamX` branch of GEMstack, you can do this through the Github webpage. You can also use the hub command line tool: https://hub.github.com/. Ensure that this shows up in the `s2025_teamX` branch's Pull Requests tab. + +**Quality-of-life tips** +- By default, the log folder is shown after each run. If this is annoying, change the launch file to read `after: show_log_folder: False`. +- Each component can generate CSV files that collect fast streaming data for later analysis. Use `self.debug(item,value)` or `self.debug_event(label)` in your component's initialize(), update(), or cleanup() functions. These will be stored in LOG_FOLDER/COMPONENT_debug.csv. +- Find all magic constants in your code. Move them into the `GEMstack/knowledge/defaults/current.yaml` file under appropriate sub-keys. + + +**To submit**: ensure your group's work is merged into the `s2025_teamA` or `s2025_teamB` branches (as appropriate) with appropriately logged PRs. We will review your team's branch history. Your group will submit some evidence of its component working, and if successful your team will submit an integrated video of the behavior working. These may be posted to Slack or Clickup. + + +## Group Leads + +- Decide on a good day of the week and time for group meetings. +- Discuss and decide on Github pull request procedures for your team's branches. +- Discuss and decide on project management and communication features for your team (e.g., Slack / Clickup). +- Lead your group and team with clear expectations to prepare work in time for integration. Don't leave this to the last minute! Suggest establishing an internal deadline of 2/17 for all technical pieces. +- Keep track of team members' efforts, especially if their contributions are not apparent through branch commit history. 
At the end of this phase you will be reporting on your members' contributions, so if they are not listed in PRs and you have little to say about their work, they will receive low individual scores for this assignment. Note that they will be reporting on your leadership as well, so remember to treat your group fairly. +- Gather sufficient evidence (e.g., videos of partial system tests or simulation tests, or a report with text and figures) that your group has succeeded in its objectives. +- Decide which group on your team will test the integrated behavior. Have them report results, and capture a video of successful runs. + + +## Control & Dynamics Group +- Starter code is provided in the `homework/` folder in your group's branch. +- All members must complete safety lookout training; at least two must complete safety driver training. + +Part 1: + +1. Flash a distress signal to the vehicle via ROS. Modify the `blink.py` program to print messages from some of the "/pacmod/parsed_tx/X" topics (at least 2). For each topic, you will need to create a ROS subscriber. Read the documentation on https://github.com/astuff/pacmod2 to figure out which message types to use and extract printable data from these messages. + + Now, create a publisher for the "/pacmod/as_rx/turn_cmd" topic. You will then flash a "distress signal" corresponding to left turn signal for 2 seconds, right turn signal for 2s, and then turn signals off for 2s. This will then repeat forever until you press Ctrl+C. + + To run your code on the vehicle, you will launch the sensors and drive-by-wire system, then enable Pacmod control via the joystick. (Please do not try to operate the vehicle with the joystick!) Then, run your program with `python3 blink.py`. After you are done, you should disable Pacmod control via the joystick. + +2. Adapt `blink.py` to run the behavior in the GEMstack executor. Copy blink_launch.yaml to `GEMstack/launch`. 
Copy `blink_component.py` to `GEMstack/onboard/planning/` and make your edits there. + + Replicate your sensor printing and distress signal code in the BlinkDistress class in blink_component.py. You will need to adapt your code from using ROS subscribers / publishers to using the `GEMVehicleInterface` and `GEMVehicleReading` objects provided in the template code. Opening the GEMstack folder in the VSCode IDE will help you find the documentation for these classes. + + Run your code in simulation first. (If you are running on the vehicle, first open one terminal window and run `roscore`. Keep this running.) Then, in the GEMstack folder, run `python3 main.py --variant=sim launch/blink_launch.yaml`. You should see your blinking sequence in a matplotlib window. Use Ctrl+C to quit. + + Now, run your code on the real vehicle. Enable Pacmod control as before, then run `python3 main.py launch/blink_launch.yaml`. + +3. Adapt your code so the distress signal runs on GEMstack as a `signaling` component, and blinks the distress signal only when the `AllState.intent.intent=HALTING` flag is set. You will need to modify the computation graph in `GEMstack/knowledge/defaults` to add the `signaling` component, and modify launch files so your class is no longer listed under `trajectory_tracking`. +4. Use Git to create a pull request, and have your PR approved to the `s2025_teamX` branch. + +Part 2: +1. Tune the low-level controller (currently `PurePursuit`) to track the trajectory `AllState.trajectory` produced by the Planning group as accurately as possible. You may tune steering, acceleration/braking, the tracking strategies, or all three. +2. Decide on an appropriate tracking quality metric with the Infra team. +3. Report on what you tuned and how the tracking accuracy metric was improved. +4. Move all magic constants to an appropriate settings file, and have your pull request approved. +5. Integrate as necessary. 
+ + +## State estimation & Calibration Group + +- Have at least one team member complete safety lookout & driver training. + +Part 1: + +1. You will be using both the Oak RGB-D camera and the Ouster top lidar to estimate the 3D positions of pedestrians relative to the vehicle frame. Because the stereo depth estimate is not as accurate as the lidar, we will be using the image to detect objects and the lidar to measure their 3D extent. The first step of this process is to associate points in the camera with points in the lidar, which requires an understanding of projection and relative pose (extrinsics) calibration. +2. You will first need to capture a calibration dataset consisting of paired images and lidar scans. Write code to capture paired scans and take several snapshots of interesting scenes. You should save images as standard image files, and save lidar scans as numpy arrays or PCD files. Store these to a directory named GEMstack/data/SOMETHING where "SOMETHING" is a descriptive name of your dataset. In the main GEMstack folder we had a GEM e2 program `python3 GEMstack/offboard/calibration/capture_lidar_zed.py data/SOMETHING` that could capture these files. Create a similar program `capture_ouster_oak.py` that reads from the appropriate ROS topics. +3. Use the `CameraInfo` message to obtain the intrinsics of the camera (http://docs.ros.org/en/noetic/api/sensor_msgs/html/msg/CameraInfo.html) from the `/oak/rgb/camera_info` ROS topic. You may use the image_geometry module (https://docs.ros.org/en/api/image_geometry/html/python/) to project points in the camera frame to the image. A camera frame is defined with the z axis pointing forward, x pointing right, and y pointing down. The important values of this message are the fx, fy, cx ,and cy intrinsic parameter terms. You can store these values to disk or read them live in your code. +4. 
We do not have the relative transform `T_toplidar^frontcamera` that converts points from the top lidar (Ouster) frame to the Oak frame. Create a way to calibrate this transform. Specifically, if you have a dataset of paired image, point cloud pairs `(image^t,point_cloud^t)`, you should observe that the points belonging to some object in the point cloud are mapped directly to those pixels in the image. Specifically, if `image_pt` is an image pixel belonging to point pt in the point cloud, then we should observe the relation "image_pt = project(T_toplidar^frontcamera * pt, oak_intrinsics)" + + To obtain this matrix you may use manual calibration if you need to, or you could use a more sophisticated method. + + For manual calibration, you can modify the GEM e2 code `python3 GEMstack/offboard/calibration/klampt_show_lidar_zed.py data/SOMETHING` to visualize the stored point clouds from the front camera and the lidar. You can see why the forward depth estimates are quite poor. + + Automated methods could use a detection method to obtain a distinctive shape like a sphere or a square (e.g., a face of the foam cubes) in the Zed image, paired with the same shape in the lidar point cloud. An optimization would ensure that all lidar points belonging to the shape are projected to its projection in the image. You can also match the stereo and lidar point clouds using a dynamically-associated registration method like ICP. + + +Part 2: + +1. Now we will perform absolute calibration of vehicle height, rear axle center, and sensors, i.e., the vehicle frame. The vehicle frame is centered at the rear axle center, with x pointing forward, y pointing to the left, and z pointing up. We want the transforms `T_frontcamera^vehicle` and `T_toplidar^vehicle`. + + - A flat ground and walls are good references for the pitch/roll of the sensors. Using the lidar point cloud, calibrate the rotation component of this transform. Also, use the height of the ground to determine the z above ground. 
+ - Using distinctive shapes (e.g., the foam cubes), create a dataset in which you set up objects on the centerline of the vehicle. Use these to calibrate the yaw and the horizontal offset of the lidar. + - Measure the height of the rear axle over the ground. Use this to figure out the z above the rear axle center. + +2. Combine all of these measurements to establish `T_toplidar^vehicle`. Use your result in Step 1 to compute `T_frontcamera^vehicle` as well. Store these calibrations into the files referenced by `GEMstack/knowledge/calibration/gem_e4.yaml`. Name your calibrations meaningfully -- specifying which vehicle and what type of calibration you are performing -- and include the time and data of the calibration in your files. +3. Put your offline calibration code in the `GEMstack/offboard/calibration/` directory and write a README with a description of the method that you used, and instructions about how to use it. Describe the results of your calibration steps 1 and 2 in this file. + + DO NOT commit data files used in calibration. Put your datasets in the `data` directory, named appropriately. Files in this directory will not be committed to the repo. + +4. Use Git to create a pull request to the `s2025_teamX` branch, and have your PR approved. +5. Integrate as necessary. + + +## Perception Group + +- Starter code is provided in the `homework/` folder in your group's branch. +- Have at least one team member complete safety lookout & driver training. +- Many of your team members should plan to work on the vehicle, at least in the early stages of the assignment. Plan to coordinate with the Infra team to enable a method for working offline. + +Part 1: +1. Use an object detector to identify a pedestrian from the front camera. Implement the function `detect_people(img)` in `person_detector.py` to return a list of bounding boxes of people in an image. This is a function that inputs OpenCV images and returns a list of (x,y,w,h) image boxes that contain people. 
To run it, call `person_detector.py IMAGE` where IMAGE is an image file. You may download images from the internet or take your own. If you have a webcam, you can run `person_detector.py webcam` to run the live detector. + + I suggest using the YOLOv8 package (https://github.com/ultralytics/ultralytics) and choosing one of its pretrained models, such as `yolov8n.pt`. Follow the basic tutorials for running the detector. The ID of people is 0, and you can examine the Result object to extract out the boxes that have that ID. +2. Refactor your detector to work with GEMstack. Move your detector logic into a GEMstack Component defined in `pedestrian_detection.py`. The provided code will turn your boxes into a dictionary of `AgentState` objects. Read the documentation of the `AgentState` class to see what data should be stored there. For now, you will put temporary values for most of these fields. + + Specifically you should: + + 1. Implement the `PedestrianDetector2D` component to call your detection function in `image_callback(img)`. Your model should be loaded once in the component's `initialize()` method. + 2. Move your pretrained model to `GEMstack/knowledge/detection`. + 3. Move the Python code to `GEMstack/onboard/perception` where the launch file can find them. + 4. Make sure that your code can find the pretrained model in the `knowledge` directory. + 5. Using the detector-only variant of the launch file `python3 main.py --variant=detector_only pedestrian_detection.yaml`, test your code on the real vehicle. You will need to run "source ~/demo_ws/devel/setup.bash" and "roslaunch basic_launch sensor_init.launch" in another terminal, and run the same "source ..." command before running GEMstack code. + 6. Have a group member(s) walk in front of the Zed camera and observe the printouts. + 7. Finally, tune the rate at which you run your component to obtain the highest framerate possible. + +3. 
Use Git to create a pull request to `s2025_teamX`, and have your PR approved. + +Part 2: +1. Fuse detections with depth (`front_depth` sensor), multiple cameras, and/or LIDAR (`top_lidar` sensor) to reduce errors and place agents in their 3D locations in the scene. Use the sensor calibration from the Calibration group. Look at the lidar readings near the center of the detection, and use those to estimate a region of interest in the point cloud. Then, fit the center and dimensions of the object to the points in the point cloud that you associate with the pedestrian. Finally, put the estimated center and dimensions of the pedestrian into the "pose" and "dimensions" attributes, making sure to respect the vehicle's coordinate convention. +2. If the Infra team has worked quickly enough, you should be able to work offline, replaying from ROS bags. It is a good idea to coordinate with them. +3. Provide your results in a `PedestrianDetector` component and provide this to the Planning group. + +Part 3: +1. Keep track of `AgentState` objects as the vehicle moves and when they disappear out of the field of view. +2. Track pedestrians over time to fill out the "velocity" attribute and to give them relatively consistent names over time. + - number pedestrian IDs as "ped1", "ped2", etc. so that if you detect that a single pedestrian has moved across your field of view, you consistently output "ped1" as the agent's ID. + - keep track of the detected pedestrians on the prior frame to estimate the velocity of each pedestrian. Make sure to store 3D previous detections in the START frame. For each current detection, examine whether the box of the new detection overlaps any prior detection. If so, this begins a "track" detection in which you a) keep the last ID, and b) estimate the pedestrian's velocity in the CURRENT frame. The reported velocity should NOT be relative to the vehicle's velocity. 
If this is a new pedestrian, increment the pedestrian ID and output zero velocity for this frame. + + If you wish to use more sophisticated trackers, e.g., Hungarian algorithm, velocity prediction, etc, you may do so. + +3. Make sure this works while the vehicle is moving. For example, if the pedestrian is staying still but the vehicle is moving, their reported velocity remains relatively close to 0. Provide these results to the Planning group. +4. Integrate as necessary. + + +## Planning Group + +- Have at least one team member complete safety lookout & driver training. + +Part 1: +1. We will introduce two new components that operate on the output of a `PedestrianDetector2D` / `PedestrianDetector` model produced by the perception team: + + - Logic in the `PedestrianYielder` component in `pedestrian_yield_logic.py`. + - A planner in the `YieldTrajectoryPlanner` component in `longitudinal_planning.py`. + + Move these files to `GEMstack/onboard/planning` where launch files can find them. + +3. For the planning logic, you will implement a braking strategy in `longitudinal_plan_brake()`, and implement a trapezoidal velocity profile strategy in `longitudinal_plan()`. + + You will first do your development using a unit test script, then in simulation. Run `test_longitudinal_planning.py` from the main GEMstack folder to plot the results of your methods. Using the launch file `python3 main.py --variant=fake_sim pedestrian_detection.yaml`, you will receive perfect knowledge about the pedestrian. Test to make sure this produces reasonable behavior in the simulation. It should slow and stop at a safe distance when a pedestrian is detected, and then resume slowly when the pedestrian disappears. +4. Run on the vehicle using `--variant fake_real`. This will use a "fake" detector that will just report pedestrians at some point. Work with the Control and Dynamics team to ensure that reasonable behavior results. +5. Use Git to create a pull request, and have your PR approved. 
+ +Part 2: +1. Use collision detection to determine the distance to collision and use that to determine whether to begin to brake. + + - Determine the lookahead distance that you would need to avoid collision at the current velocity and desired braking deceleration. + - For many steps along the route up to that distance, determine whether the vehicle would be likely to hit (more precisely, get sufficiently close) to the pedestrian. Use a 1m lateral distance and 3m longitudinal distance buffer in your vehicle geometry. + - For the distance-to-collision you determine above, use the longitudinal planner to determine your desired acceleration. + + Determine how quickly you can run your planner. If it is too slow (<5Hz), try using the vehicle-pedestrian distance to dynamically determine your collision checking resolution. + +2. Modify the `PedestrianYielder` so that it selects a subset of pedestrians for which the above calculations should be performed. +3. Adapt the motion planner to slow gently for distant crossing pedestrians to boost rate of progress after the pedestrian crosses the vehicle's route. Use the Perception group's estimated velocity for each pedestrian. +4. Using the `--variant real_real` will run on the real vehicle using trajectory predictions from the Perception group. +5. Integrate as necessary. + + +## Infra Group +- Have at least one team member complete safety lookout & driver training. + +Part 1: +1. Develop a systematic method for saving camera data for the Calibration and Vision groups. Work closely with them to figure out a method to test their methods from logged data. +2. Write documentation for this method, detailing usage and performance issues. +3. Use Git to create a pull request to `s2025_teamX`, and have your PR approved. + +Part 2: +1. Develop a visualizer for rapidly plotting detected pedestrian trajectories from log data (e.g., matplotlib). +2. Work with the Control & Dynamics team to test the integrated system. +3. 
Develop metrics for pedestrian safety and passenger comfort. Analyze logs from tests. +4. Integrate as necessary. + diff --git a/README.md b/README.md index 51f26e290..b66f78c3c 100644 --- a/README.md +++ b/README.md @@ -11,25 +11,103 @@ GEMstack uses Python 3.7+ and ROS Noetic. (It is possible to do some offline and simulation work without ROS, but it is highly recommended to install it if you are working on any onboard behavior or training for rosbag files.) You should also have the following Python dependencies installed, which you can install from this folder using `pip install -r requirements.txt`: +- GEMstack Dependencies + - numpy + - scipy + - matplotlib + - opencv-python + - torch + - klampt==0.9.2 + - shapely + - dacite + - pyyaml +- Perception Dependencies + - ultralytics -- numpy -- scipy -- matplotlib -- opencv-python -- torch -- klampt -- shapely -- dacite -- pyyaml +In order to interface with the actual GEM e2 vehicle, you will need [PACMOD2](https://github.com/astuff/pacmod2) - Autonomoustuff's low level interface to vehicle. You will also need Autonomoustuff's [sensor message packages](https://github.com/astuff/astuff_sensor_msgs). The onboard computer uses Ubuntu 20.04 with Python 3.8, CUDA 11.6, and NVIDIA driver 515, so to minimize compatibility issues you should ensure that these are installed on your development system. +## Running the stack on Ubuntu 20.04 without Docker +### Checking CUDA Version -In order to interface with the actual GEM e2 vehicle, you will need [PACMOD2](https://github.com/astuff/pacmod2) - Autonomoustuff's low level interface to vehicle. You will also need Autonomoustuff's [sensor message packages](https://github.com/astuff/astuff_sensor_msgs). The onboard computer uses Ubuntu 20.04 with Python 3.8, CUDA 11.6, and NVIDIA driver 515, so to minimize compatibility issues you should ensure that these are installed on your development system. 
+Before proceeding, check your Nvidia Driver and supported CUDA version: +```bash +nvidia-smi +``` +This will show your NVIDIA driver version and the maximum supported CUDA version. Make sure you have CUDA 11.8 or 12+ installed. + +From Ubuntu 20.04 install [CUDA 11.6](https://gist.github.com/ksopyla/bf74e8ce2683460d8de6e0dc389fc7f5) or [CUDA 12+](https://gist.github.com/ksopyla/ee744bf013c83e4aa3fc525634d893c9) based on your current Nvidia Driver version. + +To check the currently installed CUDA version: +```bash +nvcc --version +``` +You can install the dependencies for GEMstack by running `setup/setup_this_machine.sh` from the top-level GEMstack folder. + +## Running the stack on Ubuntu 20.04 or 22.04 with Docker +> [!NOTE] +> Make sure to check the Nvidia Driver and supported CUDA version before proceeding by following the steps in the previous section. -From a fresh Ubuntu 20.04 with ROS Noetic and [CUDA 11.6 installed](https://gist.github.com/ksopyla/bf74e8ce2683460d8de6e0dc389fc7f5), you can install these dependencies by running `setup/setup_this_machine.sh` from the top-level GEMstack folder. +## Prerequisites +- Docker (In Linux - Make sure to follow the post-installation steps from [here](https://docs.docker.com/engine/install/linux-postinstall/)) +- Nvidia Container Toolkit -To build a Docker container with all these prerequisites, you can use the provided Dockerfile by running `docker build -t gem_stack setup/`. For GPU support you will need the NVidia Container Runtime (run `setup/get_nvidia_container.sh` from this directory to install, or see [this tutorial](https://collabnix.com/introducing-new-docker-cli-api-support-for-nvidia-gpus-under-docker-engine-19-03-0-beta-release/) to install) and run `docker run -it --gpus all gem_stack /bin/bash`. +Try running the sample workload from the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/sample-workload.html) to check if your system is compatible. 
+```bash +sudo docker run --rm --runtime=nvidia --gpus all ubuntu nvidia-smi +``` +You should see the nvidia-smi output similar to [this](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/sample-workload.html#:~:text=all%20ubuntu%20nvidia%2Dsmi-,Your%20output%20should%20resemble%20the%20following%20output%3A,-%2B%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2B%0A%7C%20NVIDIA%2DSMI%20535.86.10). + +If you see the output, you are good to go. Otherwise, you will need to install the Docker and NVidia Container Toolkit by following the instructions. +- For **Docker**, follow the instructions [here](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository). + +- For **Nvidia Container Toolkit**, run `setup/get_nvidia_container.sh` from this directory to install, or see [this](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) for more details. + +## Building the Docker image + +To build a Docker image with all these prerequisites, you can use the provided Dockerfile by running. + +```bash +bash setup/build_docker_image.sh +``` + +## Running the Docker container + +To run the container, you can use the provided Docker Compose file by running. +> [!NOTE] +> If you want to open multiple terminals to run the container, you can use the same command. It will automatically start a new terminal inside the same container. +```bash +bash run_docker_container.sh +``` +## Usage Tips and Instructions + +### Using Host Volume + +You can use the host volume under the container's home directory inside the `` folder. This allows you to build and run files that are on the host machine. For example, if you have a file on the host machine at `/home//project`, you can access it inside the container at `/home//host/project`. 
+ +### Using Dev Containers Extension in VSCode + +To have a good developer environment inside the Docker container, you can use the Dev Containers extension in VSCode. Follow these steps: + +1. Install the Dev Containers extension in VSCode. +2. Open the cloned repository in VSCode. +3. Press `ctrl+shift+p`(or select the remote explorer icon from the left bar) and select `Dev-Containers: Attach to Running Container...`. +4. Select the container name `gem_stack-container`. +5. Once attached, Select `File->Open Folder...`. +6. Select the folder/workspace you want to open in the container. + +This will set up the development environment inside the Docker container, allowing you to use VSCode features seamlessly. + +## Stopping the Docker container + +To stop the container, you can use the provided stop script by running. + +```bash +bash stop_docker_container.sh +``` +## Installing for Mac +To install Ubuntu and setup ROS for Mac, follow this [link](https://doc.clickup.com/9011960452/d/h/8cjf6m4-11191/e694fcfb47a015e) for in-depth instructions and troubleshooting. ## In this folder diff --git a/docs/Safety and Comfort Metrics Documentation.md b/docs/Safety and Comfort Metrics Documentation.md new file mode 100644 index 000000000..ea0e2f22f --- /dev/null +++ b/docs/Safety and Comfort Metrics Documentation.md @@ -0,0 +1,39 @@ +# Test Safety and Comfort Metrics Documentation + +## Files/Scripts +- `testing/test_comfort_metrics.py` + +## Purpose + +This script analyzes log files and reports vehicle comfort and pedestrian safety with plots. It extracts: + +- **Vehicle Data:** Time, acceleration, heading rate from `behavior.json`. +- **Pedestrian Data:** Time and pedestrian distance to car from `behavior.json`. +- **Pure Pursuit Tracker Data (Optional):** Vehicle time, cross-track error, and position (actual vs. desired) from `PurePursuitTrajectoryTracker_debug.csv`. + +The script will include 5 plots: +- Vehicle jerk vs. time. +- Vehicle heading acceleration vs. time. 
+- Vehicle cross-track error vs. time. +- Vehicle actual vs. desired position. +- Pedestrian distance vs. time. + +The script also prints key metrics: +- RMS Jerk +- RMS Heading acceleration +- RMS Cross track error +- RMS Position error +- Minimum pedestrian distance to car + +## Usage + +1. **Check log directory:** + - Ensure your log directory contains `behavior.json` (required). + - Optionally include `PurePursuitTrajectoryTracker_debug.csv` (if missing, some plots are skipped). + +2. **Run the script:** + + ```bash + python test_comfort_metrics.py + ``` + Replace `` with the path to the folder containing the log files. \ No newline at end of file diff --git a/docs/auto-upload.md b/docs/auto-upload.md new file mode 100644 index 000000000..3e3798e12 --- /dev/null +++ b/docs/auto-upload.md @@ -0,0 +1,28 @@ +# OneDrive Upload + +## Config file setup + +An authenticator service must be set up in Azure first. This can be done by going to App registrations in Azure and creating a new registration + + +To configure the onedrive config, it must be named onedrive_config.json and be in the following format + +```json +{ + "CLIENT_ID": "", + "TENANT_ID": "", + "DRIVE_ID": "", + "ITEM_ID": "" +} +``` + +The Client ID and the tenant ID of the authenticator service can be found in the app registration in Azure + +The DRIVE_ID and ITEM_ID of the onedrive folder to upload to can be found with this guide: +https://www.youtube.com/watch?v=3pjS7YTIcgQ + +Anybody can access the same authenticator service for free as long as they have an illinois account. + + +## Usage +When any Ros topics are specified in the yaml file under 'log,' a prompt will appear in the console after finishing the GEMStack execution. If the user specifies that they want to continue upload, a window will appear in browser (will not work in docker or when running headless.) You may login with your illinois account only. 
diff --git a/docs/end-to-end-testing.md b/docs/end-to-end-testing.md new file mode 100644 index 000000000..00ea15f8f --- /dev/null +++ b/docs/end-to-end-testing.md @@ -0,0 +1,34 @@ +# End-to-End Testing + +## Writing Tests + +Under the GEMStack/GEMStack/integration_tests directory, there are two example test cases (log folders still need to be specified) + + +Each test case is associated with a test config, which is written in the order: + +>Name of test, variant of sim to run, launch file location, runtime of test + +The test must also be written with a custom validation function that inherits from the base validator and mapped to the name in the test suite: + +```python +VALIDATORS = { + "test1": ValidateTestCase1(), + "test2": ValidateTestCase2(), +} +``` + + Each test will run in sequential order and start GEMStack in a separate process over the course of the specified runtime. + + The custom validation function will then run on the latest log folder generated (This part needs to be made more robust later). + + The validation function is open ended so there is flexibility to parse the behavior.json file, rosbag, or stdout/stderr files. + + All results and the runtime will be displayed in the terminal at the end. + + ## Running the test suite: + + python3 integration\_tests/run\_tests.py + + + diff --git a/docs/rostopic-replay.md b/docs/rostopic-replay.md new file mode 100644 index 000000000..bc0da2c4f --- /dev/null +++ b/docs/rostopic-replay.md @@ -0,0 +1,24 @@ +# Component Replay and Rostopic Replay + +## Component Replay +Component replay can now play back states from behavior.json. Only the component outputs can be played back. (The vehicle_interface commands cannot be played back). Note that the current simulator maps need to be updated to handle real GEM data + +## Rostopic Replay + +Will create new publishers from the specified topics in the vehicle.bag. 
The messages will be published with the GEM interface's time, to keep sync with the rest of the execution. This means that the rest of the GEMstack execution must also be running (will not publish on detector_only variant) + + + +## Usage + +Add topics and components to replay in the yaml file: + +```yaml +replay: # Add items here to set certain topics / inputs to be replayed from logs + # Specify which log folder to replay from + log: + ros_topics : [] + components : [] +``` + + diff --git a/homework/blink.py b/homework/blink.py new file mode 100644 index 000000000..93803ae93 --- /dev/null +++ b/homework/blink.py @@ -0,0 +1,117 @@ +import rospy +from std_msgs.msg import String, Bool, Float32, Float64 +from pacmod_msgs.msg import PositionWithSpeed, PacmodCmd, SystemRptInt, SystemRptFloat, VehicleSpeedRpt + +# For message format, see +# https://github.com/astuff/astuff_sensor_msgs/blob/3.3.0/pacmod_msgs/msg/PacmodCmd.msg + +class BlinkDistress: + """Your control loop code should go here. You will use ROS and Pacmod messages + to communicate with drive-by-wire system to control the vehicle's turn signals. + """ + def __init__(self): + # TODO: Initialize your publishers and subscribers here + + # When you create a subscriber to a /pacmod/parsed_tx/X topic, you will need to provide a callback function. + # You will want this callback to be a BlinkDistress method, such as print_X(self, msg). msg will have a + # ROS message type, and you can find out what this is by either reading the documentation or running + # "rostopic info /pacmod/parsed_tx/X" on the command line. 
+ + rospy.init_node("blink_node",anonymous=True) + rospy.Rate(5) + + self.sub_accel = rospy.Subscriber("/pacmod/parsed_tx/accel_rpt",SystemRptFloat,self.cb_accel,queue_size=1) + self.sub_brake = rospy.Subscriber("/pacmod/parsed_tx/brake_rpt",SystemRptFloat,self.cb_brake,queue_size=1) + + self.pub_turn_cmd = rospy.Publisher("/pacmod/as_rx/turn_cmd", PacmodCmd,queue_size=1) + + + self.msg_accel = SystemRptFloat() + self.msg_brake = SystemRptFloat() + + # 0 right, 1 off, 2 left + self.msg_signal = PacmodCmd() + self.msg_signal.ui16_cmd = 2 + + rospy.sleep(2) + + self.prev_time = rospy.get_time() + + while not rospy.is_shutdown(): + self.curr_time = rospy.get_time() + # change signal every 2 secs + if (self.curr_time - self.prev_time > 2): + self.msg_signal.ui16_cmd = (self.msg_signal.ui16_cmd + 1) % 3 + self.prev_time = self.curr_time + # publish cmd + self.update() + + def cb_accel(self,msg): + self.msg_accel = msg + print("accel output is {}".format(self.msg_accel.output)) + + def cb_brake(self,msg): + self.msg_brake = msg + print("brake output is {}".format(self.msg_brake.output)) + + def rate(self): + """Requested update frequency, in Hz""" + return 0.5 + + def initialize(self): + """Run first""" + pass + + def cleanup(self): + """Run last""" + pass + + def update(self): + """Run in a loop""" + # TODO: Implement your control loop here + # You will need to publish a PacmodCmd() to /pacmod/as_rx/turn_cmd. Read the documentation to see + # what the data in the message indicates. + + self.pub_turn_cmd.publish(self.msg_signal) + + def healthy(self): + """Returns True if the element is in a stable state.""" + return True + + def done(self): + """Return True if you want to exit.""" + return False + + + +def run_ros_loop(node): + """Executes the event loop of a node using ROS. `node` should support + rate(), initialize(), cleanup(), update(), and done(). You should not modify + this code. + """ + #intializes the node. 
We use disable_signals so that the end() function can be called when Ctrl+C is issued. + #See http://wiki.ros.org/rospy/Overview/Initialization%20and%20Shutdown + rospy.init_node(node.__class__.__name__, disable_signals=True) + + node.initialize() + rate = rospy.Rate(node.rate()) + termination_reason = "undetermined" + try: + while not rospy.is_shutdown() and not node.done() and node.healthy(): + node.update() + rate.sleep() + if node.done(): + termination_reason = "Node done" + if not node.healthy(): + termination_reason = "Node in unhealthy state" + except KeyboardInterrupt: + termination_reason = "Killed by user" + except Exception as e: + termination_reason = "Exception "+str(e) + raise + finally: + node.cleanup() + rospy.signal_shutdown(termination_reason) + +if __name__ == '__main__': + run_ros_loop(BlinkDistress()) \ No newline at end of file diff --git a/homework/blink_component.py b/homework/blink_component.py new file mode 100644 index 000000000..316518e55 --- /dev/null +++ b/homework/blink_component.py @@ -0,0 +1,72 @@ +from ..component import Component +from ..interface.gem import GEMInterface,GEMVehicleCommand,GEMVehicleReading + +import time + +class BlinkDistress(Component): + """Your control loop code should go here. You will use GEMVehicleCommand + to communicate with drive-by-wire system to control the vehicle's turn signals. 
+ """ + def __init__(self, vehicle_interface : GEMInterface): + self.vehicle_interface = vehicle_interface + self.command = GEMVehicleCommand(gear=0, + accelerator_pedal_position=0, + brake_pedal_position=0, + steering_wheel_angle=0, + accelerator_pedal_speed=0, + brake_pedal_speed = 0, + steering_wheel_speed = 0) + + self.prev_time = time.time() + self.curr_time = time.time() + + while True : + self.curr_time = time.time() + self.update() + + def rate(self): + """Requested update frequency, in Hz""" + return 0.5 + + def initialize(self): + """Run first""" + pass + + def cleanup(self): + """Run last""" + pass + + def update(self): + """Run in a loop""" + # we need to set up a GEMVehicleCommand which encapsulates all commands that will be + # sent to the drive-by-wire system, simultaneously. To avoid doing arbitrary things + # to the vehicle, let's maintain the current values (e.g., accelerator, brake pedal, + # steering angle) from its current readings. + + # command = self.vehicle_interface.command_from_reading() + command = self.command + + # TODO: alter command to execute turn signals, then uncomment line below to send + # the command to vehicle + # self.vehicle_interface.send_command(command) + if (self.prev_time - self.curr_time > 2): + if command.left_turn_signal is True: + command.left_turn_signal = False + command.right_turn_signal = True + + elif command.right_turn_signal is True: + command.left_turn_signal = False + command.right_turn_signal = False + + else: + command.left_turn_signal = True + command.right_turn_signal = False + self.prev_time = self.curr_time + self.command = command + + self.vehicle_interface.send_command(command) + + def healthy(self): + """Returns True if the element is in a stable state.""" + return True + diff --git a/homework/blink_launch.yaml b/homework/blink_launch.yaml new file mode 100644 index 000000000..41174f692 --- /dev/null +++ b/homework/blink_launch.yaml @@ -0,0 +1,64 @@ +description: "Launch just the blink distress 
code" +mode: hardware +vehicle_interface: gem_hardware.GEMHardwareInterface +mission_execution: StandardExecutor +require_engaged: False +# Recovery behavior after a component failure +recovery: + perception: + state_estimation : GNSSStateEstimator + perception_normalization : StandardPerceptionNormalizer + planning: + trajectory_tracking : blink_component.BlinkDistress +# Driving behavior for the GEM vehicle. Runs perception and planner but doesn't execute anything (no controller). +drive: + perception: + state_estimation : GNSSStateEstimator + perception_normalization : StandardPerceptionNormalizer + planning: + trajectory_tracking : blink_component.BlinkDistress +log: + # Specify the top-level folder to save the log files. Default is 'logs' + folder : 'logs' + # If prefix is specified, then the log folder will be named with the prefix followed by the date and time. Default no prefix + prefix : 'hw1_' + # If suffix is specified, then logs will output to folder/prefix+suffix. Default uses date and time as the suffix + #suffix : 'test3' + # Specify which ros topics to record to vehicle.bag. + #ros_topics : ['/pacmod/as_rx/turn_cmd','/pacmod/as_tx/turn_rpt'] + # Specify options to pass to rosbag record. Default is no options. + #rosbag_options : '--split --size=1024' + # If True, then record all readings / commands of the vehicle interface. Default False + vehicle_interface : True + # Specify which components to record to behavior.json. Default records nothing + components : [] + # Specify which components of state to record to state.json. Default records nothing + #state: ['all'] + # Specify the rate in Hz at which to record state to state.json. 
Default records at the pipeline's rate + #state_rate: 10 +replay: # Add items here to set certain topics / inputs to be replayed from logs + # Specify which log folder to replay from + log: + ros_topics : [] + components : [] + +#usually can keep this constant +computation_graph: !include "../GEMstack/knowledge/defaults/computation_graph.yaml" + +variants: + sim: + run: + description: "Runs the distress signal, but in simulation" + mode: simulation + vehicle_interface: + type: gem_simulator.GEMDoubleIntegratorSimulationInterface + args: + scene: !relative_path '../scenes/xyhead_demo.yaml' + require_engaged: False + visualization: !include "klampt_visualization.yaml" + recovery: + perception: + state_estimation : OmniscientStateEstimator + drive: + perception: + state_estimation : OmniscientStateEstimator diff --git a/homework/test_longitudinal_plan.py b/homework/test_longitudinal_plan.py new file mode 100644 index 000000000..400f34fea --- /dev/null +++ b/homework/test_longitudinal_plan.py @@ -0,0 +1,80 @@ +#needed to import GEMstack from top level directory +import sys +import os +sys.path.append(os.getcwd()) + +from GEMstack.state import Path, ObjectFrameEnum +from GEMstack.onboard.planning.longitudinal_planning import longitudinal_plan,longitudinal_brake +import matplotlib.pyplot as plt + +def test_longitudinal_planning(): + test_path = Path(ObjectFrameEnum.START,[(0,0),(1,0),(2,0),(3,0),(4,0),(5,0),(6,0)]) + test_path2 = Path(ObjectFrameEnum.START,[(5,0),(6,1),(7,2),(9,4)]) + + test_traj = longitudinal_brake(test_path, 2.0, 0.0) + assert (t1 < t2 for (t1,t2) in zip(test_traj.times[:-1],test_traj.times[1:]) ) + plt.plot(test_traj.times,[p[0] for p in test_traj.points]) + plt.title("Braking from 0 m/s (should just stay still)") + plt.xlabel('time') + plt.ylabel('position') + plt.show() + + test_traj = longitudinal_brake(test_path, 2.0, 2.0) + assert (t1 < t2 for (t1,t2) in zip(test_traj.times[:-1],test_traj.times[1:]) ) + plt.plot(test_traj.times,[p[0] for p in 
test_traj.points]) + plt.title("Braking from 2 m/s") + plt.xlabel('time') + plt.ylabel('position') + plt.show() + + test_traj = longitudinal_plan(test_path, 1.0, 2.0, 3.0, 0.0) + assert (t1 < t2 for (t1,t2) in zip(test_traj.times[:-1],test_traj.times[1:]) ) + plt.plot(test_traj.times,[p[0] for p in test_traj.points]) + plt.title("Accelerating from 0 m/s") + plt.xlabel('time') + plt.ylabel('position') + plt.show() + + + test_traj = longitudinal_plan(test_path, 1.0, 2.0, 3.0, 2.0) + assert (t1 < t2 for (t1,t2) in zip(test_traj.times[:-1],test_traj.times[1:]) ) + plt.plot(test_traj.times,[p[0] for p in test_traj.points]) + plt.title("Accelerating from 2 m/s") + plt.xlabel('time') + plt.ylabel('position') + plt.show() + + test_traj = longitudinal_plan(test_path, 0.0, 2.0, 3.0, 3.1) + assert (t1 < t2 for (t1,t2) in zip(test_traj.times[:-1],test_traj.times[1:]) ) + plt.plot(test_traj.times,[p[0] for p in test_traj.points]) + plt.title("Keeping constant velocity at 3.1 m/s") + plt.xlabel('time') + plt.ylabel('position') + plt.show() + + test_traj = longitudinal_plan(test_path, 2.0, 2.0, 20.0, 10.0) + assert (t1 < t2 for (t1,t2) in zip(test_traj.times[:-1],test_traj.times[1:]) ) + plt.plot(test_traj.times,[p[0] for p in test_traj.points]) + plt.title("Too little time to stop, starting at 10 m/s") + plt.xlabel('time') + plt.ylabel('position') + plt.show() + + test_traj = longitudinal_brake(test_path, 2.0, 10.0) + plt.plot(test_traj.times,[p[0] for p in test_traj.points]) + plt.title("Too little time to stop, braking at 10 m/s") + plt.xlabel('time') + plt.ylabel('position') + plt.show() + + test_traj = longitudinal_plan(test_path2, 1.0, 2.0, 3.0, 0.0) + plt.plot(test_traj.times,[p[0] for p in test_traj.points]) + plt.title("Nonuniform planning") + plt.xlabel('time') + plt.ylabel('position') + plt.show() + + + +if __name__ == '__main__': + test_longitudinal_planning() diff --git a/integration_tests/launch/klampt_visualization.yaml 
b/integration_tests/launch/klampt_visualization.yaml new file mode 100644 index 000000000..2265debf3 --- /dev/null +++ b/integration_tests/launch/klampt_visualization.yaml @@ -0,0 +1,7 @@ +type: klampt_visualization.KlamptVisualization +args: + rate: 20 + #Don't include save_as if you don't want to save the video + save_as: + #save_as: "fixed_route_sim.mp4" + diff --git a/integration_tests/launch/mpl_visualization.yaml b/integration_tests/launch/mpl_visualization.yaml new file mode 100644 index 000000000..b9642e4de --- /dev/null +++ b/integration_tests/launch/mpl_visualization.yaml @@ -0,0 +1,7 @@ +type: mpl_visualization.MPLVisualization +args: + rate: 8 + #Don't include save_as if you don't want to save the video + save_as: + #save_as: "fixed_route_sim.mp4" +multiprocess: True diff --git a/integration_tests/launch/test1.yaml b/integration_tests/launch/test1.yaml new file mode 100644 index 000000000..e779ea9b5 --- /dev/null +++ b/integration_tests/launch/test1.yaml @@ -0,0 +1,79 @@ +description: "Drive the GEM vehicle along a fixed route (currently xyhead_highbay_backlot_p.csv)" +mode: hardware +vehicle_interface: gem_hardware.GEMHardwareInterface +mission_execution: StandardExecutor +# Recovery behavior after a component failure +recovery: + planning: + trajectory_tracking: + type: recovery.StopTrajectoryTracker + print: False +# Driving behavior for the GEM vehicle following a fixed route +drive: + perception: + state_estimation : GNSSStateEstimator + perception_normalization : StandardPerceptionNormalizer + planning: + route_planning: + type: StaticRoutePlanner + args: [!relative_path '../../GEMstack/knowledge/routes/xyhead_highbay_backlot_p.csv'] + motion_planning: + type: RouteToTrajectoryPlanner + args: [null] #desired speed in m/s. 
If null, this will keep the route untimed for the trajectory tracker + trajectory_tracking: + type: pure_pursuit.PurePursuitTrajectoryTracker + args: {desired_speed: 2.5} #approximately 5mph + print: False +log: + # Specify the top-level folder to save the log files. Default is 'logs' + #folder : 'logs' + # If prefix is Sspecified, then the log folder will be named with the prefix followed by the date and time. Default no prefix + #prefix : 'fixed_route_' + # If suffix is specified, then logs will output to folder/prefix+suffix. Default uses date and time as the suffix + #suffix : 'test3' + # Specify which ros topics to record to vehicle.bag. Default records nothing. This records the "standard" ROS topics. + ros_topics : [] + # Specify options to pass to rosbag record. Default is no options. + #rosbag_options : '--split --size=1024' + # If True, then record all readings / commands of the vehicle interface. Default False + vehicle_interface : True + # Specify which components to record to behavior.json. Default records nothing + components : ['state_estimation'] + # Specify which components of state to record to state.json. Default records nothing + #state: ['all'] + # Specify the rate in Hz at which to record state to state.json. 
Default records at the pipeline's rate + #state_rate: 10 +replay: # Add items here to set certain topics / inputs to be replayed from logs + # Specify which log folder to replay from + log: 'logs/test_control' + # For replaying sensor data, try !include "../knowledge/defaults/standard_sensor_ros_topics.yaml" + ros_topics : ['/septentrio_gnss/insnavgeod'] + components : ['state_estimation'] + +#usually can keep this constant +computation_graph: !include "../../GEMstack/knowledge/defaults/computation_graph.yaml" + +after: + show_log_folder: True #set to false to avoid showing the log folder + +#on load, variants will overload the settings structure +variants: + #sim variant doesn't execute on the real vehicle + #real variant executes on the real robot + sim: + run: + mode: simulation + vehicle_interface: + type: gem_simulator.GEMDoubleIntegratorSimulationInterface + args: + scene: !relative_path '../scenes/highbay.yaml' + + drive: + perception: + state_estimation : OmniscientStateEstimator + agent_detection : OmniscientAgentDetector + visualization: !include "klampt_visualization.yaml" + #visualization: !include "mpl_visualization.yaml" + log_ros: + log: + ros_topics : !include "../../GEMstack/knowledge/defaults/standard_ros_topics.yaml" diff --git a/integration_tests/launch/test2.yaml b/integration_tests/launch/test2.yaml new file mode 100644 index 000000000..c9d5376eb --- /dev/null +++ b/integration_tests/launch/test2.yaml @@ -0,0 +1,101 @@ +description: "Run the yielding trajectory planner on the real vehicle with real perception" +mode: hardware +vehicle_interface: gem_hardware.GEMHardwareInterface +mission_execution: StandardExecutor + +# Recovery behavior after a component failure +recovery: + planning: + trajectory_tracking : recovery.StopTrajectoryTracker + +# Driving behavior for the GEM vehicle. Runs real pedestrian perception, yield planner, but does not send commands to real vehicle. 
+drive: + perception: + state_estimation : GNSSStateEstimator + agent_detection : pedestrian_detection.PedestrianDetector2D + perception_normalization : StandardPerceptionNormalizer + planning: + relations_estimation: pedestrian_yield_logic.PedestrianYielder + route_planning: + type: StaticRoutePlanner + args: [!relative_path '../../GEMstack/knowledge/routes/forward_15m.csv','start'] + motion_planning: longitudinal_planning.YieldTrajectoryPlanner + trajectory_tracking: + type: pure_pursuit.PurePursuitTrajectoryTracker + print: False + + +log: + # Specify the top-level folder to save the log files. Default is 'logs' + #folder : 'logs' + # If prefix is specified, then the log folder will be named with the prefix followed by the date and time. Default no prefix + #prefix : 'fixed_route_' + # If suffix is specified, then logs will output to folder/prefix+suffix. Default uses date and time as the suffix + #suffix : 'test3' + # Specify which ros topics to record to vehicle.bag. Default records nothing. This records the "standard" ROS topics. + ros_topics : + # Specify options to pass to rosbag record. Default is no options. + #rosbag_options : '--split --size=1024' + # If True, then record all readings / commands of the vehicle interface. Default False + vehicle_interface : True + # Specify which components to record to behavior.json. Default records nothing + components : ['state_estimation','agent_detection','motion_planning'] + # Specify which components of state to record to state.json. Default records nothing + #state: ['all'] + # Specify the rate in Hz at which to record state to state.json. 
Default records at the pipeline's rate + #state_rate: 10 +replay: # Add items here to set certain topics / inputs to be replayed from logs + # Specify which log folder to replay from + log: 'logs/perception_test' + ros_topics : ['/oak/rgb/camera_info', '/oak/rgb/image_raw', '/ouster/points', '/septentrio_gnss/imu', '/septentrio_gnss/insnavgeod'] + components : [] + +#usually can keep this constant +computation_graph: !include "../../GEMstack/knowledge/defaults/computation_graph.yaml" + +variants: + detector_only: + run: + description: "Run the pedestrian detection code" + drive: + planning: + trajectory_tracking: + visualization: !include "mpl_visualization.yaml" + + real_sim: + run: + description: "Run the pedestrian detection code with real detection and fake simulation" + mode: hardware + vehicle_interface: + type: gem_mixed.GEMRealSensorsWithSimMotionInterface + args: + scene: !relative_path '../../scenes/xyhead_demo.yaml' + mission_execution: StandardExecutor + require_engaged: False + visualization: !include "mpl_visualization.yaml" + drive: + perception: + state_estimation : OmniscientStateEstimator + + + fake_real: + run: + description: "Run the yielding trajectory planner on the real vehicle with faked perception" + drive: + perception: + agent_detection : pedestrian_detection.FakePedestrianDetector2D + + fake_sim: + run: + description: "Run the yielding trajectory planner in simulation with faked perception" + mode: simulation + vehicle_interface: + type: gem_simulator.GEMDoubleIntegratorSimulationInterface + args: + scene: !relative_path '../../scenes/xyhead_demo.yaml' + visualization: !include "klampt_visualization.yaml" + drive: + perception: + agent_detection : pedestrian_detection.FakePedestrianDetector2D + #agent_detection : OmniscientAgentDetector #this option reads agents from the simulator + state_estimation : OmniscientStateEstimator diff --git a/integration_tests/run_tests.py b/integration_tests/run_tests.py new file mode 100644 index 
000000000..b1f48a970 --- /dev/null +++ b/integration_tests/run_tests.py @@ -0,0 +1,90 @@ +import unittest +import subprocess +import os +import json +from parameterized import parameterized +import time +import signal +from abc import ABC, abstractmethod +# Configuration of each test case. Order is Name, variant, launch file location, runtime +TEST_CONFIGS = [ + ("test1", "sim", "integration_tests/launch/test1.yaml", 10), + ("test2", "real_sim", "integration_tests/launch/test2.yaml", 25), + +] + +def get_last_modified_folder(parent_path): + subdirs = [entry.path for entry in os.scandir(parent_path) if entry.is_dir()] + if not subdirs: + return None + last_modified_folder = max(subdirs, key=os.path.getmtime) + return last_modified_folder + + +class BaseLogValidator(ABC): + + @abstractmethod + def validate(self, log_dir): + """Each test case must implement this method""" + pass + +class ValidateTestCase1(BaseLogValidator): + def validate(self, log_dir): + log_file = os.path.join(log_dir, "behavior.json") + with open(log_file, "r") as file: + for index, line in enumerate(file): + record = json.loads(line) + acceleration = record.get("vehicle", {}).get("data", {}).get("acceleration", 0) + assert abs(acceleration) <= .11, f"Record {index} has acceleration {acceleration} > threshold" + + +class ValidateTestCase2(BaseLogValidator): + def validate(self, log_dir): + log_file = os.path.join(log_dir, "behavior.json") + has_non_empty_agent = False + + with open(log_file, "r") as file: + for _, line in enumerate(file): + record = json.loads(line) + agent_data = record.get("agents", {}) + + if agent_data: + has_non_empty_agent = True + break + + assert has_non_empty_agent, "No agents detected in behavior.json!" 
+ + +class IntegrationTestSuite(unittest.TestCase): + VALIDATORS = { + "test1": ValidateTestCase1(), + "test2": ValidateTestCase2(), + } + def setUp(self): + pass + def tearDown(self): + """Clean up after each test if necessary.""" + pass + + @parameterized.expand(TEST_CONFIGS) + def test_command_execution(self, name, variant, config_path, runtime): + command = ["python3", "main.py", f"--variant={variant}", config_path] + process = subprocess.Popen(command, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + #Uncomment to debug output from execution + # for line in iter(process.stdout.readline, ''): + # print(line, end='') + time.sleep(runtime) + print("Stopping GEMStack...") + os.kill(process.pid, signal.SIGINT) + process.wait() + + # Validate logs + validator = self.VALIDATORS.get(name) + if validator is None: + self.fail(f"no validator found for {name}") + validator.validate(get_last_modified_folder('logs')) + + + +if __name__ == "__main__": + unittest.main() diff --git a/integration_tests/scenes/highbay.yaml b/integration_tests/scenes/highbay.yaml new file mode 100644 index 000000000..57b1bdff3 --- /dev/null +++ b/integration_tests/scenes/highbay.yaml @@ -0,0 +1,11 @@ +#TODO: change visualization to display highbay map for replaying/visualization of recorded data +vehicle_state: [0.0, 0.0, 200.0, 0.0, 0.0] +agents: + ped1: + type: pedestrian + position: [15.0, 2.0] + nominal_velocity: 0.5 + target: [15.0,10.0] + behavior: loop + + diff --git a/integration_tests/scenes/xyhead_demo.yaml b/integration_tests/scenes/xyhead_demo.yaml new file mode 100644 index 000000000..c6d97477a --- /dev/null +++ b/integration_tests/scenes/xyhead_demo.yaml @@ -0,0 +1,9 @@ +vehicle_state: [4.0, 5.0, 0.0, 0.0, 0.0] +agents: + ped1: + type: pedestrian + position: [15.0, 2.0] + nominal_velocity: 0.5 + target: [15.0,10.0] + behavior: loop + \ No newline at end of file diff --git a/launch/blink_launch.yaml b/launch/blink_launch.yaml new file mode 100644 index 
000000000..de9d1abfd --- /dev/null +++ b/launch/blink_launch.yaml @@ -0,0 +1,68 @@ +description: "Launch just the blink distress code" +mode: hardware +vehicle_interface: gem_hardware.GEMHardwareInterface +mission_execution: StandardExecutor +require_engaged: False +# Recovery behavior after a component failure +recovery: + perception: + state_estimation : GNSSStateEstimator + perception_normalization : StandardPerceptionNormalizer + planning: + driving_logic : driving_logic_component.Driving_Logic + signaling : blink_component.BlinkDistress + # trajectory_tracking : blink_component.BlinkDistress +# Driving behavior for the GEM vehicle. Runs perception and planner but doesn't execute anything (no controller). +drive: + perception: + state_estimation : GNSSStateEstimator + perception_normalization : StandardPerceptionNormalizer + planning: + driving_logic : driving_logic_component.Driving_Logic + signaling : blink_component.BlinkDistress + # trajectory_tracking : blink_component.BlinkDistress +log: + # Specify the top-level folder to save the log files. Default is 'logs' + folder : 'logs' + # If prefix is specified, then the log folder will be named with the prefix followed by the date and time. Default no prefix + prefix : 'hw1_' + # If suffix is specified, then logs will output to folder/prefix+suffix. Default uses date and time as the suffix + #suffix : 'test3' + # Specify which ros topics to record to vehicle.bag. + #ros_topics : ['/pacmod/as_rx/turn_cmd','/pacmod/as_tx/turn_rpt'] + # Specify options to pass to rosbag record. Default is no options. + #rosbag_options : '--split --size=1024' + # If True, then record all readings / commands of the vehicle interface. Default False + vehicle_interface : True + # Specify which components to record to behavior.json. Default records nothing + components : [] + # Specify which components of state to record to state.json. 
Default records nothing + #state: ['all'] + # Specify the rate in Hz at which to record state to state.json. Default records at the pipeline's rate + #state_rate: 10 +replay: # Add items here to set certain topics / inputs to be replayed from logs + # Specify which log folder to replay from + log: + ros_topics : [] + components : [] + +#usually can keep this constant +computation_graph: !include "../GEMstack/knowledge/defaults/computation_graph.yaml" + +variants: + sim: + run: + description: "Runs the distress signal, but in simulation" + mode: simulation + vehicle_interface: + type: gem_simulator.GEMDoubleIntegratorSimulationInterface + args: + scene: !relative_path '../scenes/xyhead_demo.yaml' + require_engaged: False + visualization: !include "klampt_visualization.yaml" + recovery: + perception: + state_estimation : OmniscientStateEstimator + drive: + perception: + state_estimation : OmniscientStateEstimator diff --git a/launch/fixed_route.yaml b/launch/fixed_route.yaml index c05de8ff7..bdb3ff222 100644 --- a/launch/fixed_route.yaml +++ b/launch/fixed_route.yaml @@ -2,6 +2,7 @@ description: "Drive the GEM vehicle along a fixed route (currently xyhead_highba mode: hardware vehicle_interface: gem_hardware.GEMHardwareInterface mission_execution: StandardExecutor +# require_engaged: False # Recovery behavior after a component failure recovery: planning: @@ -16,13 +17,14 @@ drive: planning: route_planning: type: StaticRoutePlanner - args: [!relative_path '../GEMstack/knowledge/routes/xyhead_highbay_backlot_p.csv'] - motion_planning: - type: RouteToTrajectoryPlanner - args: [null] #desired speed in m/s. If null, this will keep the route untimed for the trajectory tracker + args: [!relative_path '../GEMstack/knowledge/routes/forward_15m.csv'] #forward_15m + motion_planning: longitudinal_planning.YieldTrajectoryPlanner + # type: RouteToTrajectoryPlanner + # args: [null] #desired speed in m/s. 
If null, this will keep the route untimed for the trajectory tracker trajectory_tracking: type: pure_pursuit.PurePursuitTrajectoryTracker - args: {desired_speed: 2.5} #approximately 5mph + # args: [null] + # args: {desired_speed: 2.5} #approximately 5mph print: False log: # Specify the top-level folder to save the log files. Default is 'logs' @@ -32,7 +34,7 @@ log: # If suffix is specified, then logs will output to folder/prefix+suffix. Default uses date and time as the suffix #suffix : 'test3' # Specify which ros topics to record to vehicle.bag. Default records nothing. This records the "standard" ROS topics. - ros_topics : [] + ros_topics : ['/septentrio_gnss/insnavgeod','/pacmod/parsed_tx/vehicle_speed_rpt'] # Specify options to pass to rosbag record. Default is no options. #rosbag_options : '--split --size=1024' # If True, then record all readings / commands of the vehicle interface. Default False @@ -56,7 +58,7 @@ computation_graph: !include "../GEMstack/knowledge/defaults/computation_graph.ya after: show_log_folder: True #set to false to avoid showing the log folder -#on load, variants will overload the settings structure +#on load, variants will overload the settingsNone structure variants: #sim variant doesn't execute on the real vehicle #real variant executes on the real robot @@ -72,8 +74,7 @@ variants: perception: state_estimation : OmniscientStateEstimator agent_detection : OmniscientAgentDetector - visualization: !include "klampt_visualization.yaml" - #visualization: !include "mpl_visualization.yaml" + visualization: [!include "klampt_visualization.yaml", !include "mpl_visualization.yaml"] log_ros: log: - ros_topics : !include "../GEMstack/knowledge/defaults/standard_ros_topics.yaml" \ No newline at end of file + ros_topics : !include "../GEMstack/knowledge/defaults/standard_ros_topics.yaml" diff --git a/launch/fixed_route_combined.yaml b/launch/fixed_route_combined.yaml new file mode 100644 index 000000000..85a68b44b --- /dev/null +++ 
b/launch/fixed_route_combined.yaml @@ -0,0 +1,81 @@ +description: "Drive the GEM vehicle along a fixed route (currently xyhead_highbay_backlot_p.csv)" +mode: hardware +vehicle_interface: gem_hardware.GEMHardwareInterface +mission_execution: StandardExecutor +# require_engaged: False +# Recovery behavior after a component failure +recovery: + planning: + trajectory_tracking: + type: recovery.StopTrajectoryTracker + print: False +# Driving behavior for the GEM vehicle following a fixed route +drive: + perception: + state_estimation : GNSSStateEstimator + perception_normalization : StandardPerceptionNormalizer + agent_detection : pedestrian_detection.PedestrianDetector2D + planning: + route_planning: + type: StaticRoutePlanner + args: [!relative_path '../GEMstack/knowledge/routes/forward_15m.csv'] #forward_15m + motion_planning: longitudinal_planning.YieldTrajectoryPlanner + # type: RouteToTrajectoryPlanner + # args: [null] #desired speed in m/s. If null, this will keep the route untimed for the trajectory tracker + trajectory_tracking: + type: pure_pursuit.PurePursuitTrajectoryTracker + # args: [null] + args: {desired_speed: 2.5} #approximately 5mph + print: False +log: + # Specify the top-level folder to save the log files. Default is 'logs' + #folder : 'logs' + # If prefix is specified, then the log folder will be named with the prefix followed by the date and time. Default no prefix + #prefix : 'fixed_route_' + # If suffix is specified, then logs will output to folder/prefix+suffix. Default uses date and time as the suffix + #suffix : 'test3' + # Specify which ros topics to record to vehicle.bag. Default records nothing. This records the "standard" ROS topics. + ros_topics : ['/septentrio_gnss/insnavgeod','/pacmod/parsed_tx/vehicle_speed_rpt'] + # Specify options to pass to rosbag record. Default is no options. + #rosbag_options : '--split --size=1024' + # If True, then record all readings / commands of the vehicle interface. 
Default False + vehicle_interface : True + # Specify which components to record to behavior.json. Default records nothing + components : ['state_estimation','trajectory_tracking'] + # Specify which components of state to record to state.json. Default records nothing + #state: ['all'] + # Specify the rate in Hz at which to record state to state.json. Default records at the pipeline's rate + #state_rate: 10 +replay: # Add items here to set certain topics / inputs to be replayed from logs + # Specify which log folder to replay from + log: + # For replaying sensor data, try !include "../knowledge/defaults/standard_sensor_ros_topics.yaml" + ros_topics : [] + components : [] + +#usually can keep this constant +computation_graph: !include "../GEMstack/knowledge/defaults/computation_graph.yaml" + +after: + show_log_folder: True #set to false to avoid showing the log folder + +#on load, variants will overload the settingsNone structure +variants: + #sim variant doesn't execute on the real vehicle + #real variant executes on the real robot + sim: + run: + mode: simulation + vehicle_interface: + type: gem_simulator.GEMDoubleIntegratorSimulationInterface + args: + scene: !relative_path '../scenes/xyhead_demo.yaml' + + drive: + perception: + state_estimation : OmniscientStateEstimator + agent_detection : pedestrian_detection.PedestrianDetector2D + visualization: [!include "klampt_visualization.yaml", !include "mpl_visualization.yaml"] + log_ros: + log: + ros_topics : !include "../GEMstack/knowledge/defaults/standard_ros_topics.yaml" diff --git a/launch/gather_data.yaml b/launch/gather_data.yaml index 4f3d03d4c..0a81e8272 100644 --- a/launch/gather_data.yaml +++ b/launch/gather_data.yaml @@ -26,7 +26,12 @@ log: # If suffix is specified, then logs will output to folder/prefix+suffix. Default uses date and time as the suffix #suffix : 'test3' # Specify which ros topics to record to vehicle.bag. Default records nothing. This records the "standard" ROS topics. 
- ros_topics : [] + ros_topics : ['/oak/left/image_raw', + '/oak/rgb/image_raw', + '/oak/right/image_raw', + '/oak/stereo/image_raw', + '/ouster/points', + '/livox/lidar '] # Specify options to pass to rosbag record. Default is no options. #rosbag_options : '--split --size=1024' # If True, then record all readings / commands of the vehicle interface. Default False @@ -51,4 +56,4 @@ variants: log_ros: run: log: - ros_topics: !include "../GEMstack/knowledge/defaults/standard_ros_topics.yaml" \ No newline at end of file + ros_topics: !include "../GEMstack/knowledge/defaults/standard_ros_topics.yaml" diff --git a/launch/parking_route_planner.yaml b/launch/parking_route_planner.yaml new file mode 100644 index 000000000..f54a63b0c --- /dev/null +++ b/launch/parking_route_planner.yaml @@ -0,0 +1,82 @@ +description: "Drive the GEM vehicle along a fixed route (currently xyhead_highbay_backlot_p.csv)" +mode: hardware +vehicle_interface: gem_hardware.GEMHardwareInterface +mission_execution: StandardExecutor +# require_engaged: False +# Recovery behavior after a component failure +recovery: + planning: + trajectory_tracking: + type: recovery.StopTrajectoryTracker + print: False +# Driving behavior for the GEM vehicle following a fixed route +drive: + perception: + state_estimation : GNSSStateEstimator + perception_normalization : StandardPerceptionNormalizer + planning: + parking_component: + type: ParkingSim + route_planning_component: + type: RoutePlanningComponent + motion_planning: longitudinal_planning.YieldTrajectoryPlanner + # type: RouteToTrajectoryPlanner + # args: [null] #desired speed in m/s. If null, this will keep the route untimed for the trajectory tracker + trajectory_tracking: + type: pure_pursuit.PurePursuitTrajectoryTracker + # args: [null] + # args: {desired_speed: 2.5} #approximately 5mph + print: False + +log: + # Specify the top-level folder to save the log files. 
Default is 'logs' + #folder : 'logs' + # If prefix is specified, then the log folder will be named with the prefix followed by the date and time. Default no prefix + #prefix : 'fixed_route_' + # If suffix is specified, then logs will output to folder/prefix+suffix. Default uses date and time as the suffix + #suffix : 'test3' + # Specify which ros topics to record to vehicle.bag. Default records nothing. This records the "standard" ROS topics. + ros_topics : ['/septentrio_gnss/insnavgeod','/pacmod/parsed_tx/vehicle_speed_rpt'] + # Specify options to pass to rosbag record. Default is no options. + #rosbag_options : '--split --size=1024' + # If True, then record all readings / commands of the vehicle interface. Default False + vehicle_interface : True + # Specify which components to record to behavior.json. Default records nothing + components : ['state_estimation','trajectory_tracking'] + # Specify which components of state to record to state.json. Default records nothing + #state: ['all'] + # Specify the rate in Hz at which to record state to state.json. 
Default records at the pipeline's rate + #state_rate: 10 +replay: # Add items here to set certain topics / inputs to be replayed from logs + # Specify which log folder to replay from + log: + # For replaying sensor data, try !include "../knowledge/defaults/standard_sensor_ros_topics.yaml" + ros_topics : [] + components : [] + +#usually can keep this constant +computation_graph: !include "../GEMstack/knowledge/defaults/computation_graph.yaml" + +after: + show_log_folder: True #set to false to avoid showing the log folder + +#on load, variants will overload the settingsNone structure +variants: + #sim variant doesn't execute on the real vehicle + #real variant executes on the real robot + sim: + run: + mode: simulation + vehicle_interface: + type: gem_simulator.GEMDoubleIntegratorSimulationInterface + args: + scene: !relative_path '../scenes/xyhead_demo.yaml' + + drive: + perception: + state_estimation : OmniscientStateEstimator + agent_detection : OmniscientAgentDetector + visualization: [!include "klampt_visualization.yaml", !include "mpl_visualization.yaml"] + log_ros: + log: + ros_topics : !include "../GEMstack/knowledge/defaults/standard_ros_topics.yaml" diff --git a/launch/pedestrian_detection.yaml b/launch/pedestrian_detection.yaml new file mode 100644 index 000000000..2f9ed8e8f --- /dev/null +++ b/launch/pedestrian_detection.yaml @@ -0,0 +1,101 @@ +description: "Run the yielding trajectory planner on the real vehicle with real perception" +mode: hardware +vehicle_interface: gem_hardware.GEMHardwareInterface +mission_execution: StandardExecutor + +# Recovery behavior after a component failure +recovery: + planning: + trajectory_tracking : recovery.StopTrajectoryTracker + +# Driving behavior for the GEM vehicle. Runs real pedestrian perception, yield planner, but does not send commands to real vehicle. 
+drive: + perception: + state_estimation : GNSSStateEstimator + agent_detection : pedestrian_detection.PedestrianDetector2D + perception_normalization : StandardPerceptionNormalizer + planning: + relations_estimation: pedestrian_yield_logic.PedestrianYielder + route_planning: + type: StaticRoutePlanner + args: [!relative_path '../GEMstack/knowledge/routes/forward_15m.csv','start'] + motion_planning: longitudinal_planning.YieldTrajectoryPlanner + trajectory_tracking: + type: pure_pursuit.PurePursuitTrajectoryTracker + print: False + + +log: + # Specify the top-level folder to save the log files. Default is 'logs' + #folder : 'logs' + # If prefix is specified, then the log folder will be named with the prefix followed by the date and time. Default no prefix + #prefix : 'fixed_route_' + # If suffix is specified, then logs will output to folder/prefix+suffix. Default uses date and time as the suffix + #suffix : 'test3' + # Specify which ros topics to record to vehicle.bag. Default records nothing. This records the "standard" ROS topics. + ros_topics : + # Specify options to pass to rosbag record. Default is no options. + #rosbag_options : '--split --size=1024' + # If True, then record all readings / commands of the vehicle interface. Default False + vehicle_interface : True + # Specify which components to record to behavior.json. Default records nothing + components : ['state_estimation','agent_detection','motion_planning'] + # Specify which components of state to record to state.json. Default records nothing + #state: ['all'] + # Specify the rate in Hz at which to record state to state.json. 
Default records at the pipeline's rate + #state_rate: 10 +replay: # Add items here to set certain topics / inputs to be replayed from logs + # Specify which log folder to replay from + log: + ros_topics : [] + components : [] + +#usually can keep this constant +computation_graph: !include "../GEMstack/knowledge/defaults/computation_graph.yaml" + +variants: + detector_only: + run: + description: "Run the pedestrian detection code" + drive: + planning: + trajectory_tracking: + visualization: !include "mpl_visualization.yaml" + + real_sim: + run: + description: "Run the pedestrian detection code with real detection and fake simulation" + mode: hardware + vehicle_interface: + type: gem_mixed.GEMRealSensorsWithSimMotionInterface + args: + scene: !relative_path '../scenes/xyhead_demo.yaml' + mission_execution: StandardExecutor + require_engaged: False + visualization: !include "klampt_visualization.yaml" + drive: + perception: + state_estimation : OmniscientStateEstimator + + + fake_real: + run: + description: "Run the yielding trajectory planner on the real vehicle with faked perception" + drive: + perception: + agent_detection : pedestrian_detection.FakePedestrianDetector2D + + fake_sim: + run: + description: "Run the yielding trajectory planner in simulation with faked perception" + mode: simulation + vehicle_interface: + type: gem_simulator.GEMDoubleIntegratorSimulationInterface + args: + scene: !relative_path '../scenes/xyhead_demo.yaml' + visualization: !include "klampt_visualization.yaml" + drive: + perception: + # agent_detection : pedestrian_detection.FakePedestrianDetector2D + agent_detection : OmniscientAgentDetector #this option reads agents from the simulator + state_estimation : OmniscientStateEstimator diff --git a/launch/summon_trapezoidal_planner_only.yaml b/launch/summon_trapezoidal_planner_only.yaml new file mode 100644 index 000000000..d26d5be44 --- /dev/null +++ b/launch/summon_trapezoidal_planner_only.yaml @@ -0,0 +1,82 @@ +description: "Drive the 
GEM vehicle along a fixed route (currently xyhead_highbay_backlot_p.csv)" +mode: hardware +vehicle_interface: gem_hardware.GEMHardwareInterface +mission_execution: StandardExecutor +# require_engaged: False +# Recovery behavior after a component failure +recovery: + planning: + trajectory_tracking: + type: recovery.StopTrajectoryTracker + print: False +# Driving behavior for the GEM vehicle following a fixed route +drive: + perception: + state_estimation : GNSSStateEstimator + perception_normalization : StandardPerceptionNormalizer + planning: + # route_planning: + # type: StaticRoutePlanner + # args: [!relative_path '../GEMstack/knowledge/routes/forward_15m.csv'] #forward_15m + summon_component: + type: SummonSim + motion_planning: longitudinal_planning.YieldTrajectoryPlanner + # type: RouteToTrajectoryPlanner + # args: [null] #desired speed in m/s. If null, this will keep the route untimed for the trajectory tracker + trajectory_tracking: + type: pure_pursuit.PurePursuitTrajectoryTracker + # args: [null] + # args: {desired_speed: 2.5} #approximately 5mph + print: False +log: + # Specify the top-level folder to save the log files. Default is 'logs' + #folder : 'logs' + # If prefix is specified, then the log folder will be named with the prefix followed by the date and time. Default no prefix + #prefix : 'fixed_route_' + # If suffix is specified, then logs will output to folder/prefix+suffix. Default uses date and time as the suffix + #suffix : 'test3' + # Specify which ros topics to record to vehicle.bag. Default records nothing. This records the "standard" ROS topics. + ros_topics : ['/septentrio_gnss/insnavgeod','/pacmod/parsed_tx/vehicle_speed_rpt'] + # Specify options to pass to rosbag record. Default is no options. + #rosbag_options : '--split --size=1024' + # If True, then record all readings / commands of the vehicle interface. Default False + vehicle_interface : True + # Specify which components to record to behavior.json. 
Default records nothing + components : ['state_estimation','trajectory_tracking'] + # Specify which components of state to record to state.json. Default records nothing + #state: ['all'] + # Specify the rate in Hz at which to record state to state.json. Default records at the pipeline's rate + #state_rate: 10 +replay: # Add items here to set certain topics / inputs to be replayed from logs + # Specify which log folder to replay from + log: + # For replaying sensor data, try !include "../knowledge/defaults/standard_sensor_ros_topics.yaml" + ros_topics : [] + components : [] + +#usually can keep this constant +computation_graph: !include "../GEMstack/knowledge/defaults/computation_graph.yaml" + +after: + show_log_folder: True #set to false to avoid showing the log folder + +#on load, variants will overload the settingsNone structure +variants: + #sim variant doesn't execute on the real vehicle + #real variant executes on the real robot + sim: + run: + mode: simulation + vehicle_interface: + type: gem_simulator.GEMDoubleIntegratorSimulationInterface + args: + scene: !relative_path '../scenes/xyhead_demo.yaml' + + drive: + perception: + state_estimation : OmniscientStateEstimator + agent_detection : OmniscientAgentDetector + visualization: [!include "klampt_visualization.yaml", !include "mpl_visualization.yaml"] + log_ros: + log: + ros_topics : !include "../GEMstack/knowledge/defaults/standard_ros_topics.yaml" diff --git a/requirements.txt b/requirements.txt index 94db8ba43..79ee0e62b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,6 +4,16 @@ scipy matplotlib torch shapely -klampt +klampt==0.9.2 pyyaml dacite + +# Perception +ultralytics +lap==0.5.12 +open3d + +#infra +msal +parameterized +requests \ No newline at end of file diff --git a/run_docker_container.sh b/run_docker_container.sh new file mode 100644 index 000000000..ac0f998b3 --- /dev/null +++ b/run_docker_container.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# Check if container is already running +if [ 
"$(docker ps -q -f name=gem_stack-container)" ]; then + docker exec -it gem_stack-container bash +else + UID=$(id -u) GID=$(id -g) docker compose -f setup/docker-compose.yaml up -d + docker exec -it gem_stack-container bash +fi \ No newline at end of file diff --git a/scenes/highbay.yaml b/scenes/highbay.yaml new file mode 100644 index 000000000..57b1bdff3 --- /dev/null +++ b/scenes/highbay.yaml @@ -0,0 +1,11 @@ +#TODO: change visualization to display highbay map for replaying/visualization of recorded data +vehicle_state: [0.0, 0.0, 200.0, 0.0, 0.0] +agents: + ped1: + type: pedestrian + position: [15.0, 2.0] + nominal_velocity: 0.5 + target: [15.0,10.0] + behavior: loop + + diff --git a/scenes/xyhead_demo.yaml b/scenes/xyhead_demo.yaml index c6d97477a..70e922461 100644 --- a/scenes/xyhead_demo.yaml +++ b/scenes/xyhead_demo.yaml @@ -6,4 +6,15 @@ agents: nominal_velocity: 0.5 target: [15.0,10.0] behavior: loop - \ No newline at end of file + ped2: + type: pedestrian + position: [30.0, 2.0] + nominal_velocity: 0.4 + target: [30.0,10.0] + behavior: loop + ped3: + type: pedestrian + position: [50.0, 2.0] + nominal_velocity: 0.5 + target: [50.0,10.0] + behavior: loop \ No newline at end of file diff --git a/setup/Dockerfile b/setup/Dockerfile deleted file mode 100644 index b35c3b85d..000000000 --- a/setup/Dockerfile +++ /dev/null @@ -1,49 +0,0 @@ -FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 -#use bash instead of sh -RUN rm /bin/sh && ln -s /bin/bash /bin/sh -RUN apt-get update && apt-get install -y git python3 python3-pip wget zstd -# Install ROS Noetic -RUN apt-get update && apt-get install -y lsb-release gnupg2 -RUN sh -c 'echo "deb http://packages.ros.org/ros/ubuntu focal main" > /etc/apt/sources.list.d/ros-latest.list' -RUN wget https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc -RUN apt-key add ros.asc -RUN apt-get update -RUN DEBIAN_FRONTEND=noninteractive apt-get install -y ros-noetic-desktop -RUN apt-get install -y python3-rosdep 
python3-rosinstall python3-rosinstall-generator python3-wstool build-essential python3-catkin-tools -RUN rosdep init -RUN rosdep update - -#Install Cuda 11.8 -#RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin -#RUN sudo mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600 -##add public keys -#RUN sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub -#RUN sudo add-apt-repository "deb http://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/ /" -#RUN install cuda-toolkit-11-8 - - -# install Zed SDK -RUN wget https://download.stereolabs.com/zedsdk/4.0/cu118/ubuntu20 -O zed_sdk.run -RUN chmod +x zed_sdk.run -RUN ./zed_sdk.run -- silent - -# create ROS Catkin workspace -RUN mkdir -p /catkin_ws/src - -# install ROS dependencies and packages -RUN cd /catkin_ws/src && git clone https://github.com/krishauser/POLARIS_GEM_e2.git -RUN cd /catkin_ws/src && git clone --recurse-submodules https://github.com/stereolabs/zed-ros-wrapper.git -RUN cd /catkin_ws/src && git clone https://github.com/astuff/pacmod2.git - #for some reason the ibeo messages don't work? 
-RUN cd /catkin_ws/src && git clone https://github.com/astuff/astuff_sensor_msgs.git && rm -rf astuff_sensor_msgs/ibeo_msgs -RUN cd /catkin_ws/src && git clone https://github.com/ros-perception/radar_msgs.git \ - && cd radar_msgs && git checkout noetic - -RUN source /opt/ros/noetic/setup.bash && cd /catkin_ws && rosdep install --from-paths src --ignore-src -r -y -RUN source /opt/ros/noetic/setup.bash && cd /catkin_ws && catkin_make -DCMAKE_BUILD_TYPE=Release - -# install GEMstack Python dependencies -RUN git clone https://github.com/krishauser/GEMstack.git -RUN cd GEMstack && pip3 install -r requirements.txt - -RUN echo /catkin_ws/devel/setup.sh diff --git a/setup/Dockerfile.cuda11.8 b/setup/Dockerfile.cuda11.8 new file mode 100644 index 000000000..464b3b29b --- /dev/null +++ b/setup/Dockerfile.cuda11.8 @@ -0,0 +1,82 @@ +FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 + +ARG USER_UID=1000 +ARG USER_GID=1000 +ARG USER=$(whoami) + +ENV DEBIAN_FRONTEND=noninteractive + +#use bash instead of sh +SHELL ["/bin/bash", "-c"] + +RUN apt-get update && apt-get install -y git python3 python3-pip wget zstd + +# Set time zone non-interactively +ENV TZ=America/Chicago +RUN ln -fs /usr/share/zoneinfo/$TZ /etc/localtime \ + && echo $TZ > /etc/timezone \ + && apt-get update && apt-get install -y tzdata \ + && rm -rf /var/lib/apt/lists/* + +RUN wget https://stereolabs.sfo2.cdn.digitaloceanspaces.com/zedsdk/4.2/ZED_SDK_Ubuntu20_cuda11.8_v4.2.4.zstd.run -O zed_sdk.run +RUN chmod +x zed_sdk.run +RUN ./zed_sdk.run -- silent + +# Install ROS Noetic +RUN apt-get update && apt-get install -y lsb-release gnupg2 +RUN sh -c 'echo "deb http://packages.ros.org/ros/ubuntu focal main" > /etc/apt/sources.list.d/ros-latest.list' +RUN wget https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc +RUN apt-key add ros.asc +RUN apt-get update +RUN DEBIAN_FRONTEND=noninteractive apt-get install -y ros-noetic-desktop +RUN apt-get install -y python3-rosdep python3-rosinstall 
python3-rosinstall-generator python3-wstool build-essential python3-catkin-tools +RUN rosdep init +RUN rosdep update + +ARG USER +ARG USER_UID +ARG USER_GID + +# Create user more safely +RUN groupadd -g ${USER_GID} ${USER} || groupmod -n ${USER} $(getent group ${USER_GID} | cut -d: -f1) +RUN useradd -l -m -u ${USER_UID} -g ${USER_GID} ${USER} || usermod -l ${USER} -m -u ${USER_UID} -g ${USER_GID} $(getent passwd ${USER_UID} | cut -d: -f1) +RUN echo "${USER} ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers + +# Fix permissions for Python packages +RUN chown -R ${USER}:${USER} /usr/local/lib/python3.8/dist-packages/ \ + && chmod -R u+rw /usr/local/lib/python3.8/dist-packages/ + +# create ROS Catkin workspace +RUN mkdir -p /catkin_ws/src + +# install ROS dependencies and packages +RUN cd /catkin_ws/src && git clone https://github.com/krishauser/POLARIS_GEM_e2.git +RUN cd /catkin_ws/src && git clone --recurse-submodules https://github.com/stereolabs/zed-ros-wrapper.git +RUN cd /catkin_ws/src && git clone https://github.com/astuff/pacmod2.git + #for some reason the ibeo messages don't work? 
+RUN cd /catkin_ws/src && git clone https://github.com/astuff/astuff_sensor_msgs.git && rm -rf astuff_sensor_msgs/ibeo_msgs +RUN cd /catkin_ws/src && git clone https://github.com/ros-perception/radar_msgs.git \ + && cd radar_msgs && git checkout noetic + +RUN source /opt/ros/noetic/setup.bash && cd /catkin_ws && rosdep install --from-paths src --ignore-src -r -y +RUN source /opt/ros/noetic/setup.bash && cd /catkin_ws && catkin_make -DCMAKE_BUILD_TYPE=Release -j1 + +# Copy requirements.txt from host (now relative to parent directory) +COPY requirements.txt /tmp/requirements.txt + +# Install Python dependencies +RUN pip3 install -r /tmp/requirements.txt + +# Install other Dependencies +RUN apt-get install -y ros-noetic-septentrio-gnss-driver + +USER ${USER} + +# Add ROS and GEMstack paths to bashrc +RUN echo "source /opt/ros/noetic/setup.bash" >> /home/${USER}/.bashrc +RUN echo "source /catkin_ws/devel/setup.bash" >> /home/${USER}/.bashrc + +# BASE END CONFIG +WORKDIR /home/${USER} + +ENTRYPOINT [ "/bin/bash", "-l" ] diff --git a/setup/Dockerfile.cuda12 b/setup/Dockerfile.cuda12 new file mode 100644 index 000000000..32005202f --- /dev/null +++ b/setup/Dockerfile.cuda12 @@ -0,0 +1,83 @@ +FROM nvidia/cuda:12.0.0-cudnn8-devel-ubuntu20.04 + +ARG USER_UID=1000 +ARG USER_GID=1000 +ARG USER=$(whoami) + +ENV DEBIAN_FRONTEND=noninteractive + +#use bash instead of sh +SHELL ["/bin/bash", "-c"] + +RUN apt-get update && apt-get install -y git python3 python3-pip wget zstd + +# Set time zone non-interactively +ENV TZ=America/Chicago +RUN ln -fs /usr/share/zoneinfo/$TZ /etc/localtime \ + && echo $TZ > /etc/timezone \ + && apt-get update && apt-get install -y tzdata \ + && rm -rf /var/lib/apt/lists/* + +# install Zed SDK. 
If you want to install a different version, change to this https://download.stereolabs.com/zedsdk/4.2/cu12/ubuntu20 +RUN wget https://stereolabs.sfo2.cdn.digitaloceanspaces.com/zedsdk/4.2/ZED_SDK_Ubuntu20_cuda12.1_v4.2.4.zstd.run -O zed_sdk.run +RUN chmod +x zed_sdk.run +RUN ./zed_sdk.run -- silent + +# Install ROS Noetic +RUN apt-get update && apt-get install -y lsb-release gnupg2 +RUN sh -c 'echo "deb http://packages.ros.org/ros/ubuntu focal main" > /etc/apt/sources.list.d/ros-latest.list' +RUN wget https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc +RUN apt-key add ros.asc +RUN apt-get update +RUN DEBIAN_FRONTEND=noninteractive apt-get install -y ros-noetic-desktop +RUN apt-get install -y python3-rosdep python3-rosinstall python3-rosinstall-generator python3-wstool build-essential python3-catkin-tools +RUN rosdep init +RUN rosdep update + +ARG USER +ARG USER_UID +ARG USER_GID + +# Create user more safely +RUN groupadd -g ${USER_GID} ${USER} || groupmod -n ${USER} $(getent group ${USER_GID} | cut -d: -f1) +RUN useradd -l -m -u ${USER_UID} -g ${USER_GID} ${USER} || usermod -l ${USER} -m -u ${USER_UID} -g ${USER_GID} $(getent passwd ${USER_UID} | cut -d: -f1) +RUN echo "${USER} ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers + +# Fix permissions for Python packages +RUN chown -R ${USER}:${USER} /usr/local/lib/python3.8/dist-packages/ \ + && chmod -R u+rw /usr/local/lib/python3.8/dist-packages/ + +# create ROS Catkin workspace +RUN mkdir -p /catkin_ws/src + +# install ROS dependencies and packages +RUN cd /catkin_ws/src && git clone https://github.com/krishauser/POLARIS_GEM_e2.git +RUN cd /catkin_ws/src && git clone --recurse-submodules https://github.com/stereolabs/zed-ros-wrapper.git +RUN cd /catkin_ws/src && git clone https://github.com/astuff/pacmod2.git + #for some reason the ibeo messages don't work? 
+RUN cd /catkin_ws/src && git clone https://github.com/astuff/astuff_sensor_msgs.git && rm -rf astuff_sensor_msgs/ibeo_msgs +RUN cd /catkin_ws/src && git clone https://github.com/ros-perception/radar_msgs.git \ + && cd radar_msgs && git checkout noetic + +RUN source /opt/ros/noetic/setup.bash && cd /catkin_ws && rosdep install --from-paths src --ignore-src -r -y +RUN source /opt/ros/noetic/setup.bash && cd /catkin_ws && catkin_make -DCMAKE_BUILD_TYPE=Release + +# Copy requirements.txt from host (now relative to parent directory) +COPY requirements.txt /tmp/requirements.txt + +# Install Python dependencies +RUN pip3 install -r /tmp/requirements.txt + +# Install other Dependencies +RUN apt-get install -y ros-noetic-septentrio-gnss-driver + +USER ${USER} + +# Add ROS and GEMstack paths to bashrc +RUN echo "source /opt/ros/noetic/setup.bash" >> /home/${USER}/.bashrc +RUN echo "source /catkin_ws/devel/setup.bash" >> /home/${USER}/.bashrc + +# BASE END CONFIG +WORKDIR /home/${USER} + +ENTRYPOINT ["/bin/bash"] \ No newline at end of file diff --git a/setup/build_docker_image.sh b/setup/build_docker_image.sh new file mode 100644 index 000000000..0852f27ab --- /dev/null +++ b/setup/build_docker_image.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +echo "Select CUDA version:" +echo "1) CUDA 11.8" +echo "2) CUDA 12+" +read -p "Enter choice [1-2]: " choice + +case $choice in + 1) + DOCKERFILE=setup/Dockerfile.cuda11.8 + ;; + 2) + DOCKERFILE=setup/Dockerfile.cuda12 + ;; + *) + echo "Invalid choice" + exit 1 + ;; +esac + +export DOCKERFILE +UID=$(id -u) GID=$(id -g) docker compose -f setup/docker-compose.yaml build \ No newline at end of file diff --git a/setup/docker-compose.yaml b/setup/docker-compose.yaml new file mode 100644 index 000000000..f0c7703b8 --- /dev/null +++ b/setup/docker-compose.yaml @@ -0,0 +1,41 @@ +version: '3.9' + +services: + gem-stack-ubuntu-20.04-CUDA: + image: gem_stack + container_name: gem_stack-container + build: + context: .. 
+ dockerfile: ${DOCKERFILE:-setup/Dockerfile.cuda11.8} # Default to cuda11.8 if not specified + args: + USER: ${USER} + USER_UID: ${UID:-1000} # Pass host UID + USER_GID: ${GID:-1000} # Pass host GID + stdin_open: true + tty: true + volumes: + # - "/etc/group:/etc/group:ro" + # - "/etc/passwd:/etc/passwd:ro" + # - "/etc/shadow:/etc/shadow:ro" + # - "/etc/sudoers.d:/etc/sudoers.d:ro" + - "~:/home/${USER}/host" + - "/tmp/.X11-unix:/tmp/.X11-unix:rw" + environment: + - DISPLAY=${DISPLAY} + - XDG_RUNTIME_DIR=/tmp/runtime-${USER} + - DBUS_SYSTEM_BUS_ADDRESS=unix:path=/var/run/dbus/system_bus_socket + - DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${UID}/bus + - NVIDIA_DRIVER_CAPABILITIES=all + - NVIDIA_VISIBLE_DEVICES=all + # - LIBGL_ALWAYS_SOFTWARE=1 # Uncomment if you want to use software rendering (No GPU) + network_mode: host + ipc: host + user: "${USER}:${USER}" + # Un-Comment the following lines if you want to use Nvidia GPU + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: all # alternatively, use `count: all` for all GPUs + capabilities: [gpu] diff --git a/setup/get_nvidia_container.sh b/setup/get_nvidia_container.sh index 466b1989e..4f738e2c8 100755 --- a/setup/get_nvidia_container.sh +++ b/setup/get_nvidia_container.sh @@ -1,9 +1,14 @@ #!/bin/bash -curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey | \ - sudo apt-key add - -distribution=$(. 
/etc/os-release;echo $ID$VERSION_ID) -curl -s -L https://nvidia.github.io/nvidia-container-runtime/$distribution/nvidia-container-runtime.list | \ - sudo tee /etc/apt/sources.list.d/nvidia-container-runtime.list +curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ + && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \ + sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ + sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list + +sed -i -e '/experimental/ s/^#//g' /etc/apt/sources.list.d/nvidia-container-toolkit.list + sudo apt-get update -sudo apt-get install nvidia-container-runtime \ No newline at end of file +sudo apt-get install -y nvidia-container-toolkit + +sudo nvidia-ctk runtime configure --runtime=docker +sudo systemctl restart docker \ No newline at end of file diff --git a/setup/setup_this_machine.sh b/setup/setup_this_machine.sh index e30b3aa23..36690f1d4 100755 --- a/setup/setup_this_machine.sh +++ b/setup/setup_this_machine.sh @@ -1,30 +1,88 @@ #!/bin/bash sudo apt update -sudo apt-get install git python3 python3-pip wget zstd +sudo apt-get install -y git python3 python3-pip wget zstd + +if [ ! -f /opt/ros/noetic/setup.bash ]; then + echo "ROS Noetic not found. Installing ROS Noetic..." 
+ sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list' + wget https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc + sudo apt-key add ros.asc + sudo apt update + sudo DEBIAN_FRONTEND=noninteractive apt install -y ros-noetic-desktop + sudo apt install -y python3-rosdep python3-rosinstall python3-rosinstall-generator python3-wstool build-essential python3-catkin-tools + sudo rosdep init + rosdep update +fi source /opt/ros/noetic/setup.bash #install Zed SDK -wget https://download.stereolabs.com/zedsdk/4.0/cu121/ubuntu20 -O ZED_SDK_Ubuntu20_cuda11.8_v4.0.8.zstd.run -chmod +x ZED_SDK_Ubuntu20_cuda11.8_v4.0.8.zstd.run -./ZED_SDK_Ubuntu20_cuda11.8_v4.0.8.zstd.run -- silent +echo "To install the ZED SDK, select the CUDA version:" +echo "1) CUDA 11.8" +echo "2) CUDA 12+" +echo "3) No GPU (Skip ZED SDK installation)" +read -p "Enter choice [1-3]: " choice + +case $choice in + 1) + wget https://download.stereolabs.com/zedsdk/4.0/cu118/ubuntu20 -O zed_sdk.run + chmod +x zed_sdk.run + ./zed_sdk.run -- silent + ;; + 2) + wget https://stereolabs.sfo2.cdn.digitaloceanspaces.com/zedsdk/4.2/ZED_SDK_Ubuntu20_cuda12.1_v4.2.4.zstd.run -O zed_sdk.run + chmod +x zed_sdk.run + ./zed_sdk.run -- silent + ;; + 3) + echo "Skipping ZED SDK installation..." + ;; + *) + echo "Invalid choice" + exit 1 + ;; +esac #create ROS Catkin workspace -mkdir catkin_ws -mkdir catkin_ws/src +mkdir -p ~/catkin_ws/src + +# Store current working directory +CURRENT_DIR=$(pwd) +echo "CURRENT_DIR: $CURRENT_DIR" #install ROS dependencies and packages -cd catkin_ws/src +cd ~/catkin_ws/src git clone https://github.com/krishauser/POLARIS_GEM_e2.git git clone https://github.com/astuff/pacmod2.git -git clone https://github.com/astuff/astuff_sensor_msgs.git -git clone https://github.com/ros-perception/radar_msgs.git -cd radar_msgs; git checkout noetic; cd .. 
+git clone https://github.com/astuff/astuff_sensor_msgs.git && rm -rf astuff_sensor_msgs/ibeo_msgs +git clone https://github.com/ros-perception/radar_msgs.git && cd radar_msgs; git checkout noetic; cd .. cd .. #back to catkin_ws +rosdep update rosdep install --from-paths src --ignore-src -r -y catkin_make -DCMAKE_BUILD_TYPE=Release source devel/setup.bash -cd .. #back to GEMstack +cd $CURRENT_DIR #install GEMstack Python dependencies -python3 -m pip install -r requirements.txt \ No newline at end of file + +# Ask the user if they want to install ultralytics +read -p "Do you want to install ultralytics? (y/n): " install_ultralytics + +# Create a temporary requirements file +temp_requirements="temp_requirements.txt" + +# Copy all lines except ultralytics if the user chooses to skip it +if [ "$install_ultralytics" == "y" ]; then + cp requirements.txt $temp_requirements +else + grep -v "ultralytics" requirements.txt > $temp_requirements +fi + +# Install the packages from the temporary requirements file +pip3 install -r $temp_requirements + +# Clean up the temporary file +rm $temp_requirements + +#install other dependencies +sudo apt-get install -y ros-noetic-septentrio-gnss-driver \ No newline at end of file diff --git a/stop_docker_container.sh b/stop_docker_container.sh new file mode 100644 index 000000000..1b1bad2c8 --- /dev/null +++ b/stop_docker_container.sh @@ -0,0 +1,3 @@ +#! 
/bin/bash + +docker compose -f setup/docker-compose.yaml down \ No newline at end of file diff --git a/testing/test_comfort_metrics.py b/testing/test_comfort_metrics.py new file mode 100644 index 000000000..f0b162fb3 --- /dev/null +++ b/testing/test_comfort_metrics.py @@ -0,0 +1,236 @@ +import sys +import os +sys.path.append(os.getcwd()) + +from GEMstack.state import AgentEnum +import json +import matplotlib.pyplot as plt +import numpy as np + +CMAP = "RdYlGn" + +def compute_safety_factor(value, safe_thresh, unsafe_thresh, flip=False): + """ + Computes a safety factor between 0(unsafe) and 1(safe) + If flip is True, the threshold bounds are reversed. + """ + abs_val = abs(value) + if abs_val <= safe_thresh: + factor = 1.0 + elif abs_val >= unsafe_thresh: + factor = 0.0 + else: + factor = 1.0 - (abs_val - safe_thresh) / (unsafe_thresh - safe_thresh) + + if flip: + return 1.0 - factor + return factor + +def parse_behavior_log(filename): + """ + Parses the behavior log file and extracts the following data: + - vehicle time + - vehicle acceleration + - vehicle heading rate + - pedestrian time + - pedestrian distance + """ + times = [] + accelerations = [] + heading_rates = [] + pedestrian_times = [] + pedestrian_distances = [] + + with open(filename, 'r') as f: + for line in f: + try: + entry = json.loads(line) + except json.JSONDecodeError: + print(f"Skipping invalid JSON line: {line.strip()}") + continue + # Process vehicle state data + if "vehicle" in entry: + t = entry.get("time") + vehicle_data = entry["vehicle"].get("data", {}) + acceleration = vehicle_data.get("acceleration") + heading_rate = vehicle_data.get("heading_rate") + # Only add if all fields are available + if None not in (t, acceleration, heading_rate): + times.append(t) + accelerations.append(acceleration) + heading_rates.append(heading_rate) + # Process agent state data + if "agents" in entry: + for agent in entry["agents"].values(): + agent_data = agent.get("data", {}) + # Check if the agent is a 
pedestrian + if agent_data.get("type") == AgentEnum.PEDESTRIAN.value: + t = entry.get("time") + pose = agent_data.get("pose", {}) + x_agent = pose.get("x") + y_agent = pose.get("y") + if None not in (t, x_agent, y_agent): + pedestrian_times.append(t) + dist = np.sqrt(x_agent**2 + y_agent**2) + pedestrian_distances.append(dist) + + return (np.array(times), np.array(accelerations), np.array(heading_rates), + np.array(pedestrian_times), np.array(pedestrian_distances)) + +def parse_tracker_csv(filename): + """ + Parses the pure pursuit tracker log file and extracts the following data: + - vehicle time (from column index 18) + - Crosstrack error (from column index 20) + - X position actual (from column index 2) + - Y position actual (from column index 5) + - X position desired (from column index 11) + - Y position desired (from column index 14) + """ + data = np.genfromtxt(filename, delimiter=',', skip_header=1) + vehicle_time = data[:, 18] + cte = data[:, 20] + x_actual, y_actual = data[:, 2], data[:, 5] + x_desired, y_desired = data[:, 11], data[:, 14] + return vehicle_time, cte, x_actual, y_actual, x_desired, y_desired + +def compute_derivative(times, values): + """ + Computes the derivative of array with respect to time. + Returns the time array and derivative values. 
+ """ + dt = np.diff(times) + dv = np.diff(values) + derivative = dv / dt + return times[1:], derivative + +def add_safety_colorbar(figure): + """Adds a colorbar to the right of the figure""" + cbar_ax = figure.add_axes([0.92, 0.2, 0.02, 0.6]) + sm = plt.cm.ScalarMappable(cmap=CMAP) + cbar = figure.colorbar(sm, cax=cbar_ax) + cbar.set_label("Comfort/Safety Level") + +def plot_metrics(time_jerk, jerk, time_heading_acc, heading_acc, vehicle_time, cte, + x_actual, y_actual, x_desired, y_desired, pedestrian_times, pedestrian_distances): + """Plots all metrics in 2x3 grid""" + fig, axs = plt.subplots(2, 3, figsize=(12, 8)) + fig.subplots_adjust(hspace=0.375, wspace=0.35) + axs[1,2].axis('off') + + plot_jerk(axs[0,0], time_jerk, jerk) + plot_heading_acceleration(axs[0,1], time_heading_acc, heading_acc) + plot_crosstrack_error(axs[1,0], vehicle_time, cte) + plot_position(axs[1,1], x_actual, y_actual, x_desired, y_desired) + plot_pedestrian_dist(axs[0,2], pedestrian_times, pedestrian_distances) + + # Colorbar on the right side + add_safety_colorbar(fig) + + plt.show() + +def plot_jerk(axis, time, jerk, safe_thresh=1.0, unsafe_thresh=2.5): + """Plots vehicle jerk (rate of acceleration) vs. time""" + safety_scores = np.vectorize(compute_safety_factor)(jerk, safe_thresh, unsafe_thresh) + + axis.plot(time, jerk, color="black", linewidth=0.8, alpha=0.5) + axis.scatter(time, jerk, c=safety_scores, cmap=CMAP, vmin=0, vmax=1, edgecolors="black") + + axis.set_xlabel("Time (s)") + axis.set_ylabel("Jerk (m/s³)") + axis.set_title("Vehicle Jerk Over Time") + axis.grid(True) + +def plot_heading_acceleration(axis, time, heading_acc, safe_thresh=0.0075, unsafe_thresh=0.25): + """Plots vehicle heading acceleration vs. 
time""" + safety_scores = np.vectorize(compute_safety_factor)(heading_acc, safe_thresh, unsafe_thresh) + + axis.plot(time, heading_acc, color="black", linewidth=0.8, alpha=0.5) + axis.scatter(time, heading_acc, c=safety_scores, cmap=CMAP, vmin=0, vmax=1, edgecolors="black") + + axis.set_xlabel("Time (s)") + axis.set_ylabel("Heading Acceleration (rad/s²)") + axis.set_title("Vehicle Heading Acceleration Over Time") + axis.grid(True) + +def plot_crosstrack_error(axis, time, cte, safe_thresh=0.1, unsafe_thresh=0.4): + """Plots vehicle cross track error vs. time""" + safety_scores = np.vectorize(compute_safety_factor)(cte, safe_thresh, unsafe_thresh) + + axis.plot(time, cte, color="black", linewidth=0.8, alpha=0.5) + axis.scatter(time, cte, c=safety_scores, cmap=CMAP, vmin=0, vmax=1, edgecolors="black") + + axis.set_xlabel("Time (s)") + axis.set_ylabel("Cross Track Error") + axis.set_title("Vehicle Cross Track Error Over Time") + axis.grid(True) + +def plot_position(axis, x_actual, y_actual, x_desired, y_desired, safe_thresh=1, unsafe_thresh=2.5): + """Plots vehicle actual and desired positions vs. time""" + position_error = np.sqrt((x_desired - x_actual) ** 2 + (y_desired - y_actual) ** 2) + safety_scores = np.vectorize(compute_safety_factor)(position_error, safe_thresh, unsafe_thresh) + + axis.plot(y_desired, x_desired, marker='.', linestyle=':', color='blue', label='Desired') + axis.plot(y_actual, x_actual, color="black", linewidth=0.8, alpha=0.5) + axis.scatter(y_actual, x_actual, c=safety_scores, cmap=CMAP, vmin=0, vmax=1, edgecolors="black") + + axis.set_xlabel("Y Position (m)") + axis.set_ylabel("X Position (m)") + axis.set_title("Vehicle Position vs. Desired Position") + axis.legend() + axis.grid(True) + +def plot_pedestrian_dist(axis, pedestrian_times, pedestrian_distances, safe_thresh=5.0, unsafe_thresh=2.0): + """Plots pedestrian distance to vehicle vs. 
time""" + if len(pedestrian_times) > 0: + safety_scores = np.vectorize(compute_safety_factor)(pedestrian_distances, safe_thresh, unsafe_thresh, flip=True) + axis.plot(pedestrian_times, pedestrian_distances, color="black", linewidth=0.8, alpha=0.5) + axis.scatter(pedestrian_times, pedestrian_distances, c=safety_scores, cmap=CMAP, vmin=0, vmax=1, edgecolors="black") + + axis.set_xlabel("Time (s)") + axis.set_ylabel("Pedestrian Distance (m)") + axis.set_title("Pedestrian Distance Over Time") + axis.grid(True) + +if __name__=='__main__': + if len(sys.argv) != 2: + print("Usage: python test_comfort_metrics.py ") + sys.exit(1) + + log_dir = sys.argv[1] + behavior_file = os.path.join(log_dir, "behavior.json") + tracker_file = os.path.join(log_dir, "PurePursuitTrajectoryTracker_debug.csv") + + # if behavior.json doesn't exist, print error and exit + if not os.path.exists(behavior_file): + print("Error: behavior.json file is missing in log folder.") + sys.exit(1) + + # Parse behavior log file and compute metrics + times, accelerations, heading_rates, ped_times, ped_distances = parse_behavior_log(behavior_file) + time_jerk, jerk = compute_derivative(times, accelerations) + time_heading_acc, heading_acc = compute_derivative(times, heading_rates) + + # Pure pursuit tracker file exists: parse and plot all metrics + if os.path.exists(tracker_file): + vehicle_time, cte, x_actual, y_actual, x_desired, y_desired = parse_tracker_csv(tracker_file) + plot_metrics(time_jerk, jerk, time_heading_acc, heading_acc, vehicle_time, cte, + x_actual, y_actual, x_desired, y_desired, ped_times, ped_distances) + + print("RMS Cross Track Error:", np.sqrt(np.mean(cte**2)), "m") + print("RMS Position Error:", np.sqrt(np.mean((x_actual - x_desired)**2 + (y_actual - y_desired)**2)), 'm') + # Pure pursuit tracker file is missing: plot only behavior.json metrics + else: + print("Tracker file is missing. 
Skipping cross track error and vehicle position plots.") + # Plot only jerk, heading acceleration, and pedestrian distance + fig, axs = plt.subplots(1, 3, figsize=(12, 4)) + plot_jerk(axs[0], time_jerk, jerk) + plot_heading_acceleration(axs[1], time_heading_acc, heading_acc) + plot_pedestrian_dist(axs[2], ped_times, ped_distances) + add_safety_colorbar(fig) + plt.show() + + print("RMS Jerk:", np.sqrt(np.mean(jerk**2)), "m/s³") + print("RMS Heading Acceleration:", np.sqrt(np.mean(heading_acc**2)), "rad/s²") + if len(ped_distances) > 0: + print("Minimum Pedestrian Distance:", np.min(ped_distances), "m") \ No newline at end of file diff --git a/webcam.py b/webcam.py new file mode 100644 index 000000000..52f1b36a8 --- /dev/null +++ b/webcam.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +import rospy +import cv2 +from sensor_msgs.msg import Image +from cv_bridge import CvBridge + +def webcam_publisher(): + rospy.init_node('webcam_publisher', anonymous=True) + + image_pub = rospy.Publisher('/webcam', Image, queue_size=10) + + cap = cv2.VideoCapture(0) + if not cap.isOpened(): + rospy.logerr("cannot open camera") + return + + bridge = CvBridge() + + rate = rospy.Rate(30) # 30 Hz + + try: + while not rospy.is_shutdown(): + ret, frame = cap.read() + if not ret: + rospy.logerr("cannnot read frames") + break + + cv2.imshow("Webcam", frame) + + if cv2.waitKey(1) & 0xFF == ord('q'): + rospy.loginfo("press q to exit") + break + + try: + ros_image = bridge.cv2_to_imgmsg(frame, "bgr8") + except Exception as e: + rospy.logerr(f"failed to convert image: {e}") + continue + + image_pub.publish(ros_image) + rospy.loginfo("publish frame") + + rate.sleep() + + except KeyboardInterrupt: + rospy.loginfo("user key interrupt") + + finally: + cap.release() + cv2.destroyAllWindows() + rospy.loginfo("freed camera resources") + +if __name__ == '__main__': + webcam_publisher() \ No newline at end of file