Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion GEMstack/knowledge/vehicle/gem_e4_sensor_environment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,4 +3,7 @@ ros_topics:
front_camera: /oak/rgb/image_raw
front_depth: /oak/stereo/image_raw
gnss: /septentrio_gnss/insnavgeod

front_left_camera: /camera_fl/arena_camera_node/image_raw
front_right_camera: /camera_fr/arena_camera_node/image_raw
rear_left_camera: /camera_rl/arena_camera_node/image_raw
rear_right_camera: /camera_rr/arena_camera_node/image_raw
52 changes: 52 additions & 0 deletions GEMstack/onboard/interface/gem_gazebo.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,10 @@ def __init__(self):

# Other sensors
self.front_camera_sub = None
self.front_left_camera_sub = None
self.front_right_camera_sub = None
self.rear_left_camera_sub = None
self.rear_right_camera_sub = None
self.front_depth_sub = None
self.top_lidar_sub = None
self.front_radar_sub = None
Expand Down Expand Up @@ -169,6 +173,54 @@ def callback_with_cv2(msg: Image):
cv_image = conversions.ros_Image_to_cv2(msg, desired_encoding="bgr8")
callback(cv_image)
self.front_camera_sub = rospy.Subscriber(topic, Image, callback_with_cv2)

elif name == 'front_left_camera':
topic = self.ros_sensor_topics[name]
if type is not None and (type is not Image and type is not cv2.Mat):
raise ValueError("GEMGazeboInterface only supports Image or OpenCV for front left camera")
if type is None or type is Image:
self.front_left_camera_sub = rospy.Subscriber(topic, Image, callback)
else:
def callback_with_cv2(msg: Image):
cv_image = conversions.ros_Image_to_cv2(msg, desired_encoding="bgr8")
callback(cv_image)
self.front_left_camera_sub = rospy.Subscriber(topic, Image, callback_with_cv2)

elif name == 'front_right_camera':
topic = self.ros_sensor_topics[name]
if type is not None and (type is not Image and type is not cv2.Mat):
raise ValueError("GEMGazeboInterface only supports Image or OpenCV for front right camera")
if type is None or type is Image:
self.front_right_camera_sub = rospy.Subscriber(topic, Image, callback)
else:
def callback_with_cv2(msg: Image):
cv_image = conversions.ros_Image_to_cv2(msg, desired_encoding="bgr8")
callback(cv_image)
self.front_right_camera_sub = rospy.Subscriber(topic, Image, callback_with_cv2)

elif name == 'rear_left_camera':
topic = self.ros_sensor_topics[name]
if type is not None and (type is not Image and type is not cv2.Mat):
raise ValueError("GEMGazeboInterface only supports Image or OpenCV for rear left camera")
if type is None or type is Image:
self.rear_left_camera_sub = rospy.Subscriber(topic, Image, callback)
else:
def callback_with_cv2(msg: Image):
cv_image = conversions.ros_Image_to_cv2(msg, desired_encoding="bgr8")
callback(cv_image)
self.rear_left_camera_sub = rospy.Subscriber(topic, Image, callback_with_cv2)

elif name == 'rear_right_camera':
topic = self.ros_sensor_topics[name]
if type is not None and (type is not Image and type is not cv2.Mat):
raise ValueError("GEMGazeboInterface only supports Image or OpenCV for rear right camera")
if type is None or type is Image:
self.rear_right_camera_sub = rospy.Subscriber(topic, Image, callback)
else:
def callback_with_cv2(msg: Image):
cv_image = conversions.ros_Image_to_cv2(msg, desired_encoding="bgr8")
callback(cv_image)
self.rear_right_camera_sub = rospy.Subscriber(topic, Image, callback_with_cv2)
# Front depth sensor has not been added to gazebo yet.
# This code is placeholder until we add front depth sensor.
elif name == 'front_depth':
Expand Down
140 changes: 140 additions & 0 deletions GEMstack/onboard/perception/test_gazebo_sensors.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,140 @@
"""
Demo script to run YOLO object detection on a Gazebo simulation.

This code subscribes to the front camera feed from the GEM e2/e4 model and applies YOLO-based object detection to the incoming images.

Visualization:
- Use RViz or rqt to monitor the topics.

ROS Topics:
- Raw camera feed: /gem/debug
- YOLO detection output: /gem/image_detection
"""


# Python
import os
from typing import List, Dict
from collections import defaultdict
from datetime import datetime
import copy
# ROS, CV
import rospy
import message_filters
import cv2
import numpy as np
import tf
from cv_bridge import CvBridge
from sensor_msgs.msg import Image, PointCloud2
from visualization_msgs.msg import MarkerArray
# GEMStack
from ...state import AllState,VehicleState,ObjectPose,ObjectFrameEnum,AgentState,AgentEnum,AgentActivityEnum,ObjectFrameEnum
from ..interface.gem import GEMInterface, GNSSReading
from ..component import Component
from scipy.spatial.transform import Rotation as R



class SensorCheck(Component):
    """Sanity-check component for the Gazebo camera sensors.

    Subscribes to the front, front-left, front-right, rear-left and
    rear-right camera feeds through the vehicle interface and republishes
    each image on a ``/gem/debug/<camera_name>`` topic so the streams can
    be visually inspected in RViz or rqt.
    """

    def __init__(self, vehicle_interface : GEMInterface) -> None:
        """Stores the vehicle interface and prepares the image converter.

        Args:
            vehicle_interface: interface used to subscribe to the simulated
                camera sensors.
        """
        # vehicle interface
        self.vehicle_interface = vehicle_interface
        # cv2 to ros converter
        self.bridge = CvBridge()

    def initialize(self):
        """Creates the debug publishers and subscribes to all camera feeds.

        The publishers are created *before* the subscriptions so that a
        callback firing immediately after subscribe_sensor() can never hit
        a missing publisher attribute.
        """
        super().initialize()

        self.pub_front_camera_image = rospy.Publisher("/gem/debug/front_camera", Image, queue_size=1)
        self.pub_front_left_camera_image = rospy.Publisher("/gem/debug/front_left_camera", Image, queue_size=1)
        self.pub_front_right_camera_image = rospy.Publisher("/gem/debug/front_right_camera", Image, queue_size=1)
        self.pub_rear_left_camera_image = rospy.Publisher("/gem/debug/rear_left_camera", Image, queue_size=1)
        self.pub_rear_right_camera_image = rospy.Publisher("/gem/debug/rear_right_camera", Image, queue_size=1)

        self.vehicle_interface.subscribe_sensor('front_camera',self.front_camera_callback, type = cv2.Mat)
        self.vehicle_interface.subscribe_sensor('front_left_camera',self.front_left_camera_callback, type = cv2.Mat)
        self.vehicle_interface.subscribe_sensor('front_right_camera',self.front_right_camera_callback, type = cv2.Mat)
        self.vehicle_interface.subscribe_sensor('rear_left_camera',self.rear_left_camera_callback, type = cv2.Mat)
        self.vehicle_interface.subscribe_sensor('rear_right_camera',self.rear_right_camera_callback, type = cv2.Mat)

    def update(self, vehicle : VehicleState) -> Dict[str,AgentState]:
        """No-op update; this component only republishes camera images.

        Returns:
            An empty dict — this checker never reports detected agents.
        """
        return {}

    def _republish(self, image: cv2.Mat, publisher: rospy.Publisher) -> None:
        # Shared helper: convert the cv2 image back to a ROS Image message
        # and republish it on the given debug topic to validate the feed.
        ros_img = self.bridge.cv2_to_imgmsg(image, 'bgr8')
        publisher.publish(ros_img)

    def front_camera_callback(self, image: cv2.Mat):
        """Republishes the front camera image coming through Gazebo."""
        self._republish(image, self.pub_front_camera_image)

    def front_left_camera_callback(self, image: cv2.Mat):
        """Republishes the front-left camera image coming through Gazebo."""
        self._republish(image, self.pub_front_left_camera_image)

    def front_right_camera_callback(self, image: cv2.Mat):
        """Republishes the front-right camera image coming through Gazebo."""
        self._republish(image, self.pub_front_right_camera_image)

    def rear_left_camera_callback(self, image: cv2.Mat):
        """Republishes the rear-left camera image coming through Gazebo."""
        self._republish(image, self.pub_rear_left_camera_image)

    def rear_right_camera_callback(self, image: cv2.Mat):
        """Republishes the rear-right camera image coming through Gazebo."""
        self._republish(image, self.pub_rear_right_camera_image)

    def rate(self):
        """Update rate in Hz."""
        return 4.0

    def state_inputs(self):
        """This component reads the vehicle state."""
        return ['vehicle']

    def state_outputs(self):
        """Declared for the Component interface; update() always returns {}."""
        return ['agents']

30 changes: 30 additions & 0 deletions launch/gazebo_simulation.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,36 @@ variants:
print: True
# visualization: !include "klampt_visualization.yaml"
# visualization: !include "mpl_visualization.yaml"
gazebo_sensor_check:
run:
mode: simulation
vehicle_interface:
type: gem_gazebo.GEMGazeboInterface
# args:
# scene: !relative_path '../scenes/gazebo.yaml'

drive:
perception:
state_estimation : GNSSStateEstimator
agent_detection : test_gazebo_sensors.SensorCheck
perception_normalization : StandardPerceptionNormalizer

planning:
# Adding your custom relation estimation
# relations_estimation: pedestrian_yield_logic.PedestrianYielder
# Fixed route with pure pursuit
route_planning:
type: StaticRoutePlanner
args: [!relative_path '../GEMstack/knowledge/routes/xyhead_highbay_backlot_p.csv'] # Fixed path
motion_planning:
type: RouteToTrajectoryPlanner
args: [null] #desired speed in m/s. If null, this will keep the route untimed for the trajectory tracker
trajectory_tracking:
type: pure_pursuit.PurePursuitTrajectoryTracker
args: {desired_speed: 2.5} #approximately 5mph
print: True
# visualization: !include "klampt_visualization.yaml"
# visualization: !include "mpl_visualization.yaml"
log_ros:
log:
ros_topics : !include "../GEMstack/knowledge/defaults/standard_ros_topics.yaml"
Loading