diff --git a/GEMstack/onboard/interface/gem_gazebo.py b/GEMstack/onboard/interface/gem_gazebo.py index 8b54b44a3..2c19d60b0 100644 --- a/GEMstack/onboard/interface/gem_gazebo.py +++ b/GEMstack/onboard/interface/gem_gazebo.py @@ -1,3 +1,4 @@ +from GEMstack.state.obstacle import Obstacle, ObstacleMaterialEnum, ObstacleStateEnum from .gem import * from ...utils import settings import math @@ -16,12 +17,14 @@ # Changed from AckermannDriveStamped from ackermann_msgs.msg import AckermannDrive from rosgraph_msgs.msg import Clock +from gazebo_msgs.msg import ModelStates from tf.transformations import euler_from_quaternion from ...state import ObjectPose,ObjectFrameEnum from ...knowledge.vehicle.geometry import steer2front from ...knowledge.vehicle.dynamics import pedal_positions_to_acceleration +from ...state import AgentState, AgentEnum, AgentActivityEnum # OpenCV and cv2 bridge import cv2 @@ -37,6 +40,33 @@ class GNSSReading: status: str +# Agent dimensions similar to what's in gem_simulator.py +AGENT_DIMENSIONS = { + 'pedestrian' : (0.5,0.5,1.6), + 'bicyclist' : (1.8,0.5,1.6), + 'car' : (4.0,2.5,1.4), + 'medium_truck': (6.0,2.5,3.0), + 'large_truck': (10.0,2.5,3.5) +} + +# Map model prefixes to agent types +MODEL_PREFIX_TO_AGENT_TYPE = { + 'pedestrian': 'pedestrian', + 'person': 'pedestrian', + 'bicycle': 'bicyclist', + 'bike': 'bicyclist', + 'car': 'car', + 'vehicle': 'car', + 'truck': 'medium_truck', + 'large_truck': 'large_truck', + 'cone': 'traffic_cone' +} + +# Map model prefixes to obstacle types +MODEL_PREFIX_TO_OBSTACLE_TYPE = { + 'cone': 'traffic_cone' +} + class GEMGazeboInterface(GEMInterface): """Interface for connecting to the GEM e2 vehicle in Gazebo simulation.""" @@ -74,6 +104,30 @@ def __init__(self): self.top_lidar_sub = None self.front_radar_sub = None + # Agent detection + self.model_states_sub = None + self.tracked_model_prefixes = settings.get('simulator.agent_tracker.model_prefixes', + ['pedestrian', 'bicycle', 'car']) + 
self.tracked_obstacle_prefixes = settings.get('simulator.obstacle_tracker.model_prefixes', + ['cone']) + self.agent_detector_callback = None + self.obstacle_detector_callback = None + self.last_agent_positions = {} + self.last_agent_velocities = {} + self.last_model_states_time = 0.0 + self.agent_detection_rate = settings.get('simulator.agent_tracker.rate', 10.0) # Hz + self.obstacle_detection_rate = settings.get('simulator.obstacle_tracker.rate', 10.0) # Hz + + # Frame transformation variables + self.start_pose_abs = None # Initial vehicle pose in GLOBAL frame + self.vehicle_model_pose = None # Current vehicle pose from model_states + self.vehicle_gps_pose = None # Current vehicle pose from GPS + self.t_start = None # Start time + + # Stable transformation variables + self.transform_initialized = False + self.initial_vehicle_model_pose = None # Vehicle pose in model_states at start + self.faults = [] # Gazebo vehicle control @@ -86,6 +140,9 @@ def __init__(self): self.sim_time = rospy.Time(0) self.clock_sub = rospy.Subscriber('/clock', Clock, self.clock_callback) + # Subscribe to model states for agent detection + self.model_states_sub = rospy.Subscriber('/gazebo/model_states', ModelStates, self.model_states_callback) + def start(self): if self.debug: print("Starting GEM Gazebo Interface") @@ -100,6 +157,263 @@ def time(self): def get_reading(self) -> GEMVehicleReading: return self.last_reading + def model_states_callback(self, msg: ModelStates): + current_time = self.time() + + # Check if we should process this update (rate limiting) + if ((current_time - self.last_model_states_time < 1.0/self.agent_detection_rate) and (current_time - self.last_model_states_time < 1.0/self.obstacle_detection_rate)): + return + + # Calculate time delta since last update + dt = current_time - self.last_model_states_time + self.last_model_states_time = current_time + + # Skip if no callback is registered + if self.agent_detector_callback is None and self.obstacle_detector_callback 
is None: + return + + # Find vehicle in model states + vehicle_idx = -1 + for i, name in enumerate(msg.name): + if name.lower() in ['gem_e4', 'gem_e2']: + vehicle_idx = i + break + + # If vehicle not found, cannot proceed + if vehicle_idx < 0: + return + + # Get vehicle position and orientation from model states + vehicle_pos = msg.pose[vehicle_idx].position + vehicle_ori = msg.pose[vehicle_idx].orientation + quaternion = (vehicle_ori.x, vehicle_ori.y, vehicle_ori.z, vehicle_ori.w) + _, _, vehicle_yaw = euler_from_quaternion(quaternion) + + # Create vehicle model pose in ABSOLUTE_CARTESIAN frame (Gazebo's native frame) + self.vehicle_model_pose = ObjectPose( + frame=ObjectFrameEnum.ABSOLUTE_CARTESIAN, + t=current_time, + x=vehicle_pos.x, + y=vehicle_pos.y, + z=vehicle_pos.z, + yaw=vehicle_yaw + ) + + # Initialize stable transformation when we have both GPS and model data + if not self.transform_initialized and self.vehicle_gps_pose is not None: + # Initialize start pose and transformation data + self.start_pose_abs = self.vehicle_gps_pose + self.initial_vehicle_model_pose = self.vehicle_model_pose + self.t_start = current_time + self.transform_initialized = True + + if self.debug: + print("STABLE TRANSFORMATION INITIALIZED:") + print(f" GPS position: ({self.start_pose_abs.x:.4f}, {self.start_pose_abs.y:.4f}, {self.start_pose_abs.z:.4f})") + print(f" Model position: ({self.initial_vehicle_model_pose.x:.4f}, {self.initial_vehicle_model_pose.y:.4f}, {self.initial_vehicle_model_pose.z:.4f})") + print(f" GPS orientation: {self.start_pose_abs.yaw:.4f} radians") + print(f" Model orientation: {self.initial_vehicle_model_pose.yaw:.4f} radians") + + # Process all models except the vehicle itself + for i, model_name in enumerate(msg.name): + # Skip the vehicle model itself + if i == vehicle_idx: + continue + + # Check if this model should be tracked as an agent or obstacle + agent_type = None + for prefix in self.tracked_model_prefixes: + if 
model_name.lower().startswith(prefix.lower()): + for key, value in MODEL_PREFIX_TO_AGENT_TYPE.items(): + if prefix.lower().startswith(key.lower()): + agent_type = value + break + break + + obstacle_type = None + for prefix in self.tracked_obstacle_prefixes: + if model_name.lower().startswith(prefix.lower()): + for key, value in MODEL_PREFIX_TO_OBSTACLE_TYPE.items(): + if prefix.lower().startswith(key.lower()): + obstacle_type = value + break + break + + if agent_type is None and obstacle_type is None: + continue # Not an entity we're tracking + + # Get position and orientation from model states + position = msg.pose[i].position + orientation = msg.pose[i].orientation + + # Get velocity from twist + linear_vel = msg.twist[i].linear + angular_vel = msg.twist[i].angular + + # Convert orientation quaternion to euler angles + quaternion = (orientation.x, orientation.y, orientation.z, orientation.w) + roll, pitch, yaw = euler_from_quaternion(quaternion) + + # Create agent pose in ABSOLUTE_CARTESIAN frame (Gazebo's native frame) + agent_global_pose = ObjectPose( + frame=ObjectFrameEnum.ABSOLUTE_CARTESIAN, + t=current_time, + x=position.x, + y=position.y, + z=position.z, + roll=roll, + pitch=pitch, + yaw=yaw + ) + + # Transform agent pose to START frame using stable transformation + agent_pose = self._transform_to_start_frame(current_time, position, roll, pitch, yaw) + + # Process agent if applicable + if agent_type is not None and self.agent_detector_callback is not None: + self._process_agent(model_name, agent_type, agent_pose, position, linear_vel, angular_vel, dt) + + # Process obstacle if applicable + if obstacle_type is not None and self.obstacle_detector_callback is not None: + self._process_obstacle(model_name, obstacle_type, agent_pose, roll, pitch) + + def _transform_to_start_frame(self, current_time, position, roll, pitch, yaw): + """Transform a pose from ABSOLUTE_CARTESIAN to START frame.""" + if self.transform_initialized: + # Calculate position relative to 
the *initial* vehicle position (from when START frame was established) + rel_x = position.x - self.initial_vehicle_model_pose.x + rel_y = position.y - self.initial_vehicle_model_pose.y + rel_z = position.z - self.initial_vehicle_model_pose.z + + # Rotate by the *initial* vehicle orientation + cos_yaw = math.cos(-self.initial_vehicle_model_pose.yaw) + sin_yaw = math.sin(-self.initial_vehicle_model_pose.yaw) + rot_x = rel_x * cos_yaw - rel_y * sin_yaw + rot_y = rel_x * sin_yaw + rel_y * cos_yaw + + # Adjust yaw relative to *initial* vehicle orientation + rel_yaw = yaw - self.initial_vehicle_model_pose.yaw + + # Create the pose in START frame using the stable transformation + return ObjectPose( + frame=ObjectFrameEnum.START, + t=current_time - self.t_start if self.t_start is not None else 0, + x=rot_x, + y=rot_y, + z=rel_z, + roll=roll, + pitch=pitch, + yaw=rel_yaw + ) + else: + # If transformation not initialized yet, just use the global pose in ABSOLUTE_CARTESIAN frame + return ObjectPose( + frame=ObjectFrameEnum.ABSOLUTE_CARTESIAN, + t=current_time, + x=position.x, + y=position.y, + z=position.z, + roll=roll, + pitch=pitch, + yaw=yaw + ) + + def _process_agent(self, model_name, agent_type, agent_pose, position, linear_vel, angular_vel, dt): + """Process an agent detected in the simulation.""" + # Calculate velocity manually if twist data is zero or missing + velocity = (linear_vel.x, linear_vel.y, linear_vel.z) + velocity_is_zero = abs(linear_vel.x) < 1e-6 and abs(linear_vel.y) < 1e-6 and abs(linear_vel.z) < 1e-6 + + if velocity_is_zero and model_name in self.last_agent_positions and dt > 0: + # Calculate velocity from position difference + prev_pos = self.last_agent_positions[model_name] + dx = position.x - prev_pos[0] + dy = position.y - prev_pos[1] + dz = position.z - prev_pos[2] + + # Calculate velocity (position change / time) + calculated_vel = (dx/dt, dy/dt, dz/dt) + + # Apply some smoothing with the previous velocity if available + if model_name in 
self.last_agent_velocities: + prev_vel = self.last_agent_velocities[model_name] + # Apply exponential smoothing (0.7 current + 0.3 previous) + velocity = ( + 0.7 * calculated_vel[0] + 0.3 * prev_vel[0], + 0.7 * calculated_vel[1] + 0.3 * prev_vel[1], + 0.7 * calculated_vel[2] + 0.3 * prev_vel[2] + ) + else: + velocity = calculated_vel + + # Determine activity state based on velocity magnitude + velocity_magnitude = np.linalg.norm(velocity) + if velocity_magnitude < 0.1: + activity = AgentActivityEnum.STOPPED + elif velocity_magnitude > 5.0: # Arbitrary threshold for "fast" + activity = AgentActivityEnum.FAST + else: + activity = AgentActivityEnum.MOVING + + # Get agent dimensions + dimensions = AGENT_DIMENSIONS.get(agent_type, (1.0, 1.0, 1.0)) # Default if unknown + + # Create agent state + agent_state = AgentState( + pose=agent_pose, # Using START frame pose + dimensions=dimensions, + outline=None, + type=getattr(AgentEnum, agent_type.upper()), + activity=activity, + velocity=velocity, + yaw_rate=angular_vel.z + ) + + # Store current position for next velocity calculation (using raw positions) + self.last_agent_positions[model_name] = (position.x, position.y, position.z) + self.last_agent_velocities[model_name] = velocity + + # Call the callback with the agent state + self.agent_detector_callback(model_name, agent_state) + + def _process_obstacle(self, model_name, obstacle_type, agent_pose, roll, pitch): + """Process an obstacle detected in the simulation.""" + # Determine obstacle state based on orientation + # For traffic cones, we want to check if they are standing up or tipped over + obstacle_activity = ObstacleStateEnum.STANDING # Default state + + # Check roll and pitch to determine if the obstacle is tipped over + # Thresholds in radians - approx 20 degrees + roll_threshold = 0.35 + pitch_threshold = 0.35 + + # For a traffic cone, analyze orientation + if obstacle_type == 'traffic_cone': + if abs(roll) > roll_threshold: + # Check which direction it's tipped 
+ if roll > 0: + obstacle_activity = ObstacleStateEnum.RIGHT + else: + obstacle_activity = ObstacleStateEnum.LEFT + elif abs(pitch) > pitch_threshold: + # If tipped forward/backward, we'll use LEFT/RIGHT based on pitch sign + if pitch > 0: + obstacle_activity = ObstacleStateEnum.RIGHT + else: + obstacle_activity = ObstacleStateEnum.LEFT + + # Create obstacle state with the determined activity + obstacle_state = Obstacle( + dimensions=(0,0,0), + outline=None, + pose=agent_pose, + material=getattr(ObstacleMaterialEnum, obstacle_type.upper()), + state=obstacle_activity, + collidable=True + ) + + # Call the callback with the obstacle state + self.obstacle_detector_callback(model_name, obstacle_state) def subscribe_sensor(self, name, callback, type=None): if name == 'gnss': @@ -133,6 +447,9 @@ def callback_with_gnss_reading(inspva_msg): speed = np.linalg.norm([inspva_msg.east_velocity, inspva_msg.north_velocity]) self.last_reading.speed = speed + # Save the vehicle's GPS pose for coordinate transformation + self.vehicle_gps_pose = pose + # Create GNSS reading with fused data reading = GNSSReading( pose=pose, @@ -173,6 +490,9 @@ def callback_with_gnss_reading(gnss_msg): pitch=pitch, yaw=yaw ) + + # Save the vehicle's GPS pose for coordinate transformation + self.vehicle_gps_pose = pose # Calculate speed from GNSS self.last_reading.speed = np.linalg.norm([gnss_msg.ve, gnss_msg.vn]) @@ -230,11 +550,25 @@ def callback_with_cv2(msg: Image): cv_image = conversions.ros_Image_to_cv2(msg, desired_encoding="passthrough") callback(cv_image) self.front_depth_sub = rospy.Subscriber(topic, Image, callback_with_cv2) + + elif name == 'agent_detector': + if type is not None and type is not AgentState: + raise ValueError("GEMGazeboInterface only supports AgentState for agent_detector") + self.agent_detector_callback = callback + + elif name == 'obstacle_detector': + if type is not None and type is not Obstacle: + raise ValueError("GEMGazeboInterface only supports Obstacle for 
obstacle_detector") + self.obstacle_detector_callback = callback def hardware_faults(self) -> List[str]: # In simulation, we don't have real hardware faults return self.faults + def sensors(self): + # Add agent_detector to the list of available sensors + return super().sensors() + ['agent_detector'] + def send_command(self, command : GEMVehicleCommand): # Throttle rate at which we send commands t = self.time() @@ -262,10 +596,15 @@ def send_command(self, command : GEMVehicleCommand): # Calculate acceleration from pedal positions acceleration = pedal_positions_to_acceleration(accelerator_pedal_position, brake_pedal_position, v, 0, 1) - + if self.debug: + print("acceleration before", acceleration) + # Apply reasonable limits to acceleration max_accel = settings.get('vehicle.limits.max_acceleration', 1.0) - max_decel = settings.get('vehicle.limits.max_deceleration', -2.0) + max_decel = -1 * settings.get('vehicle.limits.max_deceleration', 2.0) # cuz ackermann expects neg but pure pursiut wants positive decel val + if self.debug: + print("max_accel", max_accel) + print("max_decel", max_decel) acceleration = np.clip(acceleration, max_decel, max_accel) # Convert wheel angle to steering angle (front wheel angle) @@ -280,7 +619,9 @@ def send_command(self, command : GEMVehicleCommand): # Don't use infinite speed, instead calculate a reasonable target speed current_speed = v target_speed = current_speed - + if self.debug: + print("acceleration ", acceleration) + if acceleration > 0: # Accelerating - set target speed to current speed plus some increment # This is more realistic than infinite speed @@ -288,6 +629,9 @@ def send_command(self, command : GEMVehicleCommand): target_speed = min(current_speed + acceleration * 0.5, max_speed) elif acceleration < 0: # Braking - set target speed to zero if deceleration is significant + if self.debug: + print("braking ", acceleration) + if brake_pedal_position > 0.1: target_speed = 0.0 @@ -303,4 +647,4 @@ def send_command(self, command : 
GEMVehicleCommand): print(f"[ACKERMANN] Speed: {msg.speed:.2f}, Accel: {msg.acceleration:.2f}, Steer: {msg.steering_angle:.2f}") self.ackermann_pub.publish(msg) - self.last_command = command + self.last_command = command \ No newline at end of file diff --git a/GEMstack/onboard/perception/agent_detection.py b/GEMstack/onboard/perception/agent_detection.py index 5d600f792..c80442771 100644 --- a/GEMstack/onboard/perception/agent_detection.py +++ b/GEMstack/onboard/perception/agent_detection.py @@ -31,3 +31,24 @@ def agent_callback(self, name : str, agent : AgentState): def update(self) -> Dict[str,AgentState]: with self.lock: return copy.deepcopy(self.agents) + + +class GazeboAgentDetector(OmniscientAgentDetector): + """Obtains agent detections from the Gazebo simulator using model_states topic""" + def __init__(self, vehicle_interface : GEMInterface, tracked_model_prefixes=None): + super().__init__(vehicle_interface) + + # If specific model prefixes are provided, configure the interface to track them + if tracked_model_prefixes is not None: + # Check if our interface has the tracked_model_prefixes attribute (is a GazeboInterface) + if hasattr(vehicle_interface, 'tracked_model_prefixes'): + vehicle_interface.tracked_model_prefixes = tracked_model_prefixes + print(f"Configured GazeboAgentDetector to track models with prefixes: {tracked_model_prefixes}") + else: + print("Warning: vehicle_interface doesn't support tracked_model_prefixes configuration") + + def initialize(self): + # Use the same agent_detector sensor as OmniscientAgentDetector + # The GazeboInterface implements this with model_states subscription + super().initialize() + print("GazeboAgentDetector initialized and subscribed to model_states") diff --git a/GEMstack/onboard/perception/obstacle_detection.py b/GEMstack/onboard/perception/obstacle_detection.py new file mode 100644 index 000000000..97f4716c0 --- /dev/null +++ b/GEMstack/onboard/perception/obstacle_detection.py @@ -0,0 +1,56 @@ + +import 
threading +import copy +from typing import Dict +from ..component import Component + +from ..interface.gem import GEMInterface +from ...state.obstacle import Obstacle + +class OmniscientObstacleDetector(Component): + """Obtains agent detections from a simulator""" + def __init__(self,vehicle_interface : GEMInterface): + self.vehicle_interface = vehicle_interface + self.obstacles = {} + self.lock = threading.Lock() + + def rate(self): + return 15.0 + + def state_inputs(self): + return [] + + def state_outputs(self): + return ['obstacles'] + + def initialize(self): + self.vehicle_interface.subscribe_sensor('obstacle_detector',self.obstacle_callback, Obstacle) + + def obstacle_callback(self, name : str, obstacle : Obstacle): + with self.lock: + self.obstacles[name] = obstacle + + def update(self) -> Dict[str,Obstacle]: + with self.lock: + return copy.deepcopy(self.obstacles) + + +class GazeboObstacleDetector(OmniscientObstacleDetector): + """Obtains agent detections from the Gazebo simulator using model_states topic""" + def __init__(self, vehicle_interface : GEMInterface, tracked_obstacle_prefixes=None): + super().__init__(vehicle_interface) + + # If specific model prefixes are provided, configure the interface to track them + if tracked_obstacle_prefixes is not None: + # Check if our interface has the tracked_model_prefixes attribute (is a GazeboInterface) + if hasattr(vehicle_interface, 'tracked_obstacle_prefixes'): + vehicle_interface.tracked_obstacle_prefixes = tracked_obstacle_prefixes + print(f"Configured GazeboObstacleDetector to track obstacles with prefixes: {tracked_obstacle_prefixes}") + else: + print("Warning: vehicle_interface doesn't support tracked_obstacle_prefixes configuration") + + def initialize(self): + # Use the same agent_detector sensor as OmniscientAgentDetector + # The GazeboInterface implements this with model_states subscription + super().initialize() + print("GazeboObstacleDetector initialized and subscribed to model_states") \ No newline 
at end of file diff --git a/GEMstack/onboard/planning/route_planning.py b/GEMstack/onboard/planning/route_planning.py index 9bae3c24a..91c33fee7 100644 --- a/GEMstack/onboard/planning/route_planning.py +++ b/GEMstack/onboard/planning/route_planning.py @@ -5,7 +5,6 @@ import os import numpy as np from .RRT import BiRRT -from .reeds_shepp_parking import ReedsSheppParking class StaticRoutePlanner(Component): @@ -216,6 +215,8 @@ class SummoningRoutePlanner(Component): """Reads a route from disk and returns it as the desired route.""" def __init__(self, roadgraphfn: str = None, map_type: str = 'roadgraph', map_frame: str = 'start'): + # Moving this import here to avoid dependency error related to reeds-shepp python package + from .reeds_shepp_parking import ReedsSheppParking self.planner = None self.route = None self.map_type = map_type diff --git a/GEMstack/onboard/visualization/mpl_visualization.py b/GEMstack/onboard/visualization/mpl_visualization.py index 08392e9ed..a251c98a1 100644 --- a/GEMstack/onboard/visualization/mpl_visualization.py +++ b/GEMstack/onboard/visualization/mpl_visualization.py @@ -6,6 +6,7 @@ import time from collections import deque from ...state.agent import AgentEnum +from ...state import ObjectFrameEnum, ObjectPose, AllState import numpy as np class MPLVisualization(Component): @@ -93,44 +94,177 @@ def update(self, state): self.debug("vehicle", "velocity", state.vehicle.v) self.debug("vehicle", "front_wheel_angle", state.vehicle.front_wheel_angle) - # Pedestrian metrics + # Print debugging info about the vehicle's position and frame + if self.num_updates % 10 == 0: + print(f"Vehicle position: ({state.vehicle.pose.x:.2f}, {state.vehicle.pose.y:.2f}), frame: {state.vehicle.pose.frame}") + # print("Obstacle state:", state.obstacles) + + # Pedestrian metrics and position debugging + ped_positions = [] for agent_id, agent in state.agents.items(): - if agent.type == AgentEnum.PEDESTRIAN: - # self.debug(f"ped_{agent_id}", "x", agent.pose.x) - # 
self.debug(f"ped_{agent_id}", "y", agent.pose.y) - self.debug(f"ped_{agent_id}", "velocity", np.linalg.norm(agent.velocity)) # Magnitude of resultant velocity - self.debug(f"ped_{agent_id}", "yaw_rate", agent.yaw_rate) + try: + # Check agent type safely + is_pedestrian = False + try: + is_pedestrian = agent.type == AgentEnum.PEDESTRIAN + except: + # If there's an issue comparing, try string comparison + try: + is_pedestrian = str(agent.type) == str(AgentEnum.PEDESTRIAN) + except: + # If that fails, use substring match + try: + is_pedestrian = "PEDESTRIAN" in str(agent.type) + except: + # Last resort: assume it's a pedestrian if not explicitly marked as something else + is_pedestrian = True + + if is_pedestrian: + # Position logging + ped_x, ped_y = agent.pose.x, agent.pose.y + ped_positions.append((ped_x, ped_y)) + # Debug output every 10 updates + if self.num_updates % 10 == 0: + print(f"Pedestrian {agent_id} position: ({ped_x:.2f}, {ped_y:.2f}), frame: {agent.pose.frame}") + # Calculate distance from vehicle + dist = np.sqrt((ped_x - state.vehicle.pose.x)**2 + (ped_y - state.vehicle.pose.y)**2) + print(f"Distance to vehicle: {dist:.2f} meters") + + # Track positions for plotting + self.debug(f"ped_{agent_id}", "x", ped_x) + self.debug(f"ped_{agent_id}", "y", ped_y) + # Calculate velocity magnitude safely + try: + vel_mag = np.linalg.norm(agent.velocity) + except: + vel_mag = 0.0 + self.debug(f"ped_{agent_id}", "velocity", vel_mag) + self.debug(f"ped_{agent_id}", "yaw_rate", agent.yaw_rate) + except Exception as e: + print(f"Error processing agent {agent_id}: {str(e)}") time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(state.t)) - #frame=ObjectFrameEnum.CURRENT - #state = state.to_frame(frame) - xrange = [state.vehicle.pose.x - 10, state.vehicle.pose.x + 10] - yrange = [state.vehicle.pose.y - 10, state.vehicle.pose.y + 10] - #plot main visualization - mpl_visualization.plot(state,title="Scene %d at 
%s"%(self.num_updates,time_str),xrange=xrange,yrange=yrange,show=False,ax=self.axs[0]) + + # Print coordinate frame information for debugging + if self.num_updates % 10 == 0: + print(f"Vehicle: pos=({state.vehicle.pose.x:.2f}, {state.vehicle.pose.y:.2f}), frame={state.vehicle.pose.frame}") + if state.start_vehicle_pose: + print(f"Start pose: pos=({state.start_vehicle_pose.x:.2f}, {state.start_vehicle_pose.y:.2f}), frame={state.start_vehicle_pose.frame}") + + if len(state.agents) > 0: + print(f"Number of agents: {len(state.agents)}") + + # Determine a good plot range that includes both vehicle and pedestrians + if len(ped_positions) > 0: + # Start with vehicle position + min_x, max_x = state.vehicle.pose.x, state.vehicle.pose.x + min_y, max_y = state.vehicle.pose.y, state.vehicle.pose.y + + # Expand to include pedestrians + for x, y in ped_positions: + min_x = min(min_x, x) + max_x = max(max_x, x) + min_y = min(min_y, y) + max_y = max(max_y, y) + + # Add margin - based on the content size + size_x = max(50, max_x - min_x) + size_y = max(50, max_y - min_y) + margin_x = max(20, size_x * 0.2) # at least 20m or 20% of content + margin_y = max(20, size_y * 0.2) + + xrange = [min_x - margin_x, max_x + margin_x] + yrange = [min_y - margin_y, max_y + margin_y] + else: + # Default range around vehicle if no pedestrians + xrange = [state.vehicle.pose.x - 10, state.vehicle.pose.x + 10] + yrange = [state.vehicle.pose.y - 10, state.vehicle.pose.y + 10] + + # Print xrange and yrange for debugging + if self.num_updates % 10 == 0: + print(f"Plot range: X {xrange}, Y {yrange}") + + # Plot using the current state directly - avoid conversions that might fail + try: + # First attempt: Just use current state as-is + mpl_visualization.plot(state, title=f"Scene {self.num_updates} at {time_str}", + xrange=xrange, yrange=yrange, show=False, ax=self.axs[0]) + except Exception as e: + print(f"Error in basic plot: {str(e)}") + # If that fails, try a minimal plot with just essential elements + 
try: + # Fallback to a minimal plot without using state conversion + self.axs[0].clear() + self.axs[0].set_aspect('equal') + self.axs[0].set_xlabel('x (m)') + self.axs[0].set_ylabel('y (m)') + self.axs[0].set_xlim(xrange[0], xrange[1]) + self.axs[0].set_ylim(yrange[0], yrange[1]) + + # Just draw dots for vehicle and agents + self.axs[0].plot(state.vehicle.pose.x, state.vehicle.pose.y, 'bo', markersize=10, label='Vehicle') + + # Plot pedestrians as red dots + for x, y in ped_positions: + self.axs[0].plot(x, y, 'ro', markersize=8) + + self.axs[0].set_title(f"Basic Scene {self.num_updates} at {time_str}") + self.axs[0].legend() + except Exception as e2: + print(f"Even basic plot failed: {str(e2)}") # Vehicle plot (axs[1]) - self.axs[1].clear() - for k,v in self.vehicle_plot_values.items(): - t = [x[0] for x in v] - y = [x[1] for x in v] - self.axs[1].plot(t,y,label=k) - self.axs[1].set_title('Vehicle Metrics') - self.axs[1].set_xlabel('Time (s)') - self.axs[1].legend() + try: + self.axs[1].clear() + has_data = False + for k,v in self.vehicle_plot_values.items(): + if len(v) > 0: # Only plot if we have data + t = [x[0] for x in v] + y = [x[1] for x in v] + self.axs[1].plot(t,y,label=k) + has_data = True + if has_data: + self.axs[1].legend() + self.axs[1].set_title('Vehicle Metrics') + self.axs[1].set_xlabel('Time (s)') + except Exception as e: + print(f"Error in vehicle plot: {str(e)}") # Pedestrian plot (axs[2]) - self.axs[2].clear() - for k,v in self.pedestrian_plot_values.items(): - t = [x[0] for x in v] - y = [x[1] for x in v] - self.axs[2].plot(t,y,label=k) - self.axs[2].set_title('Pedestrian Metrics') - self.axs[2].set_xlabel('Time (s)') - self.axs[2].legend() + try: + self.axs[2].clear() + has_data = False + for k,v in self.pedestrian_plot_values.items(): + if len(v) > 0: # Only plot if we have data + t = [x[0] for x in v] + y = [x[1] for x in v] + self.axs[2].plot(t,y,label=k) + has_data = True + if has_data: + self.axs[2].legend() + 
self.axs[2].set_title('Pedestrian Metrics') + self.axs[2].set_xlabel('Time (s)') + except Exception as e: + print(f"Error in pedestrian plot: {str(e)}") + + try: + # ---- plot static obstacles in orange ---- + for obs in state.obstacles.values(): + x_obs, y_obs = obs.pose.x, obs.pose.y + self.axs[0].plot(x_obs, y_obs, 'o', + markersize=8, + markerfacecolor='orange', + markeredgecolor='darkorange', + label='_nolegend_') + except Exception as e: + print(f"Error plotting obstacles: {str(e)}") - self.fig.canvas.draw_idle() - self.fig.canvas.flush_events() + # Update canvas + try: + self.fig.canvas.draw_idle() + self.fig.canvas.flush_events() + except Exception as e: + print(f"Error updating canvas: {str(e)}") if self.save_as is not None and self.writer is not None: try: diff --git a/GEMstack/state/__init__.py b/GEMstack/state/__init__.py index 84efba805..77d1bea71 100644 --- a/GEMstack/state/__init__.py +++ b/GEMstack/state/__init__.py @@ -8,7 +8,7 @@ 'VehicleState', 'Roadgraph', 'Roadmap', - 'Obstacle', + 'Obstacle', 'ObstacleMaterialEnum','ObstacleStateEnum', 'Sign', 'AgentState','AgentEnum','AgentActivityEnum', 'ObstacleMaterialEnum','ObstacleStateEnum', diff --git a/GEMstack/state/obstacle.py b/GEMstack/state/obstacle.py index d7e5db881..30a21217d 100644 --- a/GEMstack/state/obstacle.py +++ b/GEMstack/state/obstacle.py @@ -1,7 +1,8 @@ from __future__ import annotations from dataclasses import dataclass, replace from ..utils.serialization import register -from .physical_object import ObjectFrameEnum,ObjectPose,PhysicalObject,convert_vector +from .physical_object import ObjectFrameEnum,PhysicalObject #,convert_vector + from enum import Enum from typing import Tuple @@ -24,11 +25,14 @@ class ObstacleStateEnum(Enum): LEFT = 2 # flipped cone facing left RIGHT = 3 # flipped cone facing right - - @dataclass @register class Obstacle(PhysicalObject): material : ObstacleMaterialEnum collidable : bool - state: ObstacleStateEnum = ObstacleStateEnum.UNKNOWN + state: 
ObstacleStateEnum = ObstacleStateEnum.UNDETERMINED + + def to_frame(self, frame: ObjectFrameEnum, current_pose=None, start_pose_abs=None) -> Obstacle: + newpose = self.pose.to_frame(frame, current_pose, start_pose_abs) + return replace(self, pose=newpose) + diff --git a/GEMstack/utils/mpl_visualization.py b/GEMstack/utils/mpl_visualization.py index 09d078a02..ee8ed1565 100644 --- a/GEMstack/utils/mpl_visualization.py +++ b/GEMstack/utils/mpl_visualization.py @@ -3,6 +3,7 @@ import numpy as np from . import settings from ..state import ObjectFrameEnum,ObjectPose,PhysicalObject,VehicleState,VehicleGearEnum,Path,Obstacle,AgentState,Roadgraph,RoadgraphLane,RoadgraphLaneEnum,RoadgraphCurve,RoadgraphCurveEnum,RoadgraphRegion,RoadgraphRegionEnum,RoadgraphSurfaceEnum,Trajectory,Route,SceneState,AllState +from ..state.agent import AgentEnum CURVE_TO_STYLE = { RoadgraphCurveEnum.LANE_BOUNDARY : {'color':'k','linewidth':1,'linestyle':'-'}, @@ -67,6 +68,7 @@ def plot_object(obj : PhysicalObject, axis_len=None, outline=True, bbox=True, ax #plot bounding box R = obj.pose.rotation2d() t = [obj.pose.x,obj.pose.y] + if bbox or (outline and obj.outline is None): bounds = obj.bounds() (xmin,xmax),(ymin,ymax),(zmin,zmax) = bounds @@ -79,6 +81,7 @@ def plot_object(obj : PhysicalObject, axis_len=None, outline=True, bbox=True, ax ax.plot(xs,ys,'r-') else: ax.plot(xs,ys,'b-') + #plot outline if outline and obj.outline: outline = [R.dot(p)+t for p in obj.outline] @@ -86,6 +89,47 @@ def plot_object(obj : PhysicalObject, axis_len=None, outline=True, bbox=True, ax xs = [c[0] for c in outline] ys = [c[1] for c in outline] ax.plot(xs,ys,'r-') + + # Add a marker at the center to make small agents more visible + try: + if isinstance(obj, AgentState): + # Make different agent types visually distinct + try: + # Try to identify pedestrians + is_pedestrian = False + try: + is_pedestrian = obj.type == AgentEnum.PEDESTRIAN + except: + # If direct comparison fails, try string comparison + try: + 
is_pedestrian = str(obj.type) == str(AgentEnum.PEDESTRIAN) + except: + # If string comparison fails, try substring match + try: + is_pedestrian = "PEDESTRIAN" in str(obj.type) + except: + # Last resort + is_pedestrian = False + + # Set marker style based on agent type + if is_pedestrian: + marker = 'o' + markersize = 10 + color = 'r' + else: + # Default for other agent types + marker = 's' + markersize = 8 + color = 'b' + + ax.plot(obj.pose.x, obj.pose.y, marker=marker, markersize=markersize, color=color) + + except Exception as e: + # Fallback to basic marker if type identification fails + ax.plot(obj.pose.x, obj.pose.y, 'mo', markersize=8) + print(f"Using basic marker for agent due to error: {str(e)}") + except Exception as e: + print(f"Error adding agent marker: {str(e)}") def plot_vehicle(vehicle : VehicleState, axis_len=0.1, ax=None): """Plots the vehicle in the given axes. The coordinates @@ -227,19 +271,61 @@ def plot_scene(scene : SceneState, xrange=None, yrange=None, ax=None, title = No ax.set_ylim(yrange[0],yrange[1]) else: ax.set_ylim(-yrange*0.5,yrange*0.5) - #plot roadgraph - plot_roadgraph(scene.roadgraph,scene.route,ax=ax) - #plot vehicle and objects - plot_vehicle(scene.vehicle,ax=ax) - for k,a in scene.agents.items(): - plot_object(a,ax=ax) - for k,o in scene.obstacles.items(): - plot_object(o,ax=ax) + + # Plot roadgraph if available + try: + if scene.roadgraph is not None: + plot_roadgraph(scene.roadgraph,scene.route,ax=ax) + except Exception as e: + print(f"Error plotting roadgraph: {str(e)}") + + # Plot vehicle if available + try: + if scene.vehicle is not None: + plot_vehicle(scene.vehicle,ax=ax) + except Exception as e: + print(f"Error plotting vehicle: {str(e)}") + # Fallback to basic marker for vehicle + try: + ax.plot(scene.vehicle.pose.x, scene.vehicle.pose.y, 'bo', markersize=10) + except: + pass + + # Plot agents with careful error handling + try: + if scene.agents: + print(f"Plotting {len(scene.agents)} agents") + for agent_id, agent in 
scene.agents.items(): + try: + plot_object(agent,ax=ax) + except Exception as agent_e: + print(f"Error plotting agent {agent_id}: {str(agent_e)}") + # Fallback to a simple marker for this agent + try: + ax.plot(agent.pose.x, agent.pose.y, 'ro', markersize=8) + except: + pass + except Exception as e: + print(f"Error plotting agents: {str(e)}") + + # Plot obstacles if available + try: + if scene.obstacles: + for k, o in scene.obstacles.items(): + try: + plot_object(o,ax=ax) + except Exception as e: + print(f"Error plotting obstacle {k}: {str(e)}") + except Exception as e: + print(f"Error plotting obstacles: {str(e)}") + + # Set title if title is None: if show: ax.set_title("Scene at t = %.2f" % scene.t) else: ax.set_title(title) + if show: plt.show(block=False) diff --git a/docs/Gazebo Simulation Documentation.md b/docs/Gazebo Simulation Documentation.md index 0c6c5190b..ba36bc9d2 100644 --- a/docs/Gazebo Simulation Documentation.md +++ b/docs/Gazebo Simulation Documentation.md @@ -25,7 +25,7 @@ Follow the instructions in the linked repo to build and run the Docker container Install the required ROS packages: ```bash -sudo apt-get install -y ros-noetic-ackermann-msgs +sudo apt-get install -y ros-noetic-ackermann-msgs ros-noetic-gazebo-msgs ``` --- @@ -99,4 +99,54 @@ GEMstack/knowledge/defaults/ - `current.yaml` - Default configuration (GEM e4) - `e2.yaml` - GEM e2 configuration +## Entity Detection in Gazebo + +The Gazebo simulation environment supports detection of various types of entities - both agents (pedestrians, vehicles, etc.) and obstacles (traffic cones, barriers, etc.) that can be spawned in the simulation world. + +For detailed information about entity detection, including configuration options, usage examples, and implementation details, see the [Entity Detection Documentation](gazebo_entity_detection.md). 
+ +### Quick Start for Entity Detection + +To enable entity detection in your Gazebo simulation, add the following to your launch file's `gazebo` variant: + +```yaml +drive: + perception: + state_estimation: GNSSStateEstimator # Matches your Gazebo GNSS implementation + agent_detection: + type: agent_detection.GazeboAgentDetector + args: + tracked_model_prefixes: ['pedestrian', 'car', 'bicycle'] + obstacle_detection: + type: obstacle_detection.GazeboObstacleDetector + args: + tracked_obstacle_prefixes: ['cone'] +``` + +#### Configuration Options + +- **Agent Detection**: + - `type`: Specify the detector class (`agent_detection.GazeboAgentDetector`) + - `args`: Additional arguments: + - `tracked_model_prefixes`: Array of prefixes for models to track as agents + +- **Obstacle Detection**: + - `type`: Specify the detector class (`obstacle_detection.GazeboObstacleDetector`) + - `args`: Additional arguments: + - `tracked_obstacle_prefixes`: Array of prefixes for models to track as obstacles + +The prefixes in the configuration arrays define which entities will be tracked. For example: +- A model named `pedestrian1` will be detected as a pedestrian agent +- A model named `cone5` will be detected as a traffic cone obstacle + +You can customize these arrays based on the entities present in your simulation environment. + +### Spawning Entities in Gazebo + +You can spawn both agents and obstacles in Gazebo using a YAML configuration file. + +Follow the instructions in the [POLARIS GEM Simulator](https://github.com/harishkumarbalaji/POLARIS_GEM_Simulator/tree/main) repository to spawn entities in Gazebo. + +The naming conventions for entities are important as they determine how models are detected and classified. Refer to the [Entity Detection Documentation](gazebo_entity_detection.md) for details on model naming and the detection process. 
+ --- diff --git a/docs/gazebo_entity_detection.md b/docs/gazebo_entity_detection.md new file mode 100644 index 000000000..ebcd6b15b --- /dev/null +++ b/docs/gazebo_entity_detection.md @@ -0,0 +1,147 @@ +# Entity Detection in GEMstack + +This module contains implementations for detecting entities (pedestrians, vehicles, obstacles, etc.) in the environment. The available detectors are: + +## Agent Detection + +### OmniscientAgentDetector + +The `OmniscientAgentDetector` works with the basic simulator to obtain agent states that are being simulated. It receives updates from the simulator and maintains a thread-safe dictionary of all detected agents. + +Example usage: + +```python +from onboard.perception.agent_detection import OmniscientAgentDetector +from onboard.interface.gem_simulator import GEMDoubleIntegratorSimulationInterface + +# Create the simulator interface +simulator = GEMDoubleIntegratorSimulationInterface() + +# Create the agent detector with the simulator interface +agent_detector = OmniscientAgentDetector(simulator) + +# Initialize and start +agent_detector.initialize() +simulator.start() + +# Later, when you need agent data: +agent_states = agent_detector.update() +``` + +### GazeboAgentDetector + +The `GazeboAgentDetector` works specifically with the Gazebo simulator, subscribing to the model states topic and converting Gazebo models into `AgentState` objects. It can be configured to track specific model prefixes. 
+ +Example usage: + +```python +from onboard.perception.agent_detection import GazeboAgentDetector +from onboard.interface.gem_gazebo import GEMGazeboInterface + +# Create the Gazebo interface +gazebo_interface = GEMGazeboInterface() + +# Define the model prefixes you want to track as agents +tracked_prefixes = ['pedestrian', 'person', 'car', 'vehicle'] + +# Create the agent detector with the Gazebo interface and the tracked prefixes +agent_detector = GazeboAgentDetector(gazebo_interface, tracked_prefixes) + +# Initialize and start +agent_detector.initialize() +gazebo_interface.start() + +# Later, when you need agent data: +agent_states = agent_detector.update() +``` + +### Agent State Format + +Agent detectors provide `AgentState` objects with the following properties: + +- `pose`: The position and orientation of the agent +- `dimensions`: The physical dimensions (length, width, height) of the agent +- `type`: The type of agent (car, pedestrian, bicyclist, etc.) +- `activity`: The activity state of the agent (stopped, moving, fast) +- `velocity`: The velocity vector in the agent's local frame +- `yaw_rate`: The rate of rotation around the vertical axis + +## Obstacle Detection + +### GazeboObstacleDetector + +The `GazeboObstacleDetector` works with the Gazebo simulator to detect obstacles such as traffic cones. It subscribes to the model states topic and converts Gazebo models into `Obstacle` objects based on specific model prefixes. 
+ +Example usage: + +```python +from onboard.perception.obstacle_detection import GazeboObstacleDetector +from onboard.interface.gem_gazebo import GEMGazeboInterface + +# Create the Gazebo interface +gazebo_interface = GEMGazeboInterface() + +# Define the model prefixes you want to track as obstacles +tracked_obstacle_prefixes = ['cone'] + +# Create the obstacle detector with the Gazebo interface +obstacle_detector = GazeboObstacleDetector(gazebo_interface, tracked_obstacle_prefixes) + +# Initialize and start +obstacle_detector.initialize() +gazebo_interface.start() + +# Later, when you need obstacle data: +obstacle_states = obstacle_detector.update() +``` + +### Obstacle State Format + +Obstacle detectors provide `Obstacle` objects with the following properties: + +- `pose`: The position and orientation of the obstacle +- `dimensions`: The physical dimensions of the obstacle +- `material`: The material type of the obstacle (traffic cone, barrier, etc.) +- `state`: The state of the obstacle (standing, left, right) + +For traffic cones in particular, the orientation is analyzed to determine if the cone is: +- `STANDING`: Upright within normal thresholds +- `LEFT`: Tipped to the left side +- `RIGHT`: Tipped to the right side + +## Configuration + +Both detectors can be configured through settings in the configuration file: + +```yaml +simulator: + agent_tracker: + model_prefixes: + - pedestrian + - person + - bicycle + - bike + - car + - vehicle + - truck + rate: 10.0 # Hz - how frequently to process model updates + obstacle_tracker: + model_prefixes: + - cone + rate: 10.0 # Hz - how frequently to process model updates +``` + +The model prefixes are strings that match the beginning of Gazebo model names. For example, a model named "pedestrian1" would match the "pedestrian" prefix, while "cone5" would match the "cone" prefix. + +## Implementation Details + +Both agent and obstacle detection are implemented in the `GEMGazeboInterface` class, which: + +1. 
Monitors the Gazebo model states +2. Matches model prefixes to determine entity type +3. Transforms coordinates from Gazebo's global frame to the vehicle's START frame +4. Determines agent activity based on velocity, or obstacle state based on orientation +5. Creates and maintains appropriate state objects +6. Provides detection through registered callbacks + +Both GEM e2 and GEM e4 vehicle models are supported with proper coordinate transformations. \ No newline at end of file diff --git a/launch/fixed_route.yaml b/launch/fixed_route.yaml index 40e6b9a73..58d83b43b 100644 --- a/launch/fixed_route.yaml +++ b/launch/fixed_route.yaml @@ -38,7 +38,7 @@ log: # If True, then record all readings / commands of the vehicle interface. Default False vehicle_interface : True # Specify which components to record to behavior.json. Default records nothing - components : ['state_estimation','trajectory_tracking'] + components : ['state_estimation','trajectory_tracking', 'agent_detection'] # Specify which components of state to record to state.json. Default records nothing #state: ['all'] # Specify the rate in Hz at which to record state to state.json. 
Default records at the pipeline's rate @@ -72,8 +72,8 @@ variants: perception: state_estimation : OmniscientStateEstimator agent_detection : OmniscientAgentDetector - visualization: !include "klampt_visualization.yaml" - #visualization: !include "mpl_visualization.yaml" + # visualization: !include "klampt_visualization.yaml" + visualization: [!include "mpl_visualization.yaml", !include "klampt_visualization.yaml"] gazebo: run: mode: simulation @@ -82,7 +82,15 @@ variants: drive: perception: state_estimation: GNSSStateEstimator # Matches your Gazebo GNSS implementation - # visualization: !include "mpl_visualization.yaml" + agent_detection: + type: agent_detection.GazeboAgentDetector + args: + tracked_model_prefixes: ['pedestrian', 'car', 'bicycle'] + obstacle_detection: + type: obstacle_detection.GazeboObstacleDetector + args: + tracked_obstacle_prefixes: ['cone'] + visualization: !include "mpl_visualization.yaml" log_ros: log: ros_topics : !include "../GEMstack/knowledge/defaults/standard_ros_topics.yaml" \ No newline at end of file diff --git a/setup/Dockerfile.cuda11.8 b/setup/Dockerfile.cuda11.8 index fc9a6d185..ca8feb86e 100644 --- a/setup/Dockerfile.cuda11.8 +++ b/setup/Dockerfile.cuda11.8 @@ -68,7 +68,7 @@ COPY requirements.txt /tmp/requirements.txt RUN pip3 install -r /tmp/requirements.txt # Install other Dependencies -RUN apt-get install -y ros-noetic-septentrio-gnss-driver ros-noetic-ackermann-msgs ros-noetic-novatel-gps-msgs +RUN apt-get install -y ros-noetic-septentrio-gnss-driver ros-noetic-ackermann-msgs ros-noetic-novatel-gps-msgs ros-noetic-gazebo-msgs USER ${USER} diff --git a/setup/Dockerfile.cuda12 b/setup/Dockerfile.cuda12 index 49f32c7bf..906d66f7c 100644 --- a/setup/Dockerfile.cuda12 +++ b/setup/Dockerfile.cuda12 @@ -69,7 +69,7 @@ COPY requirements.txt /tmp/requirements.txt RUN pip3 install -r /tmp/requirements.txt # Install other Dependencies -RUN apt-get install -y ros-noetic-septentrio-gnss-driver ros-noetic-ackermann-msgs 
ros-noetic-novatel-gps-msgs +RUN apt-get install -y ros-noetic-septentrio-gnss-driver ros-noetic-ackermann-msgs ros-noetic-novatel-gps-msgs ros-noetic-gazebo-msgs USER ${USER} diff --git a/setup/setup_this_machine.sh b/setup/setup_this_machine.sh index 80d308118..4617388a8 100755 --- a/setup/setup_this_machine.sh +++ b/setup/setup_this_machine.sh @@ -85,4 +85,4 @@ pip3 install -r $temp_requirements rm $temp_requirements #install other dependencies -sudo apt-get install -y ros-noetic-septentrio-gnss-driver ros-noetic-ackermann-msgs ros-noetic-novatel-gps-msgs \ No newline at end of file +sudo apt-get install -y ros-noetic-septentrio-gnss-driver ros-noetic-ackermann-msgs ros-noetic-novatel-gps-msgs ros-noetic-gazebo-msgs \ No newline at end of file