|
48 | 48 | from .pedestrian_detection_utils import * |
49 | 49 | from ..interface.gem import GEMInterface |
50 | 50 | from ..component import Component |
| 51 | +from AgentTracker import AgentTracker |
| 52 | + |
51 | 53 |
|
52 | 54 | def box_to_fake_agent(box): |
53 | 55 | """Creates a fake agent state from an (x,y,w,h) bounding box. |
@@ -101,6 +103,8 @@ def __init__(self, vehicle_interface : GEMInterface) -> None: |
101 | 103 | # TF listener to get transformation from LiDAR to Camera |
102 | 104 | self.tf_listener = tf.TransformListener() |
103 | 105 |
|
| 106 | + self.agent_tracker = AgentTracker() |
| 107 | + |
104 | 108 | if self.debug: self.init_debug() |
105 | 109 |
|
106 | 110 | def init_debug(self,) -> None: |
@@ -136,21 +140,21 @@ def find_dims(self, clusters: List[List[np.ndarray]]) -> List[np.ndarray]: |
136 | 140 | # Work towards own tracking class instead of simple YOLO track? |
137 | 141 | # Fix division by time |
138 | 142 | # ret: Dict[track_id: vel[x, y, z]] |
def find_vels(self, track_ids: List[int], obj_centers: List[np.ndarray]) -> Dict[int, np.ndarray]:
    """Estimate per-track velocities by finite difference against the previous frame.

    Args:
        track_ids: tracker IDs for the objects detected this frame.
        obj_centers: 3D center (x, y, z) per track, aligned with track_ids;
            an empty array marks a track with no usable 3D points this frame.

    Returns:
        Mapping track_id -> velocity vector (x, y, z) in units of
        position-units per time-unit of self.curr_time/self.prev_time.
        Tracks without a usable previous pose, or not seen this frame,
        map to an empty array (matches the other find_* helpers).
    """
    track_id_center_map = dict(zip(track_ids, obj_centers))
    # Default value is an empty array rather than None to stay symmetric
    # with find_centers()/find_dims() return conventions.
    vels: Dict[int, np.ndarray] = defaultdict(lambda: np.array(()))

    dt = self.curr_time - self.prev_time
    if dt <= 0:
        # First frame (or a clock glitch): no elapsed time, no estimate.
        return vels

    for prev_track_id, prev_agent in self.prev_agents.items():
        if prev_track_id not in track_id_center_map:
            continue
        curr_center = track_id_center_map[prev_track_id]
        prev_pose = prev_agent.pose
        # Require a complete previous pose AND a complete current center.
        # Compare against None explicitly -- a coordinate of exactly 0.0 is
        # a valid position and must not be rejected by truthiness.
        # BUGFIX: the original tested `.shape == 3`; ndarray.shape is a
        # tuple, so that comparison was always False and velocities were
        # never computed. `.size == 3` is the intended test.
        if (prev_pose.x is not None and prev_pose.y is not None
                and prev_pose.z is not None and curr_center.size == 3):
            prev_center = np.array([prev_pose.x, prev_pose.y, prev_pose.z])
            vels[prev_track_id] = (curr_center - prev_center) / dt
    return vels
154 | 158 |
|
155 | 159 |
|
156 | 160 | # TODO: Separate debug/viz class, bbox and 2d 3d points funcs |
@@ -231,25 +235,37 @@ def update_object_states(self, track_result: List[Results], extracted_pts_all: L |
231 | 235 | # Separate numpy prob still faster for now |
232 | 236 | obj_centers = self.find_centers(pedestrians_3d_pts) |
233 | 237 | obj_dims = self.find_dims(pedestrians_3d_pts) |
234 | | - obj_vels = self.find_vels(track_ids, obj_centers) |
| 238 | + |
| 239 | + print("obj centers shape:") |
| 240 | + print(obj_centers.shape) |
| 241 | + |
| 242 | + print("obj dims shape: ") |
| 243 | + print(obj_dims.shape) |
| 244 | + |
| 245 | + # Assign id's based on whether or not agents overlap: |
| 246 | + # self.current_agents = self.agent_tracker.assign_ids(self.prev_agents, obj_centers, obj_dims) |
| 247 | + |
| 248 | + |
| 249 | + |
| 250 | + # obj_vels = self.find_vels(track_ids, obj_centers) |
235 | 251 |
|
236 | 252 | # Update Current AgentStates |
237 | | - for ind in range(num_objs): |
238 | | - obj_center = (None, None, None) if obj_centers[ind].size == 0 else obj_centers[ind] |
239 | | - obj_dim = (None, None, None) if obj_dims[ind].size == 0 else obj_dims[ind] |
240 | | - self.current_agents[track_ids[ind]] = ( |
241 | | - AgentState( |
242 | | - track_id = track_ids[ind], |
243 | | - pose=ObjectPose(t=0, x=obj_center[0], y=obj_center[1], z=obj_center[2] ,yaw=0,pitch=0,roll=0,frame=ObjectFrameEnum.CURRENT), |
244 | | - # (l, w, h) |
245 | | - # TODO: confirm (z -> l, x -> w, y -> h) |
246 | | - dimensions=(obj_dim[0], obj_dim[1], obj_dim[2]), |
247 | | - outline=None, |
248 | | - type=AgentEnum.PEDESTRIAN, |
249 | | - activity=AgentActivityEnum.MOVING, |
250 | | - velocity= None if obj_vels[track_ids[ind]].size == 0 else tuple(obj_vels[track_ids[ind]]), |
251 | | - yaw_rate=0 |
252 | | - )) |
| 253 | + # for ind in range(num_objs): |
| 254 | + # obj_center = (None, None, None) if obj_centers[ind].size == 0 else obj_centers[ind] |
| 255 | + # obj_dim = (None, None, None) if obj_dims[ind].size == 0 else obj_dims[ind] |
| 256 | + # self.current_agents[track_ids[ind]] = ( |
| 257 | + # AgentState( |
| 258 | + # track_id = track_ids[ind], |
| 259 | + # pose=ObjectPose(t=0, x=obj_center[0], y=obj_center[1], z=obj_center[2] ,yaw=0,pitch=0,roll=0,frame=ObjectFrameEnum.CURRENT), |
| 260 | + # # (l, w, h) |
| 261 | + # # TODO: confirm (z -> l, x -> w, y -> h) |
| 262 | + # dimensions=(obj_dim[0], obj_dim[1], obj_dim[2]), |
| 263 | + # outline=None, |
| 264 | + # type=AgentEnum.PEDESTRIAN, |
| 265 | + # activity=AgentActivityEnum.MOVING, |
| 266 | + # velocity= None if obj_vels[track_ids[ind]].size == 0 else tuple(obj_vels[track_ids[ind]]), |
| 267 | + # yaw_rate=0 |
| 268 | + # )) |
253 | 269 |
|
254 | 270 | def ouster_oak_callback(self, rgb_image_msg: Image, lidar_pc2_msg: PointCloud2): |
255 | 271 | # Convert to cv2 image and run detector |
|
0 commit comments