Skip to content

Commit 36a720f

Browse files
committed
Implemented Kalman filtering
Updated velocity estimation to use Kalman filtering
1 parent 8cb4102 commit 36a720f

2 files changed

Lines changed: 49 additions & 41 deletions

File tree

GEMstack/state/agent.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
from .physical_object import ObjectFrameEnum,ObjectPose,PhysicalObject,convert_vector
55
from enum import Enum
66
from typing import Tuple
7+
import numpy as np
78

89
class AgentEnum(Enum):
910
CAR = 0
@@ -27,6 +28,7 @@ class AgentState(PhysicalObject):
2728
activity : AgentActivityEnum
2829
velocity : Tuple[float,float,float] #estimated velocity in x,y,z, m/s and in agent's local frame
2930
yaw_rate : float #estimated yaw rate, in radians/s
31+
covariance : np.ndarray #covariance_matrix used in Kalman filtering
3032

3133
def velocity_local(self) -> Tuple[float,float,float]:
3234
"""Returns velocity in m/s in the agent's local frame."""

homework/pedestrian_detection.py

Lines changed: 47 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
import cv2
66
from typing import Dict
77
import open3d as o3d
8+
from numpy import linalg as la
89
import numpy as np
910
from sklearn.cluster import DBSCAN
1011
from scipy.spatial.transform import Rotation as R
@@ -13,6 +14,7 @@
1314
import sensor_msgs.point_cloud2 as pc2
1415
import struct, ctypes
1516
from message_filters import Subscriber, ApproximateTimeSynchronizer
17+
from typing import Tuple
1618

1719
# ----- Helper Functions -----
1820
def match_existing_pedestrian(
@@ -37,16 +39,52 @@ def match_existing_pedestrian(
3739

3840
return best_agent_id
3941

40-
def update_velocity(old_pose: "ObjectPose", new_pose: "ObjectPose", old_velocity: tuple, old_covariance: np.ndarray, dt: float, process_noise: float = 1e-4, meas_noise: float = .1) -> Tuple[tuple, np.ndarray]:
    """
    Estimate velocity with a constant-velocity Kalman filter.

    The raw measurement is the finite-difference velocity between the two
    poses; the filter blends it with the previous velocity estimate.

    Args:
        old_pose: previous pose (same frame as new_pose — TODO confirm frame).
        new_pose: current pose.
        old_velocity: previous (vx, vy, vz) estimate, m/s.
        old_covariance: previous 3x3 state covariance matrix P.
        dt: elapsed time between the two poses, seconds.
        process_noise: diagonal magnitude of the process noise Q.
        meas_noise: diagonal magnitude of the measurement noise R.

    Returns:
        ((vx, vy, vz), P_new): updated velocity estimate and covariance.
    """
    if dt <= 0:
        # No valid finite-difference measurement; keep the prior covariance.
        return ((0, 0, 0), old_covariance)

    x = np.asarray(old_velocity, dtype=float).reshape(3, 1)
    F = np.eye(3)   # constant-velocity model: state carries over unchanged
    H = np.eye(3)   # we observe the velocity state directly
    Q = np.eye(3) * process_noise
    R = np.eye(3) * meas_noise
    P = old_covariance

    # Predict
    x_pred = F @ x
    P_pred = F @ P @ F.T + Q

    # Kalman gain — the innovation covariance S must use the *predicted*
    # covariance P_pred, not the prior P.
    S = H @ P_pred @ H.T + R
    K = P_pred @ H.T @ np.linalg.inv(S)

    # Measurement: finite-difference velocity, built as a (3, 1) column so the
    # residual z - H @ x_pred stays (3, 1) instead of broadcasting to (3, 3).
    z = np.array([[new_pose.x - old_pose.x],
                  [new_pose.y - old_pose.y],
                  [new_pose.z - old_pose.z]]) / dt

    # Update
    y = z - H @ x_pred
    x_new = x_pred + K @ y
    P_new = (np.eye(3) - K @ H) @ P_pred

    updated_velocity = tuple(x_new.flatten())
    return (updated_velocity, P_new)
87+
5088

5189
def extract_roi_box(lidar_pc, center, half_extents):
5290
lower = center - half_extents
@@ -188,17 +226,12 @@ class PedestrianDetector2D(Component):
188226

189227
def __init__(self, vehicle_interface: GEMInterface):
190228
self.vehicle_interface = vehicle_interface
191-
<<<<<<< HEAD
192229
self.current_agents = {}
193230
self.tracked_agents = {}
194231
self.pedestrian_counter = 0
195232
# Variables to store synchronized sensor data:
196233
self.latest_image = None
197234
self.latest_lidar = None
198-
=======
199-
self.detector = YOLO('../../knowledge/detection/yolov8n.pt')
200-
self.last_person_boxes = []
201-
>>>>>>> af6089cb57e0b929e85028039fb16c4d4e151ddb
202235

203236
def rate(self) -> float:
204237
return 4.0
@@ -210,7 +243,6 @@ def state_outputs(self) -> list:
210243
return ['agents']
211244

212245
def initialize(self):
213-
<<<<<<< HEAD
214246
# Instead of individual subscriptions, use message_filters to synchronize
215247
self.rgb_sub = Subscriber('/oak/rgb/image_raw', Image)
216248
self.lidar_sub = Subscriber('/ouster/points', PointCloud2)
@@ -310,34 +342,6 @@ def update(self, vehicle: VehicleState) -> Dict[str, AgentState]:
310342
1.0])
311343
refined_center_vehicle_hom = self.T_l2v @ refined_center_lidar_hom
312344
refined_center_vehicle = refined_center_vehicle_hom[:3]
313-
=======
314-
#tell the vehicle to use image_callback whenever 'front_camera' gets a reading, and it expects images of type cv2.Mat
315-
self.vehicle_interface.subscribe_sensor('front_camera',self.image_callback,cv2.Mat)
316-
pass
317-
318-
def image_callback(self, image : cv2.Mat):
319-
detection_result = self.detector(image)
320-
self.last_person_boxes = []
321-
for result in detection_result:
322-
for box in result.boxes:
323-
if int(box.cls[0]) == 0: # check if bounding box is a person
324-
x, y, w, h = box.xywh[0].int().tolist()
325-
self.last_person_boxes.append((x, y, w, h))
326-
#uncomment if you want to debug the detector...
327-
#for bb in self.last_person_boxes:
328-
# x,y,w,h = bb
329-
# cv2.rectangle(image, (int(x-w/2), int(y-h/2)), (int(x+w/2), int(y+h/2)), (255, 0, 255), 3)
330-
#cv2.imwrite("pedestrian_detections.png",image)
331-
332-
def update(self, vehicle : VehicleState) -> Dict[str,AgentState]:
333-
res = {}
334-
for i,b in enumerate(self.last_person_boxes):
335-
x,y,w,h = b
336-
res['pedestrian'+str(i)] = box_to_fake_agent(b)
337-
if len(res) > 0:
338-
print("Detected",len(res),"pedestrians")
339-
return res
340-
>>>>>>> af6089cb57e0b929e85028039fb16c4d4e151ddb
341345

342346
R_vehicle = self.T_l2v[:3, :3] @ R_lidar
343347
euler_angles_vehicle = R.from_matrix(R_vehicle).as_euler('zyx', degrees=False)
@@ -370,7 +374,7 @@ def update(self, vehicle : VehicleState) -> Dict[str,AgentState]:
370374
if existing_id is not None:
371375
old_agent_state = self.tracked_agents[existing_id]
372376
dt = new_pose.t - old_agent_state.pose.t
373-
vx, vy, vz = compute_velocity(old_agent_state.pose, new_pose, dt)
377+
(vx, vy, vz), P = update_velocity(old_agent_state.pose, new_pose, old_agent_state.velocity, old_agent_state.covariance, dt)
374378

375379
updated_agent = AgentState(
376380
pose=new_pose,
@@ -379,7 +383,8 @@ def update(self, vehicle : VehicleState) -> Dict[str,AgentState]:
379383
type=AgentEnum.PEDESTRIAN,
380384
activity=AgentActivityEnum.MOVING,
381385
velocity=(vx, vy, vz),
382-
yaw_rate=0
386+
yaw_rate=0,
387+
covariance = P
383388
)
384389
agents[existing_id] = updated_agent
385390
self.tracked_agents[existing_id] = updated_agent
@@ -394,7 +399,8 @@ def update(self, vehicle : VehicleState) -> Dict[str,AgentState]:
394399
type=AgentEnum.PEDESTRIAN,
395400
activity=AgentActivityEnum.MOVING,
396401
velocity=(0, 0, 0),
397-
yaw_rate=0
402+
yaw_rate=0,
403+
covariance = np.eye(3)
398404
)
399405
agents[agent_id] = new_agent
400406
self.tracked_agents[agent_id] = new_agent

0 commit comments

Comments
 (0)