@@ -198,10 +198,16 @@ def state_to_polygon(self, state):
198198
199199 return temp_obj .polygon ()
200200
def generate_action_set(max_vel=0.75, max_turn_rate=0.3):
    """Build the discrete action set for the first-order Dubins car.

    Each action is a ``(forward_velocity, turn_rate)`` pair.  The set covers
    forward and reverse motion combined with left, straight, and right
    steering, for six actions total.

    Args:
        max_vel: magnitude of the forward/reverse velocity command.
            Defaults to 0.75, the value previously hard-coded here, so
            existing zero-argument callers keep their old behavior.
        max_turn_rate: magnitude of the left/right turn-rate command.
            Defaults to 0.3 for the same backward-compatibility reason.

    Returns:
        list[tuple[float, float]]: the six (velocity, turn_rate) actions.
    """
    # NOTE: the old hard-coded implementation was left behind as a
    # commented-out copy; it is removed here — version control keeps history.
    return [
        (max_vel, -max_turn_rate), (max_vel, 0.0), (max_vel, max_turn_rate),
        (-max_vel, -max_turn_rate), (-max_vel, 0.0), (-max_vel, max_turn_rate),
    ]
206212class ParkingSolverFirstOrderDubins (AStar ):
207213 """sample use of the astar algorithm. In this exemple we work on a maze made of ascii characters,
@@ -212,11 +218,17 @@ def __init__(self, vehicle=None, obstacles=None, actions=None):
212218
213219 # Vehicle model
214220 self ._vehicle = None
221+
222+ # settings
223+ self .T = settings .get ("planning.dubins.integrator.time_step" , 1.5 )
# NOTE(review): self.T and self.dt read the SAME settings key
# ("planning.dubins.integrator.time_step") with different defaults
# (1.5 vs 0.25).  Any configured value would set both the horizon T and
# the integration step dt to the same number, which looks like a
# copy-paste bug — T presumably needs its own key (e.g. a
# ".../horizon" or ".../duration" setting).  TODO confirm intended key.
224+ self .dt = settings .get ("planning.dubins.integrator.time_step" , .25 )
225+ self .max_v = settings .get ("planning.dubins.actions.max_velocity" , 0.75 )
226+ self .max_turn_rate = settings .get ("planning.dubins.actions.max_turn_rate" , 0.3 )
215227
# Dubins car model: state x = (tx, ty, theta), control u = (fwd_velocity, turnRate).
216228 self .vehicle = DubinsCar () #x = (tx,ty,theta) and u = (fwd_velocity,turnRate).
217- self .vehicle_sim = DubinsCarIntegrator (self .vehicle , 1.5 , 0.25 )
# Integrator now takes the configurable horizon/step instead of hard-coded 1.5 / 0.25.
229+ self .vehicle_sim = DubinsCarIntegrator (self .vehicle , self . T , self . dt )
218230 #@TODO create a more standardized way to define the actions
219- self ._actions = generate_action_set ()
# Action set now built from the configurable max velocity / turn rate.
231+ self ._actions = generate_action_set (self . max_v , self . max_turn_rate )
220232
221233
222234 @property
@@ -420,6 +432,8 @@ def __init__(self):
420432 # self.planner = ParkingSolverSecondOrderDubins()
421433 self .planner = ParkingSolverFirstOrderDubins ()
422434
435+ self .iterations = settings .get ("planning.astar.iterations" , 20000 )
436+
def state_inputs(self):
    """Declare which pieces of the shared state this component consumes.

    Returns:
        list[str]: the single entry 'all', requesting the full state.
    """
    requested = ['all']
    return requested
425439
@@ -481,8 +495,12 @@ def update(self, state : AllState) -> Route:
481495 all_obstacles = {** agents , ** obstacles }
482496 # print(f"Obstacles {obstacles}")
483497 print (f"Agents { agents } " )
484- route = state .route
485- goal_pose = ObjectPose (frame = ObjectFrameEnum .ABSOLUTE_CARTESIAN , t = 15 , x = state .mission_plan .goal_x ,y = state .mission_plan .goal_y ,z = 0 ,yaw = state .mission_plan .goal_orientation )
498+ # goal= state.goal
499+ # print(goal.frame)
500+ # assert goal.frame == ObjectFrameEnum.ABSOLUTE_CARTESIAN
501+ # assert goal.v == 0
502+ # print(f"Goal {goal}")
503+ goal_pose = ObjectPose (frame = ObjectFrameEnum .ABSOLUTE_CARTESIAN , t = 0.0 , x = state .mission_plan .goal_x ,y = state .mission_plan .goal_y ,z = 0.0 ,yaw = state .mission_plan .goal_orientation )
486504 goal = VehicleState .zero ()
487505 goal .pose = goal_pose
488506 goal .v = 0
@@ -500,7 +518,7 @@ def update(self, state : AllState) -> Route:
500518 self .planner .vehicle = vehicle
501519
502520 # Compute the new trajectory and return it
503- res = list (self .planner .astar (start_state , goal_state , reversePath = False , iterations = 10000 ))
521+ res = list (self .planner .astar (start_state , goal_state , reversePath = False , iterations = self . iterations ))
504522 # points = [state[:2] for state in res] # change here to return the theta as well
505523 points = []
506524 for state in res :
0 commit comments