diff --git a/src/software/ai/hl/stp/play/play.cpp b/src/software/ai/hl/stp/play/play.cpp
index bdeccf8b35..a0cb613102 100644
--- a/src/software/ai/hl/stp/play/play.cpp
+++ b/src/software/ai/hl/stp/play/play.cpp
@@ -298,6 +298,10 @@ Play::assignTactics(const WorldPtr &world_ptr, TacticVector tactic_vector,
     // "jobs" (the Tactics).
     Matrix matrix(num_rows, num_cols);
 
+    std::map<RobotId, PreviousAssignment> prev_assignments;
+    std::map<RobotId, PreviousAssignment> curr_assignments;
+    if (!assignments_cache.empty()) prev_assignments = assignments_cache.back();  // was `!prev_assignments.empty()`, which is always false here
+
     // Initialize the matrix with the cost of assigning each Robot to each Tactic
     for (size_t row = 0; row < num_rows; row++)
     {
@@ -305,12 +309,33 @@ Play::assignTactics(const WorldPtr &world_ptr, TacticVector tactic_vector,
         {
             Robot robot = robots_to_assign.at(row);
             std::shared_ptr<Tactic> tactic = tactic_vector.at(col);
+            PreviousAssignment current_assignment;
+            current_assignment.previous_tactic = tactic;
+            current_assignment.penalty_cost = static_cast<uint32_t>(8);  // base penalty for a tactic switch
+
             auto primitives = primitive_sets.at(col);
             CHECK(primitives.contains(robot.id()))
                 << "Couldn't find a primitive for robot id " << robot.id();
             double robot_cost_for_tactic =
                 primitives.at(robot.id())->getEstimatedPrimitiveCost();
+
+            if (!prev_assignments.empty() && prev_assignments.contains(robot.id()) && tactic)
+            {
+                const PreviousAssignment& assignment = prev_assignments.at(robot.id());
+                if (typeid(*assignment.previous_tactic) == typeid(*tactic))
+                    // Same tactic type as last tick: halve the penalty, floored at the base value
+                    current_assignment.penalty_cost = std::max(current_assignment.penalty_cost, assignment.penalty_cost / 2);
+                else
+                {
+                    // Tactic type changed: charge the accumulated penalty and double it for next tick
+                    robot_cost_for_tactic += assignment.penalty_cost;
+                    current_assignment.penalty_cost = assignment.penalty_cost * 2;
+                }
+            }
+
+            curr_assignments[robot.id()] = current_assignment;
+
             std::set<RobotCapability> required_capabilities =
                 tactic->robotCapabilityRequirements();
             std::set<RobotCapability> robot_capabilities =
@@ -335,6 +360,12 @@ Play::assignTactics(const WorldPtr &world_ptr, TacticVector tactic_vector,
         }
     }
 
+    if (assignments_cache.size() >= ASSIGNMENTS_CACHE_MAX_SIZE)  // >= caps the cache at MAX_SIZE (plain > allowed MAX_SIZE + 1)
+    {
+        assignments_cache.pop_front();
+    }
+
+    assignments_cache.push_back(curr_assignments);
     // Apply the Munkres/Hungarian algorithm to the matrix.
     Munkres m;
     m.solve(matrix);
diff --git a/src/software/ai/hl/stp/play/play.h b/src/software/ai/hl/stp/play/play.h
index b66f240b05..ed1f0cba0e 100644
--- a/src/software/ai/hl/stp/play/play.h
+++ b/src/software/ai/hl/stp/play/play.h
@@ -191,4 +191,30 @@ class Play
     uint64_t sequence_number = 0;
     RobotNavigationObstacleFactory obstacle_factory;
+
+    // Max number of past ticks of assignments retained in assignments_cache
+    static constexpr uint32_t ASSIGNMENTS_CACHE_MAX_SIZE = 5;
+    // A robot's tactic from a previous tick plus the cost penalty charged if
+    // the robot is reassigned to a different tactic type
+    struct PreviousAssignment
+    {
+        uint32_t penalty_cost;
+        std::shared_ptr<Tactic> previous_tactic;
+    };
+
+    // Recent per-tick assignments keyed by robot id, newest at the back
+    std::deque<std::map<RobotId, PreviousAssignment>> assignments_cache;
+
+    /**
+     * 1) matrix of each tactic
+     * 2) Robot w/ vector of prev tactics + costs (prune if out of threshold)
+     * 3)
+     *
+     * Info:
+     * - robot_cost_for_tactic for each robot
+     *
+     * Penalty Algorithms
+     * - give the same tactic a constant
+     * - Exponential punishment for matching tactics
+     * - Convolutions as a function of costs over time? For the given tactic? robot?
+     */
 };
 
diff --git a/src/software/ai/hl/stp/tactic/receiver/receiver_fsm.cpp b/src/software/ai/hl/stp/tactic/receiver/receiver_fsm.cpp
index 216f5293f5..a4a53c875b 100644
--- a/src/software/ai/hl/stp/tactic/receiver/receiver_fsm.cpp
+++ b/src/software/ai/hl/stp/tactic/receiver/receiver_fsm.cpp
@@ -177,6 +177,15 @@ void ReceiverFSM::adjustReceive(const Update& event)
             TbotsProto::DribblerMode::MAX_FORCE, TbotsProto::BallCollisionType::ALLOW,
             AutoChipOrKick{AutoChipOrKickMode::OFF, 0}));
     }
+    else  // otherwise hold robot_pos at the current orientation, dribbler still on — TODO(review): confirm desired orientation
+    {
+        event.common.set_primitive(std::make_unique<MovePrimitive>(
+            event.common.robot, robot_pos, event.common.robot.orientation(),
+            TbotsProto::MaxAllowedSpeedMode::PHYSICAL_LIMIT,
+            TbotsProto::ObstacleAvoidanceMode::AGGRESSIVE,
+            TbotsProto::DribblerMode::MAX_FORCE, TbotsProto::BallCollisionType::ALLOW,
+            AutoChipOrKick{AutoChipOrKickMode::OFF, 0}));
+    }
 }
 
 bool ReceiverFSM::passStarted(const Update& event)