diff --git a/.vscode/settings.json b/.vscode/settings.json index 935e249..4068b73 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -65,6 +65,7 @@ "gson", "Holonomic", "Jetson", + "multitag", "PATHFIND", "Pathfinding", "Pathfinds", diff --git a/src/main/java/frc/robot/Constants.java b/src/main/java/frc/robot/Constants.java index 2c51cc7..7777aec 100644 --- a/src/main/java/frc/robot/Constants.java +++ b/src/main/java/frc/robot/Constants.java @@ -519,7 +519,6 @@ public static class VisionOdometryConstants { Units.inchesToMeters(9.725), Units.inchesToMeters(20.25)); - long num = 999_999_999; public static final Vector LEFT_SINGLE_TAG_STANDARD_DEVIATION = VecBuilder.fill(4, 4, 8); public static final Vector LEFT_MULTI_TAG_STANDARD_DEVIATION = VecBuilder.fill(0.5, 0.5, 1); diff --git a/src/main/java/frc/robot/subsystems/vision/PhotonVisionOdometry.java b/src/main/java/frc/robot/subsystems/vision/PhotonVisionOdometry.java index c5d54da..b97afad 100644 --- a/src/main/java/frc/robot/subsystems/vision/PhotonVisionOdometry.java +++ b/src/main/java/frc/robot/subsystems/vision/PhotonVisionOdometry.java @@ -2,14 +2,17 @@ import edu.wpi.first.apriltag.AprilTagFieldLayout; import edu.wpi.first.apriltag.AprilTagFields; +import edu.wpi.first.math.Matrix; import edu.wpi.first.math.geometry.Pose2d; import edu.wpi.first.math.geometry.Pose3d; import edu.wpi.first.math.geometry.Transform2d; - +import edu.wpi.first.math.kinematics.ChassisSpeeds; +import edu.wpi.first.math.numbers.N1; +import edu.wpi.first.math.numbers.N3; import edu.wpi.first.wpilibj.smartdashboard.Field2d; -import edu.wpi.first.wpilibj.smartdashboard.SmartDashboard; import frc.robot.Robot; import frc.robot.util.vision.Cameras; +import frc.robot.util.vision.VisionObservation; import java.awt.Desktop; import java.util.ArrayList; @@ -21,6 +24,7 @@ import org.photonvision.simulation.VisionSystemSim; import org.photonvision.targeting.PhotonPipelineResult; import org.photonvision.targeting.PhotonTrackedTarget; 
+ import swervelib.SwerveDrive; import swervelib.telemetry.SwerveDriveTelemetry; @@ -31,6 +35,11 @@ */ public class PhotonVisionOdometry { + // Tuning constants for cross-camera consensus and multitag filtering. + private static final double CONSENSUS_MAX_TRANSLATION_METERS = 0.6; + private static final double CONSENSUS_MAX_ROTATION_RADIANS = Math.toRadians(15.0); + private static final double STRONG_MULTITAG_MAX_DISTANCE_METERS = 4.0; + /** * April Tag Field Layout of the year. */ @@ -102,42 +111,356 @@ public static Pose2d getAprilTagPose(int aprilTag, Transform2d robotOffset) { } - /** - * Update the pose estimation inside of {@link SwerveDrive} with all of the - * given poses. - * - * @param swerveDrive {@link SwerveDrive} instance. + /* + * POSITION ESTIMATION + */ +/** + * Computes the average distance from the camera to each AprilTag used in a pose estimate. + * + *

This is used as a rough quality metric for the estimate. In general, estimates built + * from closer tags are more trustworthy than estimates built from distant tags, since distance + * tends to amplify vision noise and pose uncertainty. + * + * @param est the estimated robot pose containing the list of AprilTag targets used + * @return the average camera-to-tag distance in meters, or {@link Double#POSITIVE_INFINITY} + * if no targets were used + */ +private double averageTagDistance(EstimatedRobotPose est) { + // If no tags contributed to this estimate, treat the distance as effectively unusable. + if (est.targetsUsed == null || est.targetsUsed.isEmpty()) return Double.POSITIVE_INFINITY; + + double sum = 0.0; + + /* + * Sum the Euclidean distance from the camera to every tag that contributed + * to this pose estimate. + */ + for (var target : est.targetsUsed) { + sum += target.getBestCameraToTarget().getTranslation().getNorm(); + } + + // Return the mean distance across all contributing tags. + return sum / est.targetsUsed.size(); +} + +/** + * Finds the highest pose ambiguity among all AprilTags used in a pose estimate. + * + *

Pose ambiguity is a PhotonVision-provided measure describing how uncertain a tag-based + * pose solve is. A larger ambiguity generally indicates a less trustworthy estimate. + * This method uses the worst contributing tag as a conservative quality metric. + * + * @param est the estimated robot pose containing the list of AprilTag targets used + * @return the maximum ambiguity value across all used tags, or {@code 1.0} if no targets were used + */ +private double maxAmbiguity(EstimatedRobotPose est) { + // If no tags were used, return a pessimistic ambiguity value. + if (est.targetsUsed == null || est.targetsUsed.isEmpty()) return 1.0; + + double max = 0.0; + + /* + * Track the worst ambiguity among all tags contributing to this estimate. + * Using the maximum ambiguity is intentionally conservative. */ - public void updatePoseEstimation(SwerveDrive swerveDrive) { - if (SwerveDriveTelemetry.isSimulation && swerveDrive.getSimulationDriveTrainPose().isPresent()) { + for (var target : est.targetsUsed) { + max = Math.max(max, target.getPoseAmbiguity()); + } + + return max; +} + +/** + * Determines whether two robot poses agree closely enough to be considered consistent. + * + *

+ * Agreement is based on both translation and rotation:
+ *
+ * <ul>
+ *   <li>the translational distance between the two poses must be below {@code maxXY}, and</li>
+ *   <li>the absolute rotational difference must be below {@code maxThetaRad}.</li>
+ * </ul>
+ * + * @param posA the first pose + * @param posB the second pose + * @param maxXY the maximum allowed translational difference in meters + * @param maxThetaRad the maximum allowed rotational difference in radians + * @return {@code true} if the poses are within both tolerances, otherwise {@code false} + */ +private boolean agrees(Pose2d posA, Pose2d posB, double maxXY, double maxThetaRad) { + return posA.getTranslation().getDistance(posB.getTranslation()) < maxXY + && Math.abs(posA.getRotation().minus(posB.getRotation()).getRadians()) < maxThetaRad; +} + +/** + * Filters a list of vision observations by requiring cross-camera agreement or strong multitag support. + * + *

+ * An observation is accepted if either:
+ *
+ * <ul>
+ *   <li>at least one other camera's observation agrees with it within the consensus tolerances, or</li>
+ *   <li>it is a strong multitag estimate (at least two tags, closer on average than the multitag distance limit).</li>
+ * </ul>
+ * + *

This helps reject outliers from single-camera failures while still allowing good multitag + * estimates to pass through even if no other camera currently agrees. + * + * @param obs the raw list of vision observations + * @return a filtered list containing only observations that passed consensus checks + */ +private List filterByConsensus(List obs) { + List accepted = new ArrayList<>(); + + /* + * Compare every observation against every other observation. + * If another camera agrees with it, it gains support and is more likely to be valid. + */ + for (VisionObservation obsA : obs) { + int agreements = 0; + + for (VisionObservation obsB : obs) { + // Skip comparing an observation to itself. + if (obsA == obsB) continue; + /* - * In the maple-sim, odometry is simulated using encoder values, accounting for - * factors like skidding and drifting. - * As a result, the odometry may not always be 100% accurate. - * However, the vision system should be able to provide a reasonably accurate - * pose estimation, even when odometry is incorrect. - * (This is why teams implement vision system to correct odometry.) - * Therefore, we must ensure that the actual robot pose is provided in the - * simulator when updating the vision simulation during the simulation. + * Count how many other observations are spatially consistent with this one. + * The thresholds here define what "agreement" means across cameras. */ - visionSim.update(swerveDrive.getSimulationDriveTrainPose().get()); + if (agrees( + obsA.pose, + obsB.pose, + CONSENSUS_MAX_TRANSLATION_METERS, + CONSENSUS_MAX_ROTATION_RADIANS)) { + agreements++; + } + } + + /* + * Keep the observation if: + * 1. At least one other camera agrees with it, indicating cross-camera consensus, or + * 2. It is a strong multitag result, which is generally more trustworthy on its own. 
+ */ + if (agreements > 0 + || (obsA.isMultiTag + && obsA.tagCount >= 2 + && obsA.avgTagDistance < STRONG_MULTITAG_MAX_DISTANCE_METERS)) { + accepted.add(obsA); } - for (Cameras camera : Cameras.values()) { - Optional poseEst = getEstimatedGlobalPose(camera); - if (poseEst.isPresent()) { - - var pose = poseEst.get(); - swerveDrive.addVisionMeasurement( - pose.estimatedPose.toPose2d(), - pose.timestampSeconds, - camera.curStdDevs); - - SmartDashboard.putNumber("VisionX", pose.estimatedPose.getX()); - SmartDashboard.putNumber("VisionY", pose.estimatedPose.getY()); + } + + return accepted; +} + +/** + * Determines whether a vision measurement should be accepted given the current robot state. + * + *

+ * Robot motion and odometry updates are actively occurring, so blindly accepting large pose
+ * jumps can destabilize localization. This method allows:
+ *
+ * <ul>
+ *   <li>small, ordinary corrections at any time,</li>
+ *   <li>larger corrections when the vision data is strong (multitag with reasonably close tags), and</li>
+ *   <li>larger corrections when multiple accepted observations support this update cycle.</li>
+ * </ul>
+ * + * @param obs the vision observation being considered + * @param currentPose the robot's current estimated pose from odometry / pose estimation + * @param speeds the robot's current chassis speeds + * @param supportCount the number of accepted observations supporting the current vision update cycle + * @return {@code true} if the measurement should be applied, otherwise {@code false} + */ +private boolean shouldAcceptEnabledMeasurement( + VisionObservation obs, + Pose2d currentPose, + ChassisSpeeds speeds, + int supportCount) { + + // Compute translational and angular error between the current estimate and vision. + double translationalError = + currentPose.getTranslation().getDistance(obs.pose.getTranslation()); + double headingError = + Math.abs(currentPose.getRotation().minus(obs.pose.getRotation()).getRadians()); + + // Use planar speed magnitude to scale allowable positional correction. + double speed = Math.hypot(speeds.vxMetersPerSecond, speeds.vyMetersPerSecond); + + /* + * Base tolerance: + * - Allow larger translational jumps when the robot is moving faster, since odometry can drift + * more under motion. + * - Keep heading jump tolerance fixed for now. + */ + double maxTranslationJump = 0.5 + 0.35 * speed; + double maxHeadingJump = Math.toRadians(20); + + /* + * Define conditions under which a larger-than-normal correction is acceptable: + * - strongVision: multitag solution with reasonably close tags + * - hasSupport: multiple accepted observations this cycle + */ + boolean strongVision = obs.isMultiTag && obs.avgTagDistance < 4.0; + boolean hasSupport = supportCount >= 2; + + // Accept small, ordinary corrections immediately. + if (translationalError < maxTranslationJump && headingError < maxHeadingJump) { + return true; + } + + /* + * Otherwise, only accept the correction if the vision data is especially trustworthy + * or multiple observations support it. 
+ */ + return strongVision || hasSupport; +} + +/** + * Updates the robot pose estimator with valid vision measurements from all configured cameras. + * + *

This method performs the following high-level steps: + *

    + *
+ * <ol>
+ *   <li>Updates the vision simulator during simulation</li>
+ *   <li>Collects pose estimates from each camera</li>
+ *   <li>Builds {@link VisionObservation} objects with quality metadata</li>
+ *   <li>Filters out inconsistent observations using cross-camera consensus</li>
+ *   <li>Falls back to multitag-only observations if consensus fails</li>
+ *   <li>Applies accepted measurements to the swerve drive pose estimator</li>
+ * </ol>
+ *
+ * <p>
The goal is to make vision updates robust against bad individual camera measurements while + * still allowing strong observations to correct odometry drift. + * + * @param swerveDrive the {@link SwerveDrive} instance whose pose estimator should be updated + */ +public void updatePoseEstimation(SwerveDrive swerveDrive) { + if (SwerveDriveTelemetry.isSimulation && swerveDrive.getSimulationDriveTrainPose().isPresent()) { + /* + * In simulation, the drivetrain pose used by the simulator represents the "ground truth" + * position of the robot. + * + * Simulated odometry may still include effects such as skidding or drift, meaning the pose + * estimator's internal odometry can deviate from truth just like on a real robot. + * + * Since vision exists specifically to correct odometry drift, the vision simulator should + * be updated using the true simulated robot pose rather than the potentially drifted estimate. + */ + visionSim.update(swerveDrive.getSimulationDriveTrainPose().get()); + } + + // Read the robot's current estimated pose and velocity for gating future measurements. + Pose2d currentPose = swerveDrive.getPose(); + ChassisSpeeds speeds = swerveDrive.getRobotVelocity(); + + List observations = new ArrayList<>(); + + /* + * Query every configured camera for a global pose estimate. + * Each valid estimate is converted into a VisionObservation containing + * pose data plus metadata used for filtering and trust evaluation. + */ + for (Cameras camera : Cameras.values()) { + Optional poseEst = getEstimatedGlobalPose(camera); + + // Skip cameras that do not currently have a valid estimate. + if (!poseEst.isPresent()) continue; + + EstimatedRobotPose estPos = poseEst.get(); + Pose2d estPose2d = estPos.estimatedPose.toPose2d(); + + // Determine how many tags contributed to this estimate. + int tagsUsed = estPos.targetsUsed == null ? 0 : estPos.targetsUsed.size(); + + // Ignore estimates that were not built from any tags. 
+ if (tagsUsed == 0) continue; + + // Compute quality metrics used later for filtering and acceptance decisions. + double avgDist = averageTagDistance(estPos); + double maxTagAmbiguity = maxAmbiguity(estPos); + boolean isMultiTag = tagsUsed >= 2; + Matrix currStdDevs = camera.curStdDevs; + + // Reject highly ambiguous estimates before they enter the consensus pipeline. + if (maxTagAmbiguity > maximumAmbiguity) { + continue; + } + + /* + * Store a normalized observation record for later filtering. + * This keeps the later pipeline cleaner by collecting all relevant + * metadata in one place. + */ + observations.add(new VisionObservation( + camera, + estPos, + estPose2d, + estPos.timestampSeconds, + tagsUsed, + avgDist, + maxTagAmbiguity, + isMultiTag, + currStdDevs + )); + } + + // Nothing to do if no camera produced a usable observation. + if (observations.isEmpty()) return; + + // First-pass rejection of outliers using cross-camera agreement. + List filtered = filterByConsensus(observations); + + if (filtered.isEmpty()) { + /* + * If no observations achieved consensus, fall back to multitag-only observations. + * This gives strong single-camera multitag estimates a chance to still correct pose. + */ + filtered = observations.stream() + .filter(o -> o.isMultiTag) + .toList(); + } + + // If nothing survived filtering or fallback, skip this update cycle entirely. + if (filtered.isEmpty()) { + return; + } + + /* + * Compute per-observation support: for each observation, count how many other + * observations have a sufficiently similar pose (i.e., they agree). This avoids + * treating all multitag observations as mutually supporting when they disagree. + */ + int n = filtered.size(); + List supportCounts = new ArrayList<>(n); + for (int i = 0; i < n; i++) { + // Start each observation with a support count of 1 (itself). + supportCounts.add(1); + } + + // Thresholds for considering two observations to be in agreement. 
+ final double translationEpsMeters = 0.5; // positional agreement threshold + final double rotationEpsRadians = Math.toRadians(10.0); // angular agreement threshold + + for (int i = 0; i < n; i++) { + Pose2d poseI = filtered.get(i).pose; + for (int j = i + 1; j < n; j++) { + Pose2d poseJ = filtered.get(j).pose; + Transform2d delta = poseI.minus(poseJ); + double dist = delta.getTranslation().getNorm(); + double rotDiff = Math.abs(delta.getRotation().getRadians()); + if (dist <= translationEpsMeters && rotDiff <= rotationEpsRadians) { + // Observations i and j agree; increment support for both. + supportCounts.set(i, supportCounts.get(i) + 1); + supportCounts.set(j, supportCounts.get(j) + 1); } } + } + /* + * Feed accepted vision measurements into the drivetrain pose estimator. + * Each measurement is individually gated against current pose and robot motion. + */ + for (int i = 0; i < n; i++) { + VisionObservation obs = filtered.get(i); + int support = supportCounts.get(i); + if (shouldAcceptEnabledMeasurement(obs, currentPose, speeds, support)) { + swerveDrive.addVisionMeasurement(obs.pose, obs.timestamp, obs.stdDevs); + } } +} /** * Generates the estimated robot pose. 
Returns empty if: diff --git a/src/main/java/frc/robot/util/vision/VisionObservation.java b/src/main/java/frc/robot/util/vision/VisionObservation.java new file mode 100644 index 0000000..bcac2d9 --- /dev/null +++ b/src/main/java/frc/robot/util/vision/VisionObservation.java @@ -0,0 +1,41 @@ +package frc.robot.util.vision; + +import org.photonvision.EstimatedRobotPose; + +import edu.wpi.first.math.Matrix; +import edu.wpi.first.math.geometry.Pose2d; +import edu.wpi.first.math.numbers.N1; +import edu.wpi.first.math.numbers.N3; + +public class VisionObservation { + public final Cameras camera; + public final EstimatedRobotPose estimate; + public final Pose2d pose; + public final double timestamp; + public final int tagCount; + public final double avgTagDistance; + public final double ambiguity; + public final boolean isMultiTag; + public final Matrix stdDevs; + + public VisionObservation( + Cameras camera, + EstimatedRobotPose estimate, + Pose2d pose, + double timestamp, + int tagCount, + double avgTagDistance, + double ambiguity, + boolean isMultiTag, + Matrix stdDevs) { + this.camera = camera; + this.estimate = estimate; + this.pose = pose; + this.timestamp = timestamp; + this.tagCount = tagCount; + this.avgTagDistance = avgTagDistance; + this.ambiguity = ambiguity; + this.isMultiTag = isMultiTag; + this.stdDevs = stdDevs; + } +} \ No newline at end of file