// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

package edu.wpi.first.math.estimator;

import edu.wpi.first.math.MathSharedStore;
import edu.wpi.first.math.MathUsageId;
import edu.wpi.first.math.MathUtil;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.Nat;
import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Transform2d;
import edu.wpi.first.math.geometry.Translation2d;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.kinematics.Kinematics;
import edu.wpi.first.math.kinematics.Odometry;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N3;
import java.util.NavigableMap;
import java.util.Optional;
import java.util.TreeMap;

/**
 * This class wraps {@link Odometry} to fuse latency-compensated vision measurements with encoder
 * measurements. Robot code should not use this directly; instead, use the particular type for your
 * drivetrain (e.g., {@link DifferentialDrivePoseEstimator}). It is intended to be a drop-in
 * replacement for {@link Odometry}; in fact, if you never call {@link
 * PoseEstimator#addVisionMeasurement} and only call {@link PoseEstimator#update} then this will
 * behave exactly the same as Odometry.
 *
 * <p>{@link PoseEstimator#update} should be called every robot loop.
 *
 * <p>{@link PoseEstimator#addVisionMeasurement} can be called as infrequently as you want; if you
 * never call it then this class will behave exactly like regular encoder odometry.
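 *
 * <p>As a rough usage sketch (the {@code estimator} construction and the {@code getCameraPose},
 * {@code getCameraTimestampSeconds}, and {@code getWheelPositions} helpers below are illustrative
 * placeholders, not part of this API):
 *
 * <pre>{@code
 * // In the robot's periodic loop: feed in odometry every iteration...
 * estimator.update(gyro.getRotation2d(), getWheelPositions());
 *
 * // ...and fold in vision poses whenever they arrive, tagged with their capture time.
 * if (visionResultIsFresh()) {
 *   estimator.addVisionMeasurement(getCameraPose(), getCameraTimestampSeconds());
 * }
 * }</pre>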
 *
 * @param <T> Wheel positions type.
 */
public class PoseEstimator<T> {
  private final Odometry<T> m_odometry;
  private final Matrix<N3, N1> m_q = new Matrix<>(Nat.N3(), Nat.N1());
  private final Matrix<N3, N3> m_visionK = new Matrix<>(Nat.N3(), Nat.N3());

  private static final double kBufferDuration = 1.5;
  // Maps timestamps to odometry-only pose estimates
  private final TimeInterpolatableBuffer<Pose2d> m_odometryPoseBuffer =
      TimeInterpolatableBuffer.createBuffer(kBufferDuration);
  // Maps timestamps to vision updates
  // Always contains one entry before the oldest entry in m_odometryPoseBuffer, unless there have
  // been no vision measurements after the last reset. May contain one entry while
  // m_odometryPoseBuffer is empty to correct for translation/rotation after a call to
  // resetRotation/resetTranslation.
  private final NavigableMap<Double, VisionUpdate> m_visionUpdates = new TreeMap<>();

  private Pose2d m_poseEstimate;

  /**
   * Constructs a PoseEstimator.
   *
   * @param kinematics A correctly-configured kinematics object for your drivetrain.
   * @param odometry A correctly-configured odometry object for your drivetrain.
   * @param stateStdDevs Standard deviations of the pose estimate (x position in meters, y position
   *     in meters, and heading in radians). Increase these numbers to trust your state estimate
   *     less.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  @SuppressWarnings("PMD.UnusedFormalParameter")
  public PoseEstimator(
      Kinematics<?, T> kinematics,
      Odometry<T> odometry,
      Matrix<N3, N1> stateStdDevs,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    m_odometry = odometry;

    m_poseEstimate = m_odometry.getPoseMeters();

    for (int i = 0; i < 3; ++i) {
      m_q.set(i, 0, stateStdDevs.get(i, 0) * stateStdDevs.get(i, 0));
    }
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    MathSharedStore.getMathShared().reportUsage(MathUsageId.kEstimator_PoseEstimator, 1);
  }

  /**
   * Sets the pose estimator's trust in global measurements. This might be used to change trust in
   * vision measurements after the autonomous period, or to change trust as distance to a vision
   * target increases.
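   *
   * <p>For example (the numbers here are illustrative, not recommended values):
   *
   * <pre>{@code
   * // Trust vision x/y to roughly half a meter, and trust its heading very little.
   * estimator.setVisionMeasurementStdDevs(VecBuilder.fill(0.5, 0.5, Math.toRadians(360)));
   * }</pre>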
   *
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y,
   *     theta]ᵀ, with units in meters and radians.
   */
  public final void setVisionMeasurementStdDevs(Matrix<N3, N1> visionMeasurementStdDevs) {
    var r = new double[3];
    for (int i = 0; i < 3; ++i) {
      r[i] = visionMeasurementStdDevs.get(i, 0) * visionMeasurementStdDevs.get(i, 0);
    }

    // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
    // and C = I. See wpimath/algorithms.md.
    for (int row = 0; row < 3; ++row) {
      if (m_q.get(row, 0) == 0.0) {
        m_visionK.set(row, row, 0.0);
      } else {
        m_visionK.set(
            row, row, m_q.get(row, 0) / (m_q.get(row, 0) + Math.sqrt(m_q.get(row, 0) * r[row])));
      }
    }
  }

  /**
   * Resets the robot's position on the field.
   *
   * <p>The gyroscope angle does not need to be reset here on the user's robot code. The library
   * automatically takes care of offsetting the gyro angle.
   *
   * @param gyroAngle The angle reported by the gyroscope.
   * @param wheelPositions The current encoder readings.
   * @param poseMeters The position on the field that your robot is at.
   */
  public void resetPosition(Rotation2d gyroAngle, T wheelPositions, Pose2d poseMeters) {
    // Reset state estimate and error covariance
    m_odometry.resetPosition(gyroAngle, wheelPositions, poseMeters);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPoseMeters();
  }

  /**
   * Resets the robot's pose.
   *
   * @param pose The pose to reset to.
   */
  public void resetPose(Pose2d pose) {
    m_odometry.resetPose(pose);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPoseMeters();
  }

  /**
   * Resets the robot's translation.
   *
   * @param translation The translation to reset to.
   */
  public void resetTranslation(Translation2d translation) {
    m_odometry.resetTranslation(translation);

    final var latestVisionUpdate = m_visionUpdates.lastEntry();
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();

    if (latestVisionUpdate != null) {
      // Apply vision compensation to the pose rotation
      final var visionUpdate =
          new VisionUpdate(
              new Pose2d(translation, latestVisionUpdate.getValue().visionPose.getRotation()),
              new Pose2d(translation, latestVisionUpdate.getValue().odometryPose.getRotation()));
      m_visionUpdates.put(latestVisionUpdate.getKey(), visionUpdate);
      m_poseEstimate = visionUpdate.compensate(m_odometry.getPoseMeters());
    } else {
      m_poseEstimate = m_odometry.getPoseMeters();
    }
  }

  /**
   * Resets the robot's rotation.
   *
   * @param rotation The rotation to reset to.
   */
  public void resetRotation(Rotation2d rotation) {
    m_odometry.resetRotation(rotation);

    final var latestVisionUpdate = m_visionUpdates.lastEntry();
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();

    if (latestVisionUpdate != null) {
      // Apply vision compensation to the pose translation
      final var visionUpdate =
          new VisionUpdate(
              new Pose2d(latestVisionUpdate.getValue().visionPose.getTranslation(), rotation),
              new Pose2d(latestVisionUpdate.getValue().odometryPose.getTranslation(), rotation));
      m_visionUpdates.put(latestVisionUpdate.getKey(), visionUpdate);
      m_poseEstimate = visionUpdate.compensate(m_odometry.getPoseMeters());
    } else {
      m_poseEstimate = m_odometry.getPoseMeters();
    }
  }

  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  public Pose2d getEstimatedPosition() {
    return m_poseEstimate;
  }

  /**
   * Returns the pose at a given timestamp, if the buffer is not empty.
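   *
   * <p>For example, to compare a delayed camera result against where the estimator thought the
   * robot was when the image was captured (assuming {@code captureTimestampSeconds} shares the
   * estimator's time base):
   *
   * <pre>{@code
   * estimator
   *     .sampleAt(captureTimestampSeconds)
   *     .ifPresent(pastPose -> System.out.println("Pose at capture: " + pastPose));
   * }</pre>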
   *
   * @param timestampSeconds The pose's timestamp in seconds.
   * @return The pose at the given timestamp (or Optional.empty() if the buffer is empty).
   */
  public Optional<Pose2d> sampleAt(double timestampSeconds) {
    // Step 0: If there are no odometry updates to sample, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return Optional.empty();
    }

    // Step 1: Make sure timestamp matches the sample from the odometry pose buffer. (When
    // sampling, the buffer will always use a timestamp between the first and last timestamps)
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();
    double newestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().lastKey();
    timestampSeconds =
        MathUtil.clamp(timestampSeconds, oldestOdometryTimestamp, newestOdometryTimestamp);

    // Step 2: If there are no applicable vision updates, use the odometry-only information.
    if (m_visionUpdates.isEmpty() || timestampSeconds < m_visionUpdates.firstKey()) {
      return m_odometryPoseBuffer.getSample(timestampSeconds);
    }

    // Step 3: Get the latest vision update from before or at the timestamp to sample at.
    double floorTimestamp = m_visionUpdates.floorKey(timestampSeconds);
    var visionUpdate = m_visionUpdates.get(floorTimestamp);

    // Step 4: Get the pose measured by odometry at the time of the sample.
    var odometryEstimate = m_odometryPoseBuffer.getSample(timestampSeconds);

    // Step 5: Apply the vision compensation to the odometry pose.
    return odometryEstimate.map(odometryPose -> visionUpdate.compensate(odometryPose));
  }

  /** Removes stale vision updates that won't affect sampling. */
  private void cleanUpVisionUpdates() {
    // Step 0: If there are no odometry samples, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return;
    }

    // Step 1: Find the oldest timestamp that needs a vision update.
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();

    // Step 2: If there are no vision updates before that timestamp, skip.
    if (m_visionUpdates.isEmpty() || oldestOdometryTimestamp < m_visionUpdates.firstKey()) {
      return;
    }

    // Step 3: Find the newest vision update timestamp before or at the oldest timestamp.
    double newestNeededVisionUpdateTimestamp = m_visionUpdates.floorKey(oldestOdometryTimestamp);

    // Step 4: Remove all entries strictly before the newest timestamp we need.
    m_visionUpdates.headMap(newestNeededVisionUpdateTimestamp, false).clear();
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
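   *
   * <p>As a sketch (where {@code camPose} and {@code camLatencySeconds} stand in for whatever your
   * vision pipeline reports):
   *
   * <pre>{@code
   * // Timestamp the measurement with when the frame was captured, not when it was processed.
   * estimator.addVisionMeasurement(camPose, Timer.getFPGATimestamp() - camLatencySeconds);
   * }</pre>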
   *
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link
   *     PoseEstimator#updateWithTime(double,Rotation2d,Object)} then you must use a timestamp with
   *     an epoch since FPGA startup (i.e., the epoch of this timestamp is the same epoch as {@link
   *     edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you should use {@link
   *     edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source or sync the epochs.
   */
  public void addVisionMeasurement(Pose2d visionRobotPoseMeters, double timestampSeconds) {
    // Step 0: If this measurement is old enough to be outside the pose buffer's timespan, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()
        || m_odometryPoseBuffer.getInternalBuffer().lastKey() - kBufferDuration
            > timestampSeconds) {
      return;
    }

    // Step 1: Clean up any old entries
    cleanUpVisionUpdates();

    // Step 2: Get the pose measured by odometry at the moment the vision measurement was made.
    var odometrySample = m_odometryPoseBuffer.getSample(timestampSeconds);

    if (odometrySample.isEmpty()) {
      return;
    }

    // Step 3: Get the vision-compensated pose estimate at the moment the vision measurement was
    // made.
    var visionSample = sampleAt(timestampSeconds);

    if (visionSample.isEmpty()) {
      return;
    }

    // Step 4: Measure the transform between the old pose estimate and the vision pose.
    var transform = visionRobotPoseMeters.minus(visionSample.get());

    // Step 5: We should not trust the transform entirely, so instead we scale this transform by a
    // Kalman gain matrix representing how much we trust vision measurements compared to our
    // current pose.
    var k_times_transform =
        m_visionK.times(
            VecBuilder.fill(
                transform.getX(), transform.getY(), transform.getRotation().getRadians()));

    // Step 6: Convert back to Transform2d.
    var scaledTransform =
        new Transform2d(
            k_times_transform.get(0, 0),
            k_times_transform.get(1, 0),
            Rotation2d.fromRadians(k_times_transform.get(2, 0)));

    // Step 7: Calculate and record the vision update.
    var visionUpdate =
        new VisionUpdate(visionSample.get().plus(scaledTransform), odometrySample.get());
    m_visionUpdates.put(timestampSeconds, visionUpdate);

    // Step 8: Remove later vision measurements. (Matches previous behavior)
    m_visionUpdates.tailMap(timestampSeconds, false).entrySet().clear();

    // Step 9: Update latest pose estimate. Since we cleared all updates after this vision update,
    // it's guaranteed to be the latest vision update.
    m_poseEstimate = visionUpdate.compensate(m_odometry.getPoseMeters());
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
   * <p>Note that the vision measurement standard deviations passed into this method will continue
   * to apply to future measurements until a subsequent call to {@link
   * PoseEstimator#setVisionMeasurementStdDevs(Matrix)} or this method.
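   *
   * <p>For instance, a team might inflate the standard deviations with distance to the target
   * (here {@code distanceMeters} is a placeholder for your own range estimate, and the
   * coefficients are illustrative):
   *
   * <pre>{@code
   * estimator.addVisionMeasurement(
   *     camPose,
   *     captureTimestampSeconds,
   *     VecBuilder.fill(0.1 * distanceMeters, 0.1 * distanceMeters, Math.toRadians(30)));
   * }</pre>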
   *
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link #updateWithTime}, then you must use a
   *     timestamp with an epoch since FPGA startup (i.e., the epoch of this timestamp is the same
   *     epoch as {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you
   *     should use {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source in
   *     this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  public void addVisionMeasurement(
      Pose2d visionRobotPoseMeters,
      double timestampSeconds,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    addVisionMeasurement(visionRobotPoseMeters, timestampSeconds);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called
   * every loop.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d update(Rotation2d gyroAngle, T wheelPositions) {
    return updateWithTime(MathSharedStore.getTimestamp(), gyroAngle, wheelPositions);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called
   * every loop.
   *
   * @param currentTimeSeconds Time at which this method was called, in seconds.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d updateWithTime(double currentTimeSeconds, Rotation2d gyroAngle, T wheelPositions) {
    var odometryEstimate = m_odometry.update(gyroAngle, wheelPositions);

    m_odometryPoseBuffer.addSample(currentTimeSeconds, odometryEstimate);

    if (m_visionUpdates.isEmpty()) {
      m_poseEstimate = odometryEstimate;
    } else {
      var visionUpdate = m_visionUpdates.get(m_visionUpdates.lastKey());
      m_poseEstimate = visionUpdate.compensate(odometryEstimate);
    }

    return getEstimatedPosition();
  }

  /**
   * Represents a vision update record. The record contains the vision-compensated pose estimate as
   * well as the corresponding odometry pose estimate.
   */
  private static final class VisionUpdate {
    // The vision-compensated pose estimate.
    private final Pose2d visionPose;

    // The pose estimated based solely on odometry.
    private final Pose2d odometryPose;

    /**
     * Constructs a vision update record with the specified parameters.
     *
     * @param visionPose The vision-compensated pose estimate.
     * @param odometryPose The pose estimate based solely on odometry.
     */
    private VisionUpdate(Pose2d visionPose, Pose2d odometryPose) {
      this.visionPose = visionPose;
      this.odometryPose = odometryPose;
    }

    /**
     * Returns the vision-compensated version of the pose. Specifically, changes the pose from
     * being relative to this record's odometry pose to being relative to this record's vision
     * pose.
     *
     * @param pose The pose to compensate.
     * @return The compensated pose.
     */
    public Pose2d compensate(Pose2d pose) {
      var delta = pose.minus(this.odometryPose);
      return this.visionPose.plus(delta);
    }
  }
}