001// Copyright (c) FIRST and other WPILib contributors.
002// Open Source Software; you can modify and/or share it under the terms of
003// the WPILib BSD license file in the root directory of this project.
004
005package org.wpilib.math.estimator;
006
007import java.util.NavigableMap;
008import java.util.Optional;
009import java.util.TreeMap;
010import org.wpilib.math.geometry.Pose2d;
011import org.wpilib.math.geometry.Rotation2d;
012import org.wpilib.math.geometry.Transform2d;
013import org.wpilib.math.geometry.Translation2d;
014import org.wpilib.math.interpolation.TimeInterpolatableBuffer;
015import org.wpilib.math.kinematics.Kinematics;
016import org.wpilib.math.kinematics.Odometry;
017import org.wpilib.math.linalg.Matrix;
018import org.wpilib.math.numbers.N1;
019import org.wpilib.math.numbers.N3;
020import org.wpilib.math.util.MathSharedStore;
021
022/**
023 * This class wraps {@link Odometry} to fuse latency-compensated vision measurements with encoder
 * measurements. Robot code should not use this directly; instead, use the particular type for your
025 * drivetrain (e.g., {@link DifferentialDrivePoseEstimator}). It is intended to be a drop-in
026 * replacement for {@link Odometry}; in fact, if you never call {@link
027 * PoseEstimator#addVisionMeasurement} and only call {@link PoseEstimator#update} then this will
028 * behave exactly the same as Odometry.
029 *
030 * <p>{@link PoseEstimator#update} should be called every robot loop.
031 *
032 * <p>{@link PoseEstimator#addVisionMeasurement} can be called as infrequently as you want; if you
033 * never call it then this class will behave exactly like regular encoder odometry.
034 *
035 * @param <T> Wheel positions type.
036 */
public class PoseEstimator<T> {
  private final Odometry<T> m_odometry;

  // Diagonal of process noise covariance matrix Q (squared state standard deviations)
  private final double[] m_q = new double[] {0.0, 0.0, 0.0};

  // Diagonal of Kalman gain matrix K
  private final double[] m_vision_k = new double[] {0.0, 0.0, 0.0};

  // How long, in seconds, odometry samples are retained for latency compensation. Vision
  // measurements older than this (relative to the newest odometry sample) are discarded.
  private static final double kBufferDuration = 1.5;
  // Maps timestamps to odometry-only pose estimates
  private final TimeInterpolatableBuffer<Pose2d> m_odometryPoseBuffer =
      TimeInterpolatableBuffer.createBuffer(kBufferDuration);
  // Maps timestamps to vision updates
  // Always contains one entry before the oldest entry in m_odometryPoseBuffer, unless there have
  // been no vision measurements after the last reset. May contain one entry while
  // m_odometryPoseBuffer is empty to correct for translation/rotation after a call to
  // ResetRotation/ResetTranslation.
  private final NavigableMap<Double, VisionUpdate> m_visionUpdates = new TreeMap<>();

  // The latest fused pose estimate: the current odometry pose compensated by the most recent
  // vision update, if any. Returned by getEstimatedPosition().
  private Pose2d m_poseEstimate;

  /**
   * Constructs a PoseEstimator.
   *
   * @param kinematics A correctly-configured kinematics object for your drivetrain. (Not
   *     referenced by this class; it is accepted for API compatibility, hence the PMD
   *     suppression below.)
   * @param odometry A correctly-configured odometry object for your drivetrain.
   * @param stateStdDevs Standard deviations of the pose estimate (x position in meters, y position
   *     in meters, and heading in radians). Increase these numbers to trust your state estimate
   *     less.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  @SuppressWarnings("PMD.UnusedFormalParameter")
  public PoseEstimator(
      Kinematics<T, ?, ?> kinematics,
      Odometry<T> odometry,
      Matrix<N3, N1> stateStdDevs,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    m_odometry = odometry;

    m_poseEstimate = m_odometry.getPose();

    // Q's diagonal is the element-wise square of the state standard deviations.
    for (int i = 0; i < 3; ++i) {
      m_q[i] = stateStdDevs.get(i, 0) * stateStdDevs.get(i, 0);
    }
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    MathSharedStore.getMathShared().reportUsage("PoseEstimator", "");
  }

  /**
   * Sets the pose estimator's trust of global measurements. This might be used to change trust in
   * vision measurements after the autonomous period, or to change trust as distance to a vision
   * target increases.
   *
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y,
   *     theta]ᵀ, with units in meters and radians.
   */
  public final void setVisionMeasurementStdDevs(Matrix<N3, N1> visionMeasurementStdDevs) {
    // Diagonal of measurement noise covariance matrix R
    var r = new double[3];
    for (int i = 0; i < 3; ++i) {
      r[i] = visionMeasurementStdDevs.get(i, 0) * visionMeasurementStdDevs.get(i, 0);
    }

    // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
    // and C = I. See wpimath/algorithms.md.
    for (int row = 0; row < 3; ++row) {
      if (m_q[row] == 0.0) {
        // Zero process noise means the state estimate is fully trusted on this axis, so vision
        // corrections are ignored for it (also avoids 0/0 in the formula below).
        m_vision_k[row] = 0.0;
      } else {
        // K = q / (q + sqrt(q * r)) — the closed-form gain referenced above.
        m_vision_k[row] = m_q[row] / (m_q[row] + Math.sqrt(m_q[row] * r[row]));
      }
    }
  }

  /**
   * Resets the robot's position on the field.
   *
   * <p>The gyroscope angle does not need to be reset here on the user's robot code. The library
   * automatically takes care of offsetting the gyro angle.
   *
   * @param gyroAngle The angle reported by the gyroscope.
   * @param wheelPositions The current encoder readings.
   * @param pose The position on the field that your robot is at.
   */
  public void resetPosition(Rotation2d gyroAngle, T wheelPositions, Pose2d pose) {
    // Reset state estimate and error covariance
    m_odometry.resetPosition(gyroAngle, wheelPositions, pose);
    // Discard all latency-compensation history; the estimate restarts from the given pose.
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Resets the robot's pose.
   *
   * @param pose The pose to reset to.
   */
  public void resetPose(Pose2d pose) {
    m_odometry.resetPose(pose);
    // Discard all latency-compensation history; the estimate restarts from the given pose.
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Resets the robot's translation.
   *
   * @param translation The translation to reset the robot's pose to.
   */
  public void resetTranslation(Translation2d translation) {
    m_odometry.resetTranslation(translation);

    // Remember the latest vision update so its rotation compensation can be preserved after the
    // history is cleared.
    final var latestVisionUpdate = m_visionUpdates.lastEntry();
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();

    if (latestVisionUpdate != null) {
      // apply vision compensation to the pose rotation: keep the old rotation compensation, but
      // pin both translations to the new translation so it passes through unchanged.
      final var visionUpdate =
          new VisionUpdate(
              new Pose2d(translation, latestVisionUpdate.getValue().visionPose.getRotation()),
              new Pose2d(translation, latestVisionUpdate.getValue().odometryPose.getRotation()));
      m_visionUpdates.put(latestVisionUpdate.getKey(), visionUpdate);
      m_poseEstimate = visionUpdate.compensate(m_odometry.getPose());
    } else {
      m_poseEstimate = m_odometry.getPose();
    }
  }

  /**
   * Resets the robot's rotation.
   *
   * @param rotation The rotation to reset to.
   */
  public void resetRotation(Rotation2d rotation) {
    m_odometry.resetRotation(rotation);

    // Remember the latest vision update so its translation compensation can be preserved after
    // the history is cleared.
    final var latestVisionUpdate = m_visionUpdates.lastEntry();
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();

    if (latestVisionUpdate != null) {
      // apply vision compensation to the pose translation: keep the old translation compensation,
      // but pin both rotations to the new rotation so it passes through unchanged.
      final var visionUpdate =
          new VisionUpdate(
              new Pose2d(latestVisionUpdate.getValue().visionPose.getTranslation(), rotation),
              new Pose2d(latestVisionUpdate.getValue().odometryPose.getTranslation(), rotation));
      m_visionUpdates.put(latestVisionUpdate.getKey(), visionUpdate);
      m_poseEstimate = visionUpdate.compensate(m_odometry.getPose());
    } else {
      m_poseEstimate = m_odometry.getPose();
    }
  }

  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  public Pose2d getEstimatedPosition() {
    return m_poseEstimate;
  }

  /**
   * Return the pose at a given timestamp, if the buffer is not empty.
   *
   * @param timestamp The pose's timestamp in seconds.
   * @return The pose at the given timestamp (or Optional.empty() if the buffer is empty).
   */
  public Optional<Pose2d> sampleAt(double timestamp) {
    // Step 0: If there are no odometry updates to sample, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return Optional.empty();
    }

    // Step 1: Make sure timestamp matches the sample from the odometry pose buffer. (When sampling,
    // the buffer will always use a timestamp between the first and last timestamps)
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();
    double newestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().lastKey();
    timestamp = Math.clamp(timestamp, oldestOdometryTimestamp, newestOdometryTimestamp);

    // Step 2: If there are no applicable vision updates, use the odometry-only information.
    if (m_visionUpdates.isEmpty() || timestamp < m_visionUpdates.firstKey()) {
      return m_odometryPoseBuffer.getSample(timestamp);
    }

    // Step 3: Get the latest vision update from before or at the timestamp to sample at.
    // floorKey is non-null here because Step 2 guarantees timestamp >= firstKey.
    double floorTimestamp = m_visionUpdates.floorKey(timestamp);
    var visionUpdate = m_visionUpdates.get(floorTimestamp);

    // Step 4: Get the pose measured by odometry at the time of the sample.
    var odometryEstimate = m_odometryPoseBuffer.getSample(timestamp);

    // Step 5: Apply the vision compensation to the odometry pose.
    return odometryEstimate.map(odometryPose -> visionUpdate.compensate(odometryPose));
  }

  /** Removes stale vision updates that won't affect sampling. */
  private void cleanUpVisionUpdates() {
    // Step 0: If there are no odometry samples, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return;
    }

    // Step 1: Find the oldest timestamp that needs a vision update.
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();

    // Step 2: If there are no vision updates before that timestamp, skip.
    if (m_visionUpdates.isEmpty() || oldestOdometryTimestamp < m_visionUpdates.firstKey()) {
      return;
    }

    // Step 3: Find the newest vision update timestamp before or at the oldest timestamp.
    double newestNeededVisionUpdateTimestamp = m_visionUpdates.floorKey(oldestOdometryTimestamp);

    // Step 4: Remove all entries strictly before the newest timestamp we need. (headMap is a
    // live view, so clearing it removes the entries from m_visionUpdates itself.)
    m_visionUpdates.headMap(newestNeededVisionUpdateTimestamp, false).clear();
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
   * @param visionRobotPose The pose of the robot as measured by the vision camera.
   * @param timestamp The timestamp of the vision measurement in seconds. Note that if you don't use
   *     your own time source by calling {@link
   *     PoseEstimator#updateWithTime(double,Rotation2d,Object)} then you must use a timestamp with
   *     an epoch since FPGA startup (i.e., the epoch of this timestamp is the same epoch as {@link
   *     org.wpilib.system.Timer#getFPGATimestamp()}.) This means that you should use {@link
   *     org.wpilib.system.Timer#getFPGATimestamp()} as your time source or sync the epochs.
   */
  public void addVisionMeasurement(Pose2d visionRobotPose, double timestamp) {
    // Step 0: If this measurement is old enough to be outside the pose buffer's timespan, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()
        || m_odometryPoseBuffer.getInternalBuffer().lastKey() - kBufferDuration > timestamp) {
      return;
    }

    // Step 1: Clean up any old entries
    cleanUpVisionUpdates();

    // Step 2: Get the pose measured by odometry at the moment the vision measurement was made.
    var odometrySample = m_odometryPoseBuffer.getSample(timestamp);

    if (odometrySample.isEmpty()) {
      return;
    }

    // Step 3: Get the vision-compensated pose estimate at the moment the vision measurement was
    // made.
    var visionSample = sampleAt(timestamp);

    if (visionSample.isEmpty()) {
      return;
    }

    // Step 4: Measure the transform between the old pose estimate and the vision pose.
    var transform = visionRobotPose.minus(visionSample.get());

    // Step 5: We should not trust the transform entirely, so instead we scale this transform by a
    // Kalman gain matrix representing how much we trust vision measurements compared to our current
    // pose. Then, we convert the result back to a Transform2d.
    var scaledTransform =
        new Transform2d(
            m_vision_k[0] * transform.getX(),
            m_vision_k[1] * transform.getY(),
            Rotation2d.fromRadians(m_vision_k[2] * transform.getRotation().getRadians()));

    // Step 6: Calculate and record the vision update.
    var visionUpdate =
        new VisionUpdate(visionSample.get().plus(scaledTransform), odometrySample.get());
    m_visionUpdates.put(timestamp, visionUpdate);

    // Step 7: Remove later vision measurements. (Matches previous behavior)
    // tailMap is a live view, so clearing it removes the entries from m_visionUpdates itself.
    m_visionUpdates.tailMap(timestamp, false).entrySet().clear();

    // Step 8: Update latest pose estimate. Since we cleared all updates after this vision update,
    // it's guaranteed to be the latest vision update.
    m_poseEstimate = visionUpdate.compensate(m_odometry.getPose());
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
   * <p>Note that the vision measurement standard deviations passed into this method will continue
   * to apply to future measurements until a subsequent call to {@link
   * PoseEstimator#setVisionMeasurementStdDevs(Matrix)} or this method.
   *
   * @param visionRobotPose The pose of the robot as measured by the vision camera.
   * @param timestamp The timestamp of the vision measurement in seconds. Note that if you don't use
   *     your own time source by calling {@link #updateWithTime}, then you must use a timestamp with
   *     an epoch since FPGA startup (i.e., the epoch of this timestamp is the same epoch as {@link
   *     org.wpilib.system.Timer#getFPGATimestamp()}). This means that you should use {@link
   *     org.wpilib.system.Timer#getFPGATimestamp()} as your time source in this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  public void addVisionMeasurement(
      Pose2d visionRobotPose, double timestamp, Matrix<N3, N1> visionMeasurementStdDevs) {
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    addVisionMeasurement(visionRobotPose, timestamp);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d update(Rotation2d gyroAngle, T wheelPositions) {
    return updateWithTime(MathSharedStore.getTimestamp(), gyroAngle, wheelPositions);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
   * @param currentTime Time at which this method was called, in seconds.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d updateWithTime(double currentTime, Rotation2d gyroAngle, T wheelPositions) {
    var odometryEstimate = m_odometry.update(gyroAngle, wheelPositions);

    // Record the odometry-only pose so later vision measurements can be latency-compensated.
    m_odometryPoseBuffer.addSample(currentTime, odometryEstimate);

    if (m_visionUpdates.isEmpty()) {
      m_poseEstimate = odometryEstimate;
    } else {
      // Apply the most recent vision correction on top of the new odometry estimate.
      var visionUpdate = m_visionUpdates.get(m_visionUpdates.lastKey());
      m_poseEstimate = visionUpdate.compensate(odometryEstimate);
    }

    return getEstimatedPosition();
  }

  /**
   * Represents a vision update record. The record contains the vision-compensated pose estimate as
   * well as the corresponding odometry pose estimate.
   */
  private static final class VisionUpdate {
    // The vision-compensated pose estimate.
    private final Pose2d visionPose;

    // The pose estimated based solely on odometry.
    private final Pose2d odometryPose;

    /**
     * Constructs a vision update record with the specified parameters.
     *
     * @param visionPose The vision-compensated pose estimate.
     * @param odometryPose The pose estimate based solely on odometry.
     */
    private VisionUpdate(Pose2d visionPose, Pose2d odometryPose) {
      this.visionPose = visionPose;
      this.odometryPose = odometryPose;
    }

    /**
     * Returns the vision-compensated version of the pose. Specifically, changes the pose from being
     * relative to this record's odometry pose to being relative to this record's vision pose.
     *
     * @param pose The pose to compensate.
     * @return The compensated pose.
     */
    public Pose2d compensate(Pose2d pose) {
      // Re-base the pose: visionPose + (pose - odometryPose).
      var delta = pose.minus(this.odometryPose);
      return this.visionPose.plus(delta);
    }
  }
}