// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

package org.wpilib.math.estimator;

import java.util.NavigableMap;
import java.util.Optional;
import java.util.TreeMap;
import org.wpilib.math.geometry.Pose2d;
import org.wpilib.math.geometry.Rotation2d;
import org.wpilib.math.geometry.Transform2d;
import org.wpilib.math.geometry.Translation2d;
import org.wpilib.math.interpolation.TimeInterpolatableBuffer;
import org.wpilib.math.kinematics.Kinematics;
import org.wpilib.math.kinematics.Odometry;
import org.wpilib.math.linalg.Matrix;
import org.wpilib.math.numbers.N1;
import org.wpilib.math.numbers.N3;
import org.wpilib.math.util.MathSharedStore;

/**
 * This class wraps {@link Odometry} to fuse latency-compensated vision measurements with encoder
 * measurements. Robot code should not use this directly; instead, use the particular type for your
 * drivetrain (e.g., {@link DifferentialDrivePoseEstimator}). It is intended to be a drop-in
 * replacement for {@link Odometry}; in fact, if you never call {@link
 * PoseEstimator#addVisionMeasurement} and only call {@link PoseEstimator#update}, then this will
 * behave exactly the same as Odometry.
 *
 * <p>{@link PoseEstimator#update} should be called every robot loop.
 *
 * <p>{@link PoseEstimator#addVisionMeasurement} can be called as infrequently as you want; if you
 * never call it, then this class will behave exactly like regular encoder odometry.
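 *
 * <p>A minimal usage sketch (the {@code estimator}, {@code gyro}, {@code getWheelPositions()},
 * and vision values here are hypothetical stand-ins for your own objects):
 *
 * <pre>{@code
 * // Every robot loop
 * estimator.update(gyro.getRotation2d(), getWheelPositions());
 *
 * // Whenever a vision measurement is available (possibly infrequently)
 * estimator.addVisionMeasurement(visionPose, visionTimestampSeconds);
 *
 * Pose2d currentEstimate = estimator.getEstimatedPosition();
 * }</pre>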
 *
 * @param <T> Wheel positions type.
 */
public class PoseEstimator<T> {
  private final Odometry<T> m_odometry;

  // Diagonal of process noise covariance matrix Q
  private final double[] m_q = new double[] {0.0, 0.0, 0.0};

  // Diagonal of Kalman gain matrix K
  private final double[] m_vision_k = new double[] {0.0, 0.0, 0.0};

  // Duration of pose history to keep, in seconds
  private static final double kBufferDuration = 1.5;
  // Maps timestamps to odometry-only pose estimates
  private final TimeInterpolatableBuffer<Pose2d> m_odometryPoseBuffer =
      TimeInterpolatableBuffer.createBuffer(kBufferDuration);
  // Maps timestamps to vision updates
  // Always contains one entry before the oldest entry in m_odometryPoseBuffer, unless there have
  // been no vision measurements after the last reset. May contain one entry while
  // m_odometryPoseBuffer is empty to correct for translation/rotation after a call to
  // resetRotation/resetTranslation.
  private final NavigableMap<Double, VisionUpdate> m_visionUpdates = new TreeMap<>();

  private Pose2d m_poseEstimate;

  /**
   * Constructs a PoseEstimator.
   *
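   * <p>A hedged construction sketch (normally reached through a drivetrain-specific subclass such
   * as {@link DifferentialDrivePoseEstimator}; shown directly for illustration). It assumes a
   * {@code VecBuilder.fill} vector factory exists in this library, mirroring WPILib's {@code
   * VecBuilder}:
   *
   * <pre>{@code
   * var estimator =
   *     new PoseEstimator<>(
   *         kinematics,
   *         odometry,
   *         VecBuilder.fill(0.05, 0.05, Math.toRadians(5)), // state std devs: x, y, heading
   *         VecBuilder.fill(0.5, 0.5, Math.toRadians(30))); // vision std devs: x, y, heading
   * }</pre>
   *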
   * @param kinematics A correctly-configured kinematics object for your drivetrain.
   * @param odometry A correctly-configured odometry object for your drivetrain.
   * @param stateStdDevs Standard deviations of the pose estimate (x position in meters, y position
   *     in meters, and heading in radians). Increase these numbers to trust your state estimate
   *     less.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  public PoseEstimator(
      Kinematics<T, ?, ?> kinematics,
      Odometry<T> odometry,
      Matrix<N3, N1> stateStdDevs,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    m_odometry = odometry;

    m_poseEstimate = m_odometry.getPose();

    for (int i = 0; i < 3; ++i) {
      m_q[i] = stateStdDevs.get(i, 0) * stateStdDevs.get(i, 0);
    }
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    MathSharedStore.getMathShared().reportUsage("PoseEstimator", "");
  }

  /**
   * Sets the pose estimator's trust of global measurements. This might be used to change trust in
   * vision measurements after the autonomous period, or to change trust as distance to a vision
   * target increases.
   *
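   * <p>For example, a team might scale the standard deviations with distance to the target (a
   * hypothetical heuristic; {@code VecBuilder.fill} is assumed to exist in this library):
   *
   * <pre>{@code
   * double d = distanceToTargetMeters;
   * estimator.setVisionMeasurementStdDevs(
   *     VecBuilder.fill(0.1 * d, 0.1 * d, Math.toRadians(10.0 * d)));
   * }</pre>
   *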
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y,
   *     theta]ᵀ, with units in meters and radians.
   */
  public final void setVisionMeasurementStdDevs(Matrix<N3, N1> visionMeasurementStdDevs) {
    // Diagonal of measurement noise covariance matrix R
    var r = new double[3];
    for (int i = 0; i < 3; ++i) {
      r[i] = visionMeasurementStdDevs.get(i, 0) * visionMeasurementStdDevs.get(i, 0);
    }

    // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
    // and C = I. See wpimath/algorithms.md.
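    // For each axis independently, k = q / (q + sqrt(q * r)): as r -> 0 the gain approaches 1
    // (trust vision fully), and as r -> infinity it approaches 0 (ignore vision).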
    for (int row = 0; row < 3; ++row) {
      if (m_q[row] == 0.0) {
        m_vision_k[row] = 0.0;
      } else {
        m_vision_k[row] = m_q[row] / (m_q[row] + Math.sqrt(m_q[row] * r[row]));
      }
    }
  }

  /**
   * Resets the robot's position on the field.
   *
   * <p>The gyroscope angle does not need to be reset in the user's robot code. The library
   * automatically takes care of offsetting the gyro angle.
   *
   * @param gyroAngle The angle reported by the gyroscope.
   * @param wheelPositions The current encoder readings.
   * @param pose The position on the field that your robot is at.
   */
  public void resetPosition(Rotation2d gyroAngle, T wheelPositions, Pose2d pose) {
    // Reset state estimate and error covariance
    m_odometry.resetPosition(gyroAngle, wheelPositions, pose);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Resets the robot's pose.
   *
   * @param pose The pose to reset to.
   */
  public void resetPose(Pose2d pose) {
    m_odometry.resetPose(pose);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Resets the robot's translation.
   *
   * @param translation The translation to reset to.
   */
  public void resetTranslation(Translation2d translation) {
    m_odometry.resetTranslation(translation);

    final var latestVisionUpdate = m_visionUpdates.lastEntry();
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();

    if (latestVisionUpdate != null) {
      // Keep only the rotation component of the vision compensation; the translation was just
      // reset, so both recorded poses share the new translation.
      final var visionUpdate =
          new VisionUpdate(
              new Pose2d(translation, latestVisionUpdate.getValue().visionPose.getRotation()),
              new Pose2d(translation, latestVisionUpdate.getValue().odometryPose.getRotation()));
      m_visionUpdates.put(latestVisionUpdate.getKey(), visionUpdate);
      m_poseEstimate = visionUpdate.compensate(m_odometry.getPose());
    } else {
      m_poseEstimate = m_odometry.getPose();
    }
  }

  /**
   * Resets the robot's rotation.
   *
   * @param rotation The rotation to reset to.
   */
  public void resetRotation(Rotation2d rotation) {
    m_odometry.resetRotation(rotation);

    final var latestVisionUpdate = m_visionUpdates.lastEntry();
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();

    if (latestVisionUpdate != null) {
      // Keep only the translation component of the vision compensation; the rotation was just
      // reset, so both recorded poses share the new rotation.
      final var visionUpdate =
          new VisionUpdate(
              new Pose2d(latestVisionUpdate.getValue().visionPose.getTranslation(), rotation),
              new Pose2d(latestVisionUpdate.getValue().odometryPose.getTranslation(), rotation));
      m_visionUpdates.put(latestVisionUpdate.getKey(), visionUpdate);
      m_poseEstimate = visionUpdate.compensate(m_odometry.getPose());
    } else {
      m_poseEstimate = m_odometry.getPose();
    }
  }

  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  public Pose2d getEstimatedPosition() {
    return m_poseEstimate;
  }

  /**
   * Returns the pose at a given timestamp, if the buffer is not empty.
   *
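   * <p>For example, to look up where the estimator believed the robot was 20 ms ago (assuming
   * your update timestamps share the {@code Timer.getMonotonicTimestamp()} epoch):
   *
   * <pre>{@code
   * Optional<Pose2d> past = estimator.sampleAt(Timer.getMonotonicTimestamp() - 0.02);
   * }</pre>
   *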
   * @param timestamp The pose's timestamp in seconds.
   * @return The pose at the given timestamp (or Optional.empty() if the buffer is empty).
   */
  public Optional<Pose2d> sampleAt(double timestamp) {
    // Step 0: If there are no odometry updates to sample, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return Optional.empty();
    }

    // Step 1: Clamp the timestamp into the odometry buffer's timespan, since the buffer can only
    // sample between its first and last timestamps.
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();
    double newestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().lastKey();
    timestamp = Math.clamp(timestamp, oldestOdometryTimestamp, newestOdometryTimestamp);

    // Step 2: If there are no applicable vision updates, use the odometry-only information.
    if (m_visionUpdates.isEmpty() || timestamp < m_visionUpdates.firstKey()) {
      return m_odometryPoseBuffer.getSample(timestamp);
    }

    // Step 3: Get the latest vision update from before or at the timestamp to sample at.
    double floorTimestamp = m_visionUpdates.floorKey(timestamp);
    var visionUpdate = m_visionUpdates.get(floorTimestamp);

    // Step 4: Get the pose measured by odometry at the time of the sample.
    var odometryEstimate = m_odometryPoseBuffer.getSample(timestamp);

    // Step 5: Apply the vision compensation to the odometry pose.
    return odometryEstimate.map(odometryPose -> visionUpdate.compensate(odometryPose));
  }

  /** Removes stale vision updates that won't affect sampling. */
  private void cleanUpVisionUpdates() {
    // Step 0: If there are no odometry samples, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return;
    }

    // Step 1: Find the oldest timestamp that needs a vision update.
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();

    // Step 2: If there are no vision updates before that timestamp, skip.
    if (m_visionUpdates.isEmpty() || oldestOdometryTimestamp < m_visionUpdates.firstKey()) {
      return;
    }

    // Step 3: Find the newest vision update timestamp before or at the oldest timestamp.
    double newestNeededVisionUpdateTimestamp = m_visionUpdates.floorKey(oldestOdometryTimestamp);

    // Step 4: Remove all entries strictly before the newest timestamp we need.
    m_visionUpdates.headMap(newestNeededVisionUpdateTimestamp, false).clear();
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
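   * <p>A hedged latency-compensation sketch (the {@code camera} object and its methods are
   * hypothetical):
   *
   * <pre>{@code
   * double latencySeconds = camera.getLatencySeconds();
   * estimator.addVisionMeasurement(
   *     camera.getEstimatedPose(), Timer.getMonotonicTimestamp() - latencySeconds);
   * }</pre>
   *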
   * @param visionRobotPose The pose of the robot as measured by the vision camera.
   * @param timestamp The timestamp of the vision measurement in seconds. Note that if you don't
   *     use your own time source by calling {@link
   *     PoseEstimator#updateWithTime(double,Rotation2d,Object)}, then you must use a timestamp
   *     with the same epoch as {@link org.wpilib.system.Timer#getMonotonicTimestamp()}. This means
   *     that you should use {@link org.wpilib.system.Timer#getMonotonicTimestamp()} as your time
   *     source or sync the epochs.
   */
  public void addVisionMeasurement(Pose2d visionRobotPose, double timestamp) {
    // Step 0: If this measurement is old enough to be outside the pose buffer's timespan, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()
        || m_odometryPoseBuffer.getInternalBuffer().lastKey() - kBufferDuration > timestamp) {
      return;
    }

    // Step 1: Clean up any old entries.
    cleanUpVisionUpdates();

    // Step 2: Get the pose measured by odometry at the moment the vision measurement was made.
    var odometrySample = m_odometryPoseBuffer.getSample(timestamp);

    if (odometrySample.isEmpty()) {
      return;
    }

    // Step 3: Get the vision-compensated pose estimate at the moment the vision measurement was
    // made.
    var visionSample = sampleAt(timestamp);

    if (visionSample.isEmpty()) {
      return;
    }

    // Step 4: Measure the transform between the old pose estimate and the vision pose.
    var transform = visionRobotPose.minus(visionSample.get());

    // Step 5: We should not trust the transform entirely, so instead we scale this transform by a
    // Kalman gain matrix representing how much we trust vision measurements compared to our
    // current pose. Then, we convert the result back to a Transform2d.
    var scaledTransform =
        new Transform2d(
            m_vision_k[0] * transform.getX(),
            m_vision_k[1] * transform.getY(),
            Rotation2d.fromRadians(m_vision_k[2] * transform.getRotation().getRadians()));

    // Step 6: Calculate and record the vision update.
    var visionUpdate =
        new VisionUpdate(visionSample.get().plus(scaledTransform), odometrySample.get());
    m_visionUpdates.put(timestamp, visionUpdate);

    // Step 7: Remove any vision updates later than this one.
    m_visionUpdates.tailMap(timestamp, false).entrySet().clear();

    // Step 8: Update the latest pose estimate. Since we cleared all updates after this vision
    // update, it's guaranteed to be the latest vision update.
    m_poseEstimate = visionUpdate.compensate(m_odometry.getPose());
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
   * <p>Note that the vision measurement standard deviations passed into this method will continue
   * to apply to future measurements until a subsequent call to {@link
   * PoseEstimator#setVisionMeasurementStdDevs(Matrix)} or this method.
   *
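   * <p>For example, to trust one particular measurement less, e.g. because its target is far away
   * ({@code VecBuilder.fill} is assumed to exist in this library):
   *
   * <pre>{@code
   * estimator.addVisionMeasurement(
   *     visionPose, timestamp, VecBuilder.fill(1.0, 1.0, Math.toRadians(45)));
   * }</pre>
   *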
   * @param visionRobotPose The pose of the robot as measured by the vision camera.
   * @param timestamp The timestamp of the vision measurement in seconds. Note that if you don't use
   *     your own time source by calling {@link #updateWithTime}, then you must use a timestamp with
   *     the same epoch as {@link org.wpilib.system.Timer#getMonotonicTimestamp()}. This means that
   *     you should use {@link org.wpilib.system.Timer#getMonotonicTimestamp()} as your time source
   *     in this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  public void addVisionMeasurement(
      Pose2d visionRobotPose, double timestamp, Matrix<N3, N1> visionMeasurementStdDevs) {
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    addVisionMeasurement(visionRobotPose, timestamp);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d update(Rotation2d gyroAngle, T wheelPositions) {
    return updateWithTime(MathSharedStore.getTimestamp(), gyroAngle, wheelPositions);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
   * @param currentTime Time at which this method was called, in seconds.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d updateWithTime(double currentTime, Rotation2d gyroAngle, T wheelPositions) {
    var odometryEstimate = m_odometry.update(gyroAngle, wheelPositions);

    m_odometryPoseBuffer.addSample(currentTime, odometryEstimate);

    if (m_visionUpdates.isEmpty()) {
      m_poseEstimate = odometryEstimate;
    } else {
      var visionUpdate = m_visionUpdates.get(m_visionUpdates.lastKey());
      m_poseEstimate = visionUpdate.compensate(odometryEstimate);
    }

    return getEstimatedPosition();
  }

  /**
   * Represents a vision update record. The record contains the vision-compensated pose estimate as
   * well as the corresponding odometry pose estimate.
   */
  private static final class VisionUpdate {
    // The vision-compensated pose estimate.
    private final Pose2d visionPose;

    // The pose estimated based solely on odometry.
    private final Pose2d odometryPose;

    /**
     * Constructs a vision update record with the specified parameters.
     *
     * @param visionPose The vision-compensated pose estimate.
     * @param odometryPose The pose estimate based solely on odometry.
     */
    private VisionUpdate(Pose2d visionPose, Pose2d odometryPose) {
      this.visionPose = visionPose;
      this.odometryPose = odometryPose;
    }

    /**
     * Returns the vision-compensated version of the pose. Specifically, changes the pose from being
     * relative to this record's odometry pose to being relative to this record's vision pose.
     *
     * @param pose The pose to compensate.
     * @return The compensated pose.
     */
    public Pose2d compensate(Pose2d pose) {
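      // Express the given pose as a transform relative to the recorded odometry pose, then apply
      // that same transform on top of the recorded vision pose.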
      var delta = pose.minus(this.odometryPose);
      return this.visionPose.plus(delta);
    }
  }
}