// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

package edu.wpi.first.math.estimator;

import edu.wpi.first.math.MathSharedStore;
import edu.wpi.first.math.MathUtil;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.Nat;
import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Translation2d;
import edu.wpi.first.math.geometry.Twist2d;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.kinematics.Kinematics;
import edu.wpi.first.math.kinematics.Odometry;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N3;
import java.util.NavigableMap;
import java.util.Optional;
import java.util.TreeMap;

/**
 * This class wraps {@link Odometry} to fuse latency-compensated vision measurements with encoder
 * measurements. Robot code should not use this directly; instead, use the particular type for your
 * drivetrain (e.g., {@link DifferentialDrivePoseEstimator}). It is intended to be a drop-in
 * replacement for {@link Odometry}; in fact, if you never call {@link
 * PoseEstimator#addVisionMeasurement} and only call {@link PoseEstimator#update}, then this will
 * behave exactly the same as Odometry.
 *
 * <p>{@link PoseEstimator#update} should be called every robot loop.
 *
 * <p>{@link PoseEstimator#addVisionMeasurement} can be called as infrequently as you want; if you
 * never call it, then this class will behave exactly like regular encoder odometry.
 *
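 * <p>A minimal usage sketch (the {@code poseEstimator}, {@code gyro}, {@code wheelPositions},
 * {@code visionPose}, and {@code visionTimestampSeconds} names are illustrative placeholders,
 * not part of this API):
 *
 * <pre>{@code
 * // Every robot loop: advance the estimate with the latest odometry inputs.
 * poseEstimator.update(gyro.getRotation2d(), wheelPositions);
 *
 * // Whenever a vision result arrives: fuse it, tagged with its capture timestamp.
 * poseEstimator.addVisionMeasurement(visionPose, visionTimestampSeconds);
 *
 * Pose2d currentEstimate = poseEstimator.getEstimatedPosition();
 * }</pre>
 *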
 * @param <T> Wheel positions type.
 */
public class PoseEstimator<T> {
  private final Odometry<T> m_odometry;
  private final Matrix<N3, N1> m_q = new Matrix<>(Nat.N3(), Nat.N1());
  private final Matrix<N3, N3> m_visionK = new Matrix<>(Nat.N3(), Nat.N3());

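  // How long, in seconds, odometry samples are retained for latency compensation. Vision
  // measurements older than this window (relative to the newest odometry sample) are ignored.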
  private static final double kBufferDuration = 1.5;
  // Maps timestamps to odometry-only pose estimates
  private final TimeInterpolatableBuffer<Pose2d> m_odometryPoseBuffer =
      TimeInterpolatableBuffer.createBuffer(kBufferDuration);
  // Maps timestamps to vision updates
  // Always contains one entry before the oldest entry in m_odometryPoseBuffer, unless there have
  // been no vision measurements after the last reset
  private final NavigableMap<Double, VisionUpdate> m_visionUpdates = new TreeMap<>();

  private Pose2d m_poseEstimate;

  /**
   * Constructs a PoseEstimator.
   *
   * @param kinematics A correctly-configured kinematics object for your drivetrain.
   * @param odometry A correctly-configured odometry object for your drivetrain.
   * @param stateStdDevs Standard deviations of the pose estimate (x position in meters, y position
   *     in meters, and heading in radians). Increase these numbers to trust your state estimate
   *     less.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  @SuppressWarnings("PMD.UnusedFormalParameter")
  public PoseEstimator(
      Kinematics<?, T> kinematics,
      Odometry<T> odometry,
      Matrix<N3, N1> stateStdDevs,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    m_odometry = odometry;

    m_poseEstimate = m_odometry.getPose();

    for (int i = 0; i < 3; ++i) {
      m_q.set(i, 0, stateStdDevs.get(i, 0) * stateStdDevs.get(i, 0));
    }
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
  }

  /**
   * Sets the pose estimator's trust of global measurements. This might be used to change trust in
   * vision measurements after the autonomous period, or to change trust as distance to a vision
   * target increases.
   *
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y,
   *     theta]ᵀ, with units in meters and radians.
   */
  public final void setVisionMeasurementStdDevs(Matrix<N3, N1> visionMeasurementStdDevs) {
    var r = new double[3];
    for (int i = 0; i < 3; ++i) {
      r[i] = visionMeasurementStdDevs.get(i, 0) * visionMeasurementStdDevs.get(i, 0);
    }

    // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
    // and C = I. See wpimath/algorithms.md.
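    // For each axis, with state variance q and measurement variance r, the gain below is
    // k = q / (q + sqrt(q * r)). Sanity checks on the limits: as r -> 0, k -> 1 (snap fully
    // to vision); as r -> infinity, k -> 0 (ignore vision entirely).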
    for (int row = 0; row < 3; ++row) {
      if (m_q.get(row, 0) == 0.0) {
        m_visionK.set(row, row, 0.0);
      } else {
        m_visionK.set(
            row, row, m_q.get(row, 0) / (m_q.get(row, 0) + Math.sqrt(m_q.get(row, 0) * r[row])));
      }
    }
  }

  /**
   * Resets the robot's position on the field.
   *
   * <p>The gyroscope angle does not need to be reset in the user's robot code. The library
   * automatically takes care of offsetting the gyro angle.
   *
   * @param gyroAngle The angle reported by the gyroscope.
   * @param wheelPositions The current encoder readings.
   * @param pose The position on the field that your robot is at.
   */
  public void resetPosition(Rotation2d gyroAngle, T wheelPositions, Pose2d pose) {
    // Reset state estimate and error covariance
    m_odometry.resetPosition(gyroAngle, wheelPositions, pose);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Resets the robot's pose.
   *
   * @param pose The pose to reset to.
   */
  public void resetPose(Pose2d pose) {
    m_odometry.resetPose(pose);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Resets the robot's translation.
   *
   * @param translation The translation to reset to.
   */
  public void resetTranslation(Translation2d translation) {
    m_odometry.resetTranslation(translation);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Resets the robot's rotation.
   *
   * @param rotation The rotation to reset to.
   */
  public void resetRotation(Rotation2d rotation) {
    m_odometry.resetRotation(rotation);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  public Pose2d getEstimatedPosition() {
    return m_poseEstimate;
  }

  /**
   * Returns the pose at a given timestamp, if the buffer is not empty.
   *
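   * <p>For example, to look up where the estimator placed the robot 0.2 seconds ago (a sketch;
   * {@code poseEstimator} is a placeholder, and any time source that shares the estimator's
   * epoch works here):
   *
   * <pre>{@code
   * Optional<Pose2d> pastPose = poseEstimator.sampleAt(MathSharedStore.getTimestamp() - 0.2);
   * }</pre>
   *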
   * @param timestamp The pose's timestamp in seconds.
   * @return The pose at the given timestamp (or Optional.empty() if the buffer is empty).
   */
  public Optional<Pose2d> sampleAt(double timestamp) {
    // Step 0: If there are no odometry updates to sample, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return Optional.empty();
    }

    // Step 1: Make sure timestamp matches the sample from the odometry pose buffer. (When
    // sampling, the buffer will always use a timestamp between the first and last timestamps)
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();
    double newestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().lastKey();
    timestamp = MathUtil.clamp(timestamp, oldestOdometryTimestamp, newestOdometryTimestamp);

    // Step 2: If there are no applicable vision updates, use the odometry-only information.
    if (m_visionUpdates.isEmpty() || timestamp < m_visionUpdates.firstKey()) {
      return m_odometryPoseBuffer.getSample(timestamp);
    }

    // Step 3: Get the latest vision update from before or at the timestamp to sample at.
    double floorTimestamp = m_visionUpdates.floorKey(timestamp);
    var visionUpdate = m_visionUpdates.get(floorTimestamp);

    // Step 4: Get the pose measured by odometry at the time of the sample.
    var odometryEstimate = m_odometryPoseBuffer.getSample(timestamp);

    // Step 5: Apply the vision compensation to the odometry pose.
    return odometryEstimate.map(odometryPose -> visionUpdate.compensate(odometryPose));
  }

  /** Removes stale vision updates that won't affect sampling. */
  private void cleanUpVisionUpdates() {
    // Step 0: If there are no odometry samples, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return;
    }

    // Step 1: Find the oldest timestamp that needs a vision update.
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();

    // Step 2: If there are no vision updates before that timestamp, skip.
    if (m_visionUpdates.isEmpty() || oldestOdometryTimestamp < m_visionUpdates.firstKey()) {
      return;
    }

    // Step 3: Find the newest vision update timestamp before or at the oldest timestamp.
    double newestNeededVisionUpdateTimestamp = m_visionUpdates.floorKey(oldestOdometryTimestamp);

    // Step 4: Remove all entries strictly before the newest timestamp we need.
    m_visionUpdates.headMap(newestNeededVisionUpdateTimestamp, false).clear();
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
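   * <p>A sketch of the expected call pattern ({@code result} and its accessors stand in for
   * whatever vision pipeline is in use; they are not part of WPILib):
   *
   * <pre>{@code
   * // Reject implausible poses before fusing, per the recommendation above.
   * Pose2d visionPose = result.pose();
   * double distance =
   *     visionPose.getTranslation().getDistance(
   *         poseEstimator.getEstimatedPosition().getTranslation());
   * if (distance < 1.0) {
   *   poseEstimator.addVisionMeasurement(visionPose, result.timestampSeconds());
   * }
   * }</pre>
   *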
   * @param visionRobotPose The pose of the robot as measured by the vision camera.
   * @param timestamp The timestamp of the vision measurement in seconds. Note that if you don't use
   *     your own time source by calling {@link
   *     PoseEstimator#updateWithTime(double,Rotation2d,Object)}, then you must use a timestamp with
   *     an epoch since FPGA startup (i.e., the epoch of this timestamp is the same epoch as {@link
   *     edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you should use {@link
   *     edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source or sync the epochs.
   */
  public void addVisionMeasurement(Pose2d visionRobotPose, double timestamp) {
    // Step 0: If this measurement is old enough to be outside the pose buffer's timespan, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()
        || m_odometryPoseBuffer.getInternalBuffer().lastKey() - kBufferDuration > timestamp) {
      return;
    }

    // Step 1: Clean up any old entries
    cleanUpVisionUpdates();

    // Step 2: Get the pose measured by odometry at the moment the vision measurement was made.
    var odometrySample = m_odometryPoseBuffer.getSample(timestamp);

    if (odometrySample.isEmpty()) {
      return;
    }

    // Step 3: Get the vision-compensated pose estimate at the moment the vision measurement was
    // made.
    var visionSample = sampleAt(timestamp);

    if (visionSample.isEmpty()) {
      return;
    }

    // Step 4: Measure the twist between the old pose estimate and the vision pose.
    var twist = visionSample.get().log(visionRobotPose);

    // Step 5: We should not trust the twist entirely, so instead we scale this twist by a Kalman
    // gain matrix representing how much we trust vision measurements compared to our current pose.
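    // Because m_visionK is diagonal, each twist component (dx, dy, dtheta) is scaled
    // independently: a gain of 1 would jump fully to the vision pose, and 0 would ignore it.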
    var k_times_twist = m_visionK.times(VecBuilder.fill(twist.dx, twist.dy, twist.dtheta));

    // Step 6: Convert back to Twist2d.
    var scaledTwist =
        new Twist2d(k_times_twist.get(0, 0), k_times_twist.get(1, 0), k_times_twist.get(2, 0));

    // Step 7: Calculate and record the vision update.
    var visionUpdate = new VisionUpdate(visionSample.get().exp(scaledTwist), odometrySample.get());
    m_visionUpdates.put(timestamp, visionUpdate);

    // Step 8: Remove later vision measurements. (Matches previous behavior)
    m_visionUpdates.tailMap(timestamp, false).entrySet().clear();

    // Step 9: Update latest pose estimate. Since we cleared all updates after this vision update,
    // it's guaranteed to be the latest vision update.
    m_poseEstimate = visionUpdate.compensate(m_odometry.getPose());
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
   * <p>Note that the vision measurement standard deviations passed into this method will continue
   * to apply to future measurements until a subsequent call to {@link
   * PoseEstimator#setVisionMeasurementStdDevs(Matrix)} or this method.
   *
   * @param visionRobotPose The pose of the robot as measured by the vision camera.
   * @param timestamp The timestamp of the vision measurement in seconds. Note that if you don't use
   *     your own time source by calling {@link #updateWithTime}, then you must use a timestamp with
   *     an epoch since FPGA startup (i.e., the epoch of this timestamp is the same epoch as {@link
   *     edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you should use {@link
   *     edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source in this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  public void addVisionMeasurement(
      Pose2d visionRobotPose, double timestamp, Matrix<N3, N1> visionMeasurementStdDevs) {
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    addVisionMeasurement(visionRobotPose, timestamp);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d update(Rotation2d gyroAngle, T wheelPositions) {
    return updateWithTime(MathSharedStore.getTimestamp(), gyroAngle, wheelPositions);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
   * @param currentTime Time at which this method was called, in seconds.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d updateWithTime(double currentTime, Rotation2d gyroAngle, T wheelPositions) {
    var odometryEstimate = m_odometry.update(gyroAngle, wheelPositions);

    m_odometryPoseBuffer.addSample(currentTime, odometryEstimate);

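    // Apply the most recent vision correction (if any) on top of the fresh odometry pose.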
    if (m_visionUpdates.isEmpty()) {
      m_poseEstimate = odometryEstimate;
    } else {
      var visionUpdate = m_visionUpdates.get(m_visionUpdates.lastKey());
      m_poseEstimate = visionUpdate.compensate(odometryEstimate);
    }

    return getEstimatedPosition();
  }

  /**
   * Represents a vision update record. The record contains the vision-compensated pose estimate as
   * well as the corresponding odometry pose estimate.
   */
  private static final class VisionUpdate {
    // The vision-compensated pose estimate.
    private final Pose2d visionPose;

    // The pose estimated based solely on odometry.
    private final Pose2d odometryPose;

    /**
     * Constructs a vision update record with the specified parameters.
     *
     * @param visionPose The vision-compensated pose estimate.
     * @param odometryPose The pose estimate based solely on odometry.
     */
    private VisionUpdate(Pose2d visionPose, Pose2d odometryPose) {
      this.visionPose = visionPose;
      this.odometryPose = odometryPose;
    }

    /**
     * Returns the vision-compensated version of the pose. Specifically, changes the pose from being
     * relative to this record's odometry pose to being relative to this record's vision pose.
     *
     * @param pose The pose to compensate.
     * @return The compensated pose.
     */
    public Pose2d compensate(Pose2d pose) {
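      // pose.minus(odometryPose) is the Transform2d capturing the odometry-measured motion since
      // this record was created; replaying that motion on top of the vision-compensated pose
      // carries the vision correction forward to the requested pose.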
      var delta = pose.minus(this.odometryPose);
      return this.visionPose.plus(delta);
    }
  }
}