// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

package edu.wpi.first.math.estimator;

import edu.wpi.first.math.MathSharedStore;
import edu.wpi.first.math.MathUtil;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.Nat;
import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Translation2d;
import edu.wpi.first.math.geometry.Twist2d;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.kinematics.Kinematics;
import edu.wpi.first.math.kinematics.Odometry;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N3;
import java.util.NavigableMap;
import java.util.Optional;
import java.util.TreeMap;

/**
 * This class wraps {@link Odometry} to fuse latency-compensated vision measurements with encoder
 * measurements. Robot code should not use this directly. Instead, use the particular type for your
 * drivetrain (e.g., {@link DifferentialDrivePoseEstimator}). It is intended to be a drop-in
 * replacement for {@link Odometry}; in fact, if you never call {@link
 * PoseEstimator#addVisionMeasurement} and only call {@link PoseEstimator#update}, then this will
 * behave exactly the same as Odometry.
 *
 * <p>{@link PoseEstimator#update} should be called every robot loop.
 *
 * <p>{@link PoseEstimator#addVisionMeasurement} can be called as infrequently as you want; if you
 * never call it, then this class will behave exactly like regular encoder odometry.
 *
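 * <p>A minimal usage sketch, assuming a {@code DifferentialDrivePoseEstimator} field named
 * {@code m_poseEstimator} (the gyro, encoder, and vision names below are illustrative
 * placeholders, not part of this API):
 *
 * <pre>{@code
 * // Every robot loop, advance the estimate with the latest gyro and encoder readings.
 * m_poseEstimator.update(m_gyro.getRotation2d(), getWheelPositions());
 *
 * // Whenever the vision pipeline produces a result, fuse it using its capture timestamp.
 * m_poseEstimator.addVisionMeasurement(visionPose, visionCaptureTimestampSeconds);
 * }</pre>
 *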
 * @param <T> Wheel positions type.
 */
public class PoseEstimator<T> {
  private final Odometry<T> m_odometry;
  private final Matrix<N3, N1> m_q = new Matrix<>(Nat.N3(), Nat.N1());
  private final Matrix<N3, N3> m_visionK = new Matrix<>(Nat.N3(), Nat.N3());

  private static final double kBufferDuration = 1.5;
  // Maps timestamps to odometry-only pose estimates
  private final TimeInterpolatableBuffer<Pose2d> m_odometryPoseBuffer =
      TimeInterpolatableBuffer.createBuffer(kBufferDuration);
  // Maps timestamps to vision updates
  // Always contains one entry before the oldest entry in m_odometryPoseBuffer, unless there have
  // been no vision measurements after the last reset
  private final NavigableMap<Double, VisionUpdate> m_visionUpdates = new TreeMap<>();

  private Pose2d m_poseEstimate;

  /**
   * Constructs a PoseEstimator.
   *
   * @param kinematics A correctly-configured kinematics object for your drivetrain.
   * @param odometry A correctly-configured odometry object for your drivetrain.
   * @param stateStdDevs Standard deviations of the pose estimate (x position in meters, y position
   *     in meters, and heading in radians). Increase these numbers to trust your state estimate
   *     less.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  @SuppressWarnings("PMD.UnusedFormalParameter")
  public PoseEstimator(
      Kinematics<?, T> kinematics,
      Odometry<T> odometry,
      Matrix<N3, N1> stateStdDevs,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    m_odometry = odometry;

    m_poseEstimate = m_odometry.getPose();

    for (int i = 0; i < 3; ++i) {
      m_q.set(i, 0, stateStdDevs.get(i, 0) * stateStdDevs.get(i, 0));
    }
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
  }

  /**
   * Sets the pose estimator's trust of global measurements. This might be used to change trust in
   * vision measurements after the autonomous period, or to change trust as distance to a vision
   * target increases.
   *
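   * <p>For example, trust in a vision target typically decreases with distance. A sketch of one
   * way to scale the standard deviations (the scaling constants here are illustrative, not tuned
   * values):
   *
   * <pre>{@code
   * // Grow the x/y standard deviations quadratically, and the heading standard
   * // deviation linearly, with the distance to the target in meters.
   * double d = distanceToTargetMeters;
   * m_poseEstimator.setVisionMeasurementStdDevs(
   *     VecBuilder.fill(0.1 * d * d, 0.1 * d * d, 0.5 * d));
   * }</pre>
   *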
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y,
   *     theta]ᵀ, with units in meters and radians.
   */
  public final void setVisionMeasurementStdDevs(Matrix<N3, N1> visionMeasurementStdDevs) {
    var r = new double[3];
    for (int i = 0; i < 3; ++i) {
      r[i] = visionMeasurementStdDevs.get(i, 0) * visionMeasurementStdDevs.get(i, 0);
    }

    // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
    // and C = I. See wpimath/algorithms.md.
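    // For each axis the gain reduces to K = q / (q + sqrt(q * r)), where q is the state
    // variance and r is the measurement variance for that axis.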
    for (int row = 0; row < 3; ++row) {
      if (m_q.get(row, 0) == 0.0) {
        m_visionK.set(row, row, 0.0);
      } else {
        m_visionK.set(
            row, row, m_q.get(row, 0) / (m_q.get(row, 0) + Math.sqrt(m_q.get(row, 0) * r[row])));
      }
    }
  }

  /**
   * Resets the robot's position on the field.
   *
   * <p>The gyroscope angle does not need to be reset here on the user's robot code. The library
   * automatically takes care of offsetting the gyro angle.
   *
   * @param gyroAngle The angle reported by the gyroscope.
   * @param wheelPositions The current encoder readings.
   * @param pose The position on the field that your robot is at.
   */
  public void resetPosition(Rotation2d gyroAngle, T wheelPositions, Pose2d pose) {
    // Reset state estimate and error covariance
    m_odometry.resetPosition(gyroAngle, wheelPositions, pose);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Resets the robot's pose.
   *
   * @param pose The pose to reset to.
   */
  public void resetPose(Pose2d pose) {
    m_odometry.resetPose(pose);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Resets the robot's translation.
   *
   * @param translation The translation to reset to.
   */
  public void resetTranslation(Translation2d translation) {
    m_odometry.resetTranslation(translation);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Resets the robot's rotation.
   *
   * @param rotation The rotation to reset to.
   */
  public void resetRotation(Rotation2d rotation) {
    m_odometry.resetRotation(rotation);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  public Pose2d getEstimatedPosition() {
    return m_poseEstimate;
  }

  /**
   * Returns the pose at a given timestamp, if the buffer is not empty.
   *
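   * <p>This is useful for latency compensation outside the estimator itself. A sketch (the 0.2
   * second latency below is an arbitrary illustrative value):
   *
   * <pre>{@code
   * // Look up where the estimator thought the robot was 0.2 seconds ago.
   * Optional<Pose2d> pastPose = m_poseEstimator.sampleAt(Timer.getFPGATimestamp() - 0.2);
   * }</pre>
   *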
   * @param timestamp The pose's timestamp in seconds.
   * @return The pose at the given timestamp (or Optional.empty() if the buffer is empty).
   */
  public Optional<Pose2d> sampleAt(double timestamp) {
    // Step 0: If there are no odometry updates to sample, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return Optional.empty();
    }

    // Step 1: Make sure timestamp matches the sample from the odometry pose buffer. (When
    // sampling, the buffer will always use a timestamp between the first and last timestamps)
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();
    double newestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().lastKey();
    timestamp = MathUtil.clamp(timestamp, oldestOdometryTimestamp, newestOdometryTimestamp);

    // Step 2: If there are no applicable vision updates, use the odometry-only information.
    if (m_visionUpdates.isEmpty() || timestamp < m_visionUpdates.firstKey()) {
      return m_odometryPoseBuffer.getSample(timestamp);
    }

    // Step 3: Get the latest vision update from before or at the timestamp to sample at.
    double floorTimestamp = m_visionUpdates.floorKey(timestamp);
    var visionUpdate = m_visionUpdates.get(floorTimestamp);

    // Step 4: Get the pose measured by odometry at the time of the sample.
    var odometryEstimate = m_odometryPoseBuffer.getSample(timestamp);

    // Step 5: Apply the vision compensation to the odometry pose.
    return odometryEstimate.map(odometryPose -> visionUpdate.compensate(odometryPose));
  }

  /** Removes stale vision updates that won't affect sampling. */
  private void cleanUpVisionUpdates() {
    // Step 0: If there are no odometry samples, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return;
    }

    // Step 1: Find the oldest timestamp that needs a vision update.
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();

    // Step 2: If there are no vision updates before that timestamp, skip.
    if (m_visionUpdates.isEmpty() || oldestOdometryTimestamp < m_visionUpdates.firstKey()) {
      return;
    }

    // Step 3: Find the newest vision update timestamp before or at the oldest timestamp.
    double newestNeededVisionUpdateTimestamp = m_visionUpdates.floorKey(oldestOdometryTimestamp);

    // Step 4: Remove all entries strictly before the newest timestamp we need.
    m_visionUpdates.headMap(newestNeededVisionUpdateTimestamp, false).clear();
  }

  /**
   * Adds a vision measurement to the Kalman filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
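   * <p>A typical call site, as a sketch (assuming the vision pipeline reports its processing
   * latency; the names below are illustrative placeholders):
   *
   * <pre>{@code
   * // Timestamp the measurement at the moment the frame was captured, not the moment
   * // the result arrived, so the estimator can compensate for pipeline latency.
   * m_poseEstimator.addVisionMeasurement(
   *     visionPose, Timer.getFPGATimestamp() - pipelineLatencySeconds);
   * }</pre>
   *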
   * @param visionRobotPose The pose of the robot as measured by the vision camera.
   * @param timestamp The timestamp of the vision measurement in seconds. Note that if you don't use
   *     your own time source by calling {@link
   *     PoseEstimator#updateWithTime(double,Rotation2d,Object)}, then you must use a timestamp with
   *     an epoch since FPGA startup (i.e., the epoch of this timestamp is the same epoch as {@link
   *     edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you should use {@link
   *     edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source or sync the epochs.
   */
  public void addVisionMeasurement(Pose2d visionRobotPose, double timestamp) {
    // Step 0: If this measurement is old enough to be outside the pose buffer's timespan, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()
        || m_odometryPoseBuffer.getInternalBuffer().lastKey() - kBufferDuration > timestamp) {
      return;
    }

    // Step 1: Clean up any old entries
    cleanUpVisionUpdates();

    // Step 2: Get the pose measured by odometry at the moment the vision measurement was made.
    var odometrySample = m_odometryPoseBuffer.getSample(timestamp);

    if (odometrySample.isEmpty()) {
      return;
    }

    // Step 3: Get the vision-compensated pose estimate at the moment the vision measurement was
    // made.
    var visionSample = sampleAt(timestamp);

    if (visionSample.isEmpty()) {
      return;
    }

    // Step 4: Measure the twist between the old pose estimate and the vision pose.
    var twist = visionSample.get().log(visionRobotPose);

    // Step 5: We should not trust the twist entirely, so instead we scale this twist by a Kalman
    // gain matrix representing how much we trust vision measurements compared to our current pose.
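    // Since m_visionK is diagonal, each component of the twist (dx, dy, dtheta) is scaled
    // independently by its corresponding gain.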
    var k_times_twist = m_visionK.times(VecBuilder.fill(twist.dx, twist.dy, twist.dtheta));

    // Step 6: Convert back to Twist2d.
    var scaledTwist =
        new Twist2d(k_times_twist.get(0, 0), k_times_twist.get(1, 0), k_times_twist.get(2, 0));

    // Step 7: Calculate and record the vision update.
    var visionUpdate =
        new VisionUpdate(visionSample.get().exp(scaledTwist), odometrySample.get());
    m_visionUpdates.put(timestamp, visionUpdate);

    // Step 8: Remove later vision measurements. (Matches previous behavior)
    m_visionUpdates.tailMap(timestamp, false).entrySet().clear();

    // Step 9: Update latest pose estimate. Since we cleared all updates after this vision update,
    // it's guaranteed to be the latest vision update.
    m_poseEstimate = visionUpdate.compensate(m_odometry.getPose());
  }

  /**
   * Adds a vision measurement to the Kalman filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
   * <p>Note that the vision measurement standard deviations passed into this method will continue
   * to apply to future measurements until a subsequent call to {@link
   * PoseEstimator#setVisionMeasurementStdDevs(Matrix)} or this method.
   *
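   * <p>A sketch of a per-measurement call (the standard deviation values and the latency name are
   * illustrative placeholders):
   *
   * <pre>{@code
   * m_poseEstimator.addVisionMeasurement(
   *     visionPose,
   *     Timer.getFPGATimestamp() - pipelineLatencySeconds,
   *     VecBuilder.fill(0.5, 0.5, 0.3));
   * }</pre>
   *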
   * @param visionRobotPose The pose of the robot as measured by the vision camera.
   * @param timestamp The timestamp of the vision measurement in seconds. Note that if you don't use
   *     your own time source by calling {@link #updateWithTime}, then you must use a timestamp with
   *     an epoch since FPGA startup (i.e., the epoch of this timestamp is the same epoch as {@link
   *     edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you should use {@link
   *     edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source in this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  public void addVisionMeasurement(
      Pose2d visionRobotPose, double timestamp, Matrix<N3, N1> visionMeasurementStdDevs) {
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    addVisionMeasurement(visionRobotPose, timestamp);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d update(Rotation2d gyroAngle, T wheelPositions) {
    return updateWithTime(MathSharedStore.getTimestamp(), gyroAngle, wheelPositions);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
   * @param currentTime Time at which this method was called, in seconds.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d updateWithTime(double currentTime, Rotation2d gyroAngle, T wheelPositions) {
    var odometryEstimate = m_odometry.update(gyroAngle, wheelPositions);

    m_odometryPoseBuffer.addSample(currentTime, odometryEstimate);

    if (m_visionUpdates.isEmpty()) {
      m_poseEstimate = odometryEstimate;
    } else {
      var visionUpdate = m_visionUpdates.get(m_visionUpdates.lastKey());
      m_poseEstimate = visionUpdate.compensate(odometryEstimate);
    }

    return getEstimatedPosition();
  }

  /**
   * Represents a vision update record. The record contains the vision-compensated pose estimate as
   * well as the corresponding odometry pose estimate.
   */
  private static final class VisionUpdate {
    // The vision-compensated pose estimate.
    private final Pose2d visionPose;

    // The pose estimated based solely on odometry.
    private final Pose2d odometryPose;

    /**
     * Constructs a vision update record with the specified parameters.
     *
     * @param visionPose The vision-compensated pose estimate.
     * @param odometryPose The pose estimate based solely on odometry.
     */
    private VisionUpdate(Pose2d visionPose, Pose2d odometryPose) {
      this.visionPose = visionPose;
      this.odometryPose = odometryPose;
    }

    /**
     * Returns the vision-compensated version of the pose. Specifically, changes the pose from being
     * relative to this record's odometry pose to being relative to this record's vision pose.
     *
     * @param pose The pose to compensate.
     * @return The compensated pose.
     */
    public Pose2d compensate(Pose2d pose) {
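      // delta is the rigid transform from this record's odometry pose to the given pose;
      // re-apply that same movement on top of the vision pose.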
      var delta = pose.minus(this.odometryPose);
      return this.visionPose.plus(delta);
    }
  }
}