// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

package edu.wpi.first.math.estimator;

import edu.wpi.first.math.MathSharedStore;
import edu.wpi.first.math.MathUtil;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.Nat;
import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Rotation3d;
import edu.wpi.first.math.geometry.Translation2d;
import edu.wpi.first.math.geometry.Translation3d;
import edu.wpi.first.math.geometry.Twist3d;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.kinematics.Kinematics;
import edu.wpi.first.math.kinematics.Odometry3d;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N4;
import edu.wpi.first.math.numbers.N6;
import java.util.NavigableMap;
import java.util.Optional;
import java.util.TreeMap;

/**
 * This class wraps {@link Odometry3d} to fuse latency-compensated vision measurements with encoder
 * measurements. Robot code should not use this directly; instead, use the particular type for your
 * drivetrain (e.g., {@link DifferentialDrivePoseEstimator3d}). It is intended to be a drop-in
 * replacement for {@link Odometry3d}; in fact, if you never call {@link
 * PoseEstimator3d#addVisionMeasurement} and only call {@link PoseEstimator3d#update}, then this
 * will behave exactly the same as Odometry3d. It is also intended to be an easy replacement for
 * {@link PoseEstimator}, only requiring the addition of a standard deviation for Z and appropriate
 * conversions between 2D and 3D versions of geometry classes. (See {@link Pose3d#Pose3d(Pose2d)},
 * {@link Rotation3d#Rotation3d(Rotation2d)}, {@link Translation3d#Translation3d(Translation2d)},
 * and {@link Pose3d#toPose2d()}.)
 *
 * <p>{@link PoseEstimator3d#update} should be called every robot loop.
 *
 * <p>{@link PoseEstimator3d#addVisionMeasurement} can be called as infrequently as you want; if you
 * never call it, then this class will behave exactly like regular encoder odometry.
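 *
 * <p>A minimal usage sketch (assumes a drivetrain-specific subclass such as {@link
 * DifferentialDrivePoseEstimator3d}; the gyro, wheel position, and vision objects here are
 * hypothetical):
 *
 * <pre>{@code
 * // Every robot loop:
 * estimator.update(gyro.getRotation3d(), wheelPositions);
 *
 * // Whenever a (possibly delayed) vision result arrives:
 * estimator.addVisionMeasurement(visionPose, visionTimestampSeconds);
 *
 * Pose3d currentEstimate = estimator.getEstimatedPosition();
 * }</pre>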
 *
 * @param <T> Wheel positions type.
 */
public class PoseEstimator3d<T> {
  private final Odometry3d<T> m_odometry;
  private final Matrix<N4, N1> m_q = new Matrix<>(Nat.N4(), Nat.N1());
  private final Matrix<N6, N6> m_visionK = new Matrix<>(Nat.N6(), Nat.N6());

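  // How much odometry history to retain, in seconds. Vision measurements older than this
  // (relative to the newest odometry sample) are discarded by addVisionMeasurement().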
  private static final double kBufferDuration = 1.5;
  // Maps timestamps to odometry-only pose estimates
  private final TimeInterpolatableBuffer<Pose3d> m_odometryPoseBuffer =
      TimeInterpolatableBuffer.createBuffer(kBufferDuration);
  // Maps timestamps to vision updates
  // Always contains one entry before the oldest entry in m_odometryPoseBuffer, unless there have
  // been no vision measurements after the last reset
  private final NavigableMap<Double, VisionUpdate> m_visionUpdates = new TreeMap<>();

  private Pose3d m_poseEstimate;

  /**
   * Constructs a PoseEstimator3d.
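   *
   * <p>For example, building the standard deviation vectors (a sketch; the numbers are
   * illustrative, not tuned values):
   *
   * <pre>{@code
   * var stateStdDevs = VecBuilder.fill(0.05, 0.05, 0.05, 0.02); // x, y, z, angle
   * var visionStdDevs = VecBuilder.fill(0.5, 0.5, 0.5, 0.2);
   * }</pre>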
   *
   * @param kinematics A correctly-configured kinematics object for your drivetrain.
   * @param odometry A correctly-configured odometry object for your drivetrain.
   * @param stateStdDevs Standard deviations of the pose estimate (x position in meters, y position
   *     in meters, z position in meters, and angle in radians). Increase these numbers to trust
   *     your state estimate less.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, z position in meters, and angle in radians). Increase
   *     these numbers to trust the vision pose measurement less.
   */
  @SuppressWarnings("PMD.UnusedFormalParameter")
  public PoseEstimator3d(
      Kinematics<?, T> kinematics,
      Odometry3d<T> odometry,
      Matrix<N4, N1> stateStdDevs,
      Matrix<N4, N1> visionMeasurementStdDevs) {
    m_odometry = odometry;

    m_poseEstimate = m_odometry.getPose();

    for (int i = 0; i < 4; ++i) {
      m_q.set(i, 0, stateStdDevs.get(i, 0) * stateStdDevs.get(i, 0));
    }
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
  }

  /**
   * Sets the pose estimator's trust of global measurements. This might be used to change trust in
   * vision measurements after the autonomous period, or to change trust as distance to a vision
   * target increases.
   *
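   * <p>For example, to trust vision less as distance to a target grows (a sketch; {@code
   * distanceMeters} is a hypothetical value from your vision pipeline):
   *
   * <pre>{@code
   * estimator.setVisionMeasurementStdDevs(
   *     VecBuilder.fill(
   *         0.1 * distanceMeters, 0.1 * distanceMeters, 0.1 * distanceMeters, 0.5));
   * }</pre>
   *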
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y, z,
   *     theta]ᵀ, with units in meters and radians.
   */
  public final void setVisionMeasurementStdDevs(Matrix<N4, N1> visionMeasurementStdDevs) {
    var r = new double[4];
    for (int i = 0; i < 4; ++i) {
      r[i] = visionMeasurementStdDevs.get(i, 0) * visionMeasurementStdDevs.get(i, 0);
    }

    // Solve for the closed-form Kalman gain for a continuous Kalman filter with A = 0
    // and C = I. See wpimath/algorithms.md.
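    // For each scalar axis this reduces to K = q / (q + sqrt(q * r)), so the gain approaches 1
    // (trust vision fully) as r -> 0 and approaches 0 (ignore vision) as r grows.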
    for (int row = 0; row < 4; ++row) {
      if (m_q.get(row, 0) == 0.0) {
        m_visionK.set(row, row, 0.0);
      } else {
        m_visionK.set(
            row, row, m_q.get(row, 0) / (m_q.get(row, 0) + Math.sqrt(m_q.get(row, 0) * r[row])));
      }
    }
    // Fill in the gains for the other components of the rotation vector
    double angle_gain = m_visionK.get(3, 3);
    m_visionK.set(4, 4, angle_gain);
    m_visionK.set(5, 5, angle_gain);
  }

  /**
   * Resets the robot's position on the field.
   *
   * <p>The gyroscope angle does not need to be reset in the user's robot code. The library
   * automatically takes care of offsetting the gyro angle.
   *
   * @param gyroAngle The angle reported by the gyroscope.
   * @param wheelPositions The current encoder readings.
   * @param pose The position on the field that your robot is at.
   */
  public void resetPosition(Rotation3d gyroAngle, T wheelPositions, Pose3d pose) {
    // Reset state estimate and error covariance
    m_odometry.resetPosition(gyroAngle, wheelPositions, pose);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Resets the robot's pose.
   *
   * @param pose The pose to reset to.
   */
  public void resetPose(Pose3d pose) {
    m_odometry.resetPose(pose);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Resets the robot's translation.
   *
   * @param translation The translation to reset to.
   */
  public void resetTranslation(Translation3d translation) {
    m_odometry.resetTranslation(translation);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Resets the robot's rotation.
   *
   * @param rotation The rotation to reset to.
   */
  public void resetRotation(Rotation3d rotation) {
    m_odometry.resetRotation(rotation);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPose();
  }

  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  public Pose3d getEstimatedPosition() {
    return m_poseEstimate;
  }

  /**
   * Returns the pose at a given timestamp, if the buffer is not empty.
   *
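   * <p>For example (a sketch; the timestamp must share the epoch used when updating the
   * estimator, e.g., the FPGA epoch):
   *
   * <pre>{@code
   * Optional<Pose3d> poseHalfSecondAgo = estimator.sampleAt(Timer.getFPGATimestamp() - 0.5);
   * }</pre>
   *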
   * @param timestamp The pose's timestamp in seconds.
   * @return The pose at the given timestamp (or Optional.empty() if the buffer is empty).
   */
  public Optional<Pose3d> sampleAt(double timestamp) {
    // Step 0: If there are no odometry updates to sample, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return Optional.empty();
    }

    // Step 1: Clamp the timestamp to the odometry pose buffer's timespan. (When sampling, the
    // buffer will always use a timestamp between the first and last timestamps.)
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();
    double newestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().lastKey();
    timestamp = MathUtil.clamp(timestamp, oldestOdometryTimestamp, newestOdometryTimestamp);

    // Step 2: If there are no applicable vision updates, use the odometry-only information.
    if (m_visionUpdates.isEmpty() || timestamp < m_visionUpdates.firstKey()) {
      return m_odometryPoseBuffer.getSample(timestamp);
    }

    // Step 3: Get the latest vision update from before or at the timestamp to sample at.
    double floorTimestamp = m_visionUpdates.floorKey(timestamp);
    var visionUpdate = m_visionUpdates.get(floorTimestamp);

    // Step 4: Get the pose measured by odometry at the time of the sample.
    var odometryEstimate = m_odometryPoseBuffer.getSample(timestamp);

    // Step 5: Apply the vision compensation to the odometry pose.
    return odometryEstimate.map(odometryPose -> visionUpdate.compensate(odometryPose));
  }

  /** Removes stale vision updates that won't affect sampling. */
  private void cleanUpVisionUpdates() {
    // Step 0: If there are no odometry samples, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return;
    }

    // Step 1: Find the oldest timestamp that needs a vision update.
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();

    // Step 2: If there are no vision updates before that timestamp, skip.
    if (m_visionUpdates.isEmpty() || oldestOdometryTimestamp < m_visionUpdates.firstKey()) {
      return;
    }

    // Step 3: Find the newest vision update timestamp before or at the oldest timestamp.
    double newestNeededVisionUpdateTimestamp = m_visionUpdates.floorKey(oldestOdometryTimestamp);

    // Step 4: Remove all entries strictly before the newest timestamp we need.
    m_visionUpdates.headMap(newestNeededVisionUpdateTimestamp, false).clear();
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator3d#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
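   *
   * <p>For example, compensating for camera pipeline latency (a sketch; {@code latencySeconds}
   * is a hypothetical value reported by your vision system):
   *
   * <pre>{@code
   * double captureTime = Timer.getFPGATimestamp() - latencySeconds;
   * estimator.addVisionMeasurement(visionPose, captureTime);
   * }</pre>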
   *
   * @param visionRobotPose The pose of the robot as measured by the vision camera.
   * @param timestamp The timestamp of the vision measurement in seconds. Note that if you don't use
   *     your own time source by calling {@link
   *     PoseEstimator3d#updateWithTime(double,Rotation3d,Object)}, then you must use a timestamp
   *     with an epoch since FPGA startup (i.e., the epoch of this timestamp is the same epoch as
   *     {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you should use
   *     {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source or sync the
   *     epochs.
   */
  public void addVisionMeasurement(Pose3d visionRobotPose, double timestamp) {
    // Step 0: If this measurement is old enough to be outside the pose buffer's timespan, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()
        || m_odometryPoseBuffer.getInternalBuffer().lastKey() - kBufferDuration > timestamp) {
      return;
    }

    // Step 1: Clean up any old entries
    cleanUpVisionUpdates();

    // Step 2: Get the pose measured by odometry at the moment the vision measurement was made.
    var odometrySample = m_odometryPoseBuffer.getSample(timestamp);

    if (odometrySample.isEmpty()) {
      return;
    }

    // Step 3: Get the vision-compensated pose estimate at the moment the vision measurement was
    // made.
    var visionSample = sampleAt(timestamp);

    if (visionSample.isEmpty()) {
      return;
    }
    // Step 4: Measure the twist between the old pose estimate and the vision pose.
    var twist = visionSample.get().log(visionRobotPose);
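    // (log() maps the relative motion onto a 6-dimensional twist, so each translational and
    // rotational component can be scaled independently by the Kalman gain below.)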

    // Step 5: We should not trust the twist entirely, so instead we scale this twist by a Kalman
    // gain matrix representing how much we trust vision measurements compared to our current pose.
    var k_times_twist =
        m_visionK.times(
            VecBuilder.fill(twist.dx, twist.dy, twist.dz, twist.rx, twist.ry, twist.rz));

    // Step 6: Convert back to Twist3d.
    var scaledTwist =
        new Twist3d(
            k_times_twist.get(0, 0),
            k_times_twist.get(1, 0),
            k_times_twist.get(2, 0),
            k_times_twist.get(3, 0),
            k_times_twist.get(4, 0),
            k_times_twist.get(5, 0));

    // Step 7: Calculate and record the vision update.
    var visionUpdate =
        new VisionUpdate(visionSample.get().exp(scaledTwist), odometrySample.get());
    m_visionUpdates.put(timestamp, visionUpdate);

    // Step 8: Remove later vision measurements. (Matches previous behavior)
    m_visionUpdates.tailMap(timestamp, false).entrySet().clear();

    // Step 9: Update latest pose estimate. Since we cleared all updates after this vision update,
    // it's guaranteed to be the latest vision update.
    m_poseEstimate = visionUpdate.compensate(m_odometry.getPose());
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator3d#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
   * <p>Note that the vision measurement standard deviations passed into this method will continue
   * to apply to future measurements until a subsequent call to {@link
   * PoseEstimator3d#setVisionMeasurementStdDevs(Matrix)} or this method.
   *
   * @param visionRobotPose The pose of the robot as measured by the vision camera.
   * @param timestamp The timestamp of the vision measurement in seconds. Note that if you don't use
   *     your own time source by calling {@link #updateWithTime}, then you must use a timestamp with
   *     an epoch since FPGA startup (i.e., the epoch of this timestamp is the same epoch as {@link
   *     edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you should use {@link
   *     edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source in this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, z position in meters, and angle in radians). Increase
   *     these numbers to trust the vision pose measurement less.
   */
  public void addVisionMeasurement(
      Pose3d visionRobotPose, double timestamp, Matrix<N4, N1> visionMeasurementStdDevs) {
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    addVisionMeasurement(visionRobotPose, timestamp);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose3d update(Rotation3d gyroAngle, T wheelPositions) {
    return updateWithTime(MathSharedStore.getTimestamp(), gyroAngle, wheelPositions);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
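   *
   * <p>For example, with a user-supplied clock (a sketch; {@code myClockSeconds()} is a
   * hypothetical time source whose epoch must match your vision measurement timestamps):
   *
   * <pre>{@code
   * estimator.updateWithTime(myClockSeconds(), gyro.getRotation3d(), wheelPositions);
   * }</pre>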
   *
   * @param currentTime Time at which this method was called, in seconds.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose3d updateWithTime(double currentTime, Rotation3d gyroAngle, T wheelPositions) {
    var odometryEstimate = m_odometry.update(gyroAngle, wheelPositions);

    m_odometryPoseBuffer.addSample(currentTime, odometryEstimate);

    if (m_visionUpdates.isEmpty()) {
      m_poseEstimate = odometryEstimate;
    } else {
      var visionUpdate = m_visionUpdates.get(m_visionUpdates.lastKey());
      m_poseEstimate = visionUpdate.compensate(odometryEstimate);
    }

    return getEstimatedPosition();
  }

  /**
   * Represents a vision update record. The record contains the vision-compensated pose estimate as
   * well as the corresponding odometry pose estimate.
   */
  private static final class VisionUpdate {
    // The vision-compensated pose estimate.
    private final Pose3d visionPose;

    // The pose estimated based solely on odometry.
    private final Pose3d odometryPose;

    /**
     * Constructs a vision update record with the specified parameters.
     *
     * @param visionPose The vision-compensated pose estimate.
     * @param odometryPose The pose estimate based solely on odometry.
     */
    private VisionUpdate(Pose3d visionPose, Pose3d odometryPose) {
      this.visionPose = visionPose;
      this.odometryPose = odometryPose;
    }

    /**
     * Returns the vision-compensated version of the pose. Specifically, changes the pose from being
     * relative to this record's odometry pose to being relative to this record's vision pose.
     *
     * @param pose The pose to compensate.
     * @return The compensated pose.
     */
    public Pose3d compensate(Pose3d pose) {
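      // delta is the Transform3d from this record's odometry pose to the given pose; replaying
      // that relative motion on top of the vision pose yields the compensated estimate.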
      var delta = pose.minus(this.odometryPose);
      return this.visionPose.plus(delta);
    }
  }
}