// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

package edu.wpi.first.math.estimator;

import edu.wpi.first.math.MathSharedStore;
import edu.wpi.first.math.MathUtil;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.Nat;
import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Rotation3d;
import edu.wpi.first.math.geometry.Translation2d;
import edu.wpi.first.math.geometry.Translation3d;
import edu.wpi.first.math.geometry.Twist3d;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.kinematics.Kinematics;
import edu.wpi.first.math.kinematics.Odometry3d;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N4;
import edu.wpi.first.math.numbers.N6;
import java.util.NavigableMap;
import java.util.Optional;
import java.util.TreeMap;

/**
 * This class wraps {@link Odometry3d} to fuse latency-compensated vision measurements with encoder
 * measurements. Robot code should not use this directly; instead, use the particular type for your
 * drivetrain (e.g., {@link DifferentialDrivePoseEstimator3d}). It is intended to be a drop-in
 * replacement for {@link Odometry3d}; in fact, if you never call {@link
 * PoseEstimator3d#addVisionMeasurement} and only call {@link PoseEstimator3d#update}, then this
 * will behave exactly the same as Odometry3d. It is also intended to be an easy replacement for
 * {@link PoseEstimator}, only requiring the addition of a standard deviation for Z and appropriate
 * conversions between 2D and 3D versions of geometry classes. (See {@link Pose3d#Pose3d(Pose2d)},
 * {@link Rotation3d#Rotation3d(Rotation2d)}, {@link Translation3d#Translation3d(Translation2d)},
 * and {@link Pose3d#toPose2d()}.)
 *
 * <p>{@link PoseEstimator3d#update} should be called every robot loop.
 *
 * <p>{@link PoseEstimator3d#addVisionMeasurement} can be called as infrequently as you want; if you
 * never call it, then this class will behave exactly like regular encoder odometry.
 *
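 * <p>A typical update loop, using a drivetrain-specific subclass of this class (a minimal sketch;
 * the {@code estimator}, {@code gyro}, {@code wheelPositions}, {@code visionPose}, and {@code
 * latencySeconds} names are hypothetical):
 *
 * <pre>{@code
 * // Every robot loop: advance the odometry with the latest gyro and encoder readings.
 * estimator.update(gyro.getRotation3d(), wheelPositions);
 *
 * // Whenever a vision measurement arrives: fuse it, timestamped in the FPGA epoch and
 * // corrected for the vision pipeline's latency.
 * estimator.addVisionMeasurement(visionPose, Timer.getFPGATimestamp() - latencySeconds);
 * }</pre>
 *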
 * @param <T> Wheel positions type.
 */
public class PoseEstimator3d<T> {
  private final Odometry3d<T> m_odometry;
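  // Element-wise squares of the state standard deviations (i.e., variances) for x, y, z, and
  // rotation, used to compute the vision Kalman gain.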
  private final Matrix<N4, N1> m_q = new Matrix<>(Nat.N4(), Nat.N1());
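  // Diagonal Kalman gain applied to vision corrections, over [x, y, z, rx, ry, rz].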
  private final Matrix<N6, N6> m_visionK = new Matrix<>(Nat.N6(), Nat.N6());

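  // Length of the pose buffer history, in seconds. Vision measurements older than this, relative
  // to the newest odometry sample, are discarded.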
  private static final double kBufferDuration = 1.5;
  // Maps timestamps to odometry-only pose estimates
  private final TimeInterpolatableBuffer<Pose3d> m_odometryPoseBuffer =
      TimeInterpolatableBuffer.createBuffer(kBufferDuration);
  // Maps timestamps to vision updates
  // Always contains one entry before the oldest entry in m_odometryPoseBuffer, unless there have
  // been no vision measurements after the last reset
  private final NavigableMap<Double, VisionUpdate> m_visionUpdates = new TreeMap<>();

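  // The current best pose estimate: odometry compensated by the latest vision update, if any.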
  private Pose3d m_poseEstimate;

  /**
   * Constructs a PoseEstimator3d.
   *
   * @param kinematics A correctly-configured kinematics object for your drivetrain.
   * @param odometry A correctly-configured odometry object for your drivetrain.
   * @param stateStdDevs Standard deviations of the pose estimate (x position in meters, y position
   *     in meters, z position in meters, and angle in radians). Increase these numbers to trust
   *     your state estimate less.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, z position in meters, and angle in radians). Increase
   *     these numbers to trust the vision pose measurement less.
   */
  @SuppressWarnings("PMD.UnusedFormalParameter")
  public PoseEstimator3d(
      Kinematics<?, T> kinematics,
      Odometry3d<T> odometry,
      Matrix<N4, N1> stateStdDevs,
      Matrix<N4, N1> visionMeasurementStdDevs) {
    m_odometry = odometry;

    m_poseEstimate = m_odometry.getPoseMeters();

    for (int i = 0; i < 4; ++i) {
      m_q.set(i, 0, stateStdDevs.get(i, 0) * stateStdDevs.get(i, 0));
    }
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
  }

  /**
   * Sets the pose estimator's trust of global measurements. This might be used to change trust in
   * vision measurements after the autonomous period, or to change trust as distance to a vision
   * target increases.
   *
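   * <p>For example, to trust vision measurements less as the distance to the target grows (a
   * minimal sketch; the scaling factors and {@code distanceMeters} are hypothetical):
   *
   * <pre>{@code
   * estimator.setVisionMeasurementStdDevs(
   *     VecBuilder.fill(
   *         0.1 * distanceMeters, // x standard deviation, meters
   *         0.1 * distanceMeters, // y standard deviation, meters
   *         0.1 * distanceMeters, // z standard deviation, meters
   *         0.5)); // angle standard deviation, radians
   * }</pre>
   *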
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y, z,
   *     theta]ᵀ, with units in meters and radians.
   */
  public final void setVisionMeasurementStdDevs(Matrix<N4, N1> visionMeasurementStdDevs) {
    var r = new double[4];
    for (int i = 0; i < 4; ++i) {
      r[i] = visionMeasurementStdDevs.get(i, 0) * visionMeasurementStdDevs.get(i, 0);
    }

    // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
    // and C = I. See wpimath/algorithms.md.
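    // For each axis, the steady-state gain reduces to K = q / (q + sqrt(q * r)), where q is the
    // state variance and r is the measurement variance for that axis.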
    for (int row = 0; row < 4; ++row) {
      if (m_q.get(row, 0) == 0.0) {
        m_visionK.set(row, row, 0.0);
      } else {
        m_visionK.set(
            row, row, m_q.get(row, 0) / (m_q.get(row, 0) + Math.sqrt(m_q.get(row, 0) * r[row])));
      }
    }
    // Fill in the gains for the other components of the rotation vector
    double angleGain = m_visionK.get(3, 3);
    m_visionK.set(4, 4, angleGain);
    m_visionK.set(5, 5, angleGain);
  }

  /**
   * Resets the robot's position on the field.
   *
   * <p>The gyroscope angle does not need to be reset here in the user's robot code. The library
   * automatically takes care of offsetting the gyro angle.
   *
   * @param gyroAngle The angle reported by the gyroscope.
   * @param wheelPositions The current encoder readings.
   * @param poseMeters The position on the field that your robot is at.
   */
  public void resetPosition(Rotation3d gyroAngle, T wheelPositions, Pose3d poseMeters) {
    // Reset state estimate and error covariance
    m_odometry.resetPosition(gyroAngle, wheelPositions, poseMeters);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPoseMeters();
  }

  /**
   * Resets the robot's pose.
   *
   * @param pose The pose to reset to.
   */
  public void resetPose(Pose3d pose) {
    m_odometry.resetPose(pose);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPoseMeters();
  }

  /**
   * Resets the robot's translation.
   *
   * @param translation The translation to reset to.
   */
  public void resetTranslation(Translation3d translation) {
    m_odometry.resetTranslation(translation);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPoseMeters();
  }

  /**
   * Resets the robot's rotation.
   *
   * @param rotation The rotation to reset to.
   */
  public void resetRotation(Rotation3d rotation) {
    m_odometry.resetRotation(rotation);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPoseMeters();
  }

  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  public Pose3d getEstimatedPosition() {
    return m_poseEstimate;
  }

  /**
   * Returns the pose at a given timestamp, if the buffer is not empty.
   *
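   * <p>If the given timestamp is outside the range of buffered odometry samples, it is clamped to
   * the nearest buffered timestamp before sampling.
   *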
   * @param timestampSeconds The pose's timestamp in seconds.
   * @return The pose at the given timestamp (or Optional.empty() if the buffer is empty).
   */
  public Optional<Pose3d> sampleAt(double timestampSeconds) {
    // Step 0: If there are no odometry updates to sample, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return Optional.empty();
    }

    // Step 1: Make sure timestamp matches the sample from the odometry pose buffer. (When
    // sampling, the buffer will always use a timestamp between the first and last timestamps)
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();
    double newestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().lastKey();
    timestampSeconds =
        MathUtil.clamp(timestampSeconds, oldestOdometryTimestamp, newestOdometryTimestamp);

    // Step 2: If there are no applicable vision updates, use the odometry-only information.
    if (m_visionUpdates.isEmpty() || timestampSeconds < m_visionUpdates.firstKey()) {
      return m_odometryPoseBuffer.getSample(timestampSeconds);
    }

    // Step 3: Get the latest vision update from before or at the timestamp to sample at.
    double floorTimestamp = m_visionUpdates.floorKey(timestampSeconds);
    var visionUpdate = m_visionUpdates.get(floorTimestamp);

    // Step 4: Get the pose measured by odometry at the time of the sample.
    var odometryEstimate = m_odometryPoseBuffer.getSample(timestampSeconds);

    // Step 5: Apply the vision compensation to the odometry pose.
    return odometryEstimate.map(odometryPose -> visionUpdate.compensate(odometryPose));
  }

  /** Removes stale vision updates that won't affect sampling. */
  private void cleanUpVisionUpdates() {
    // Step 0: If there are no odometry samples, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return;
    }

    // Step 1: Find the oldest timestamp that needs a vision update.
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();

    // Step 2: If there are no vision updates before that timestamp, skip.
    if (m_visionUpdates.isEmpty() || oldestOdometryTimestamp < m_visionUpdates.firstKey()) {
      return;
    }

    // Step 3: Find the newest vision update timestamp before or at the oldest timestamp.
    double newestNeededVisionUpdateTimestamp = m_visionUpdates.floorKey(oldestOdometryTimestamp);

    // Step 4: Remove all entries strictly before the newest timestamp we need.
    m_visionUpdates.headMap(newestNeededVisionUpdateTimestamp, false).clear();
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator3d#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
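   * <p>For example, fusing a vision pose with a latency-corrected timestamp (a minimal sketch;
   * {@code visionPose} and {@code latencySeconds} are hypothetical):
   *
   * <pre>{@code
   * estimator.addVisionMeasurement(
   *     visionPose, Timer.getFPGATimestamp() - latencySeconds);
   * }</pre>
   *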
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link
   *     PoseEstimator3d#updateWithTime(double,Rotation3d,Object)}, then you must use a timestamp
   *     with an epoch since FPGA startup (i.e., the epoch of this timestamp is the same epoch as
   *     {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you should use
   *     {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source or sync the
   *     epochs.
   */
  public void addVisionMeasurement(Pose3d visionRobotPoseMeters, double timestampSeconds) {
    // Step 0: If this measurement is old enough to be outside the pose buffer's timespan, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()
        || m_odometryPoseBuffer.getInternalBuffer().lastKey() - kBufferDuration
            > timestampSeconds) {
      return;
    }

    // Step 1: Clean up any old entries
    cleanUpVisionUpdates();

    // Step 2: Get the pose measured by odometry at the moment the vision measurement was made.
    var odometrySample = m_odometryPoseBuffer.getSample(timestampSeconds);

    if (odometrySample.isEmpty()) {
      return;
    }

    // Step 3: Get the vision-compensated pose estimate at the moment the vision measurement was
    // made.
    var visionSample = sampleAt(timestampSeconds);

    if (visionSample.isEmpty()) {
      return;
    }

    // Step 4: Measure the twist between the old pose estimate and the vision pose.
    var twist = visionSample.get().log(visionRobotPoseMeters);

    // Step 5: We should not trust the twist entirely, so instead we scale this twist by a Kalman
    // gain matrix representing how much we trust vision measurements compared to our current pose.
    var kTimesTwist =
        m_visionK.times(
            VecBuilder.fill(twist.dx, twist.dy, twist.dz, twist.rx, twist.ry, twist.rz));

    // Step 6: Convert back to Twist3d.
    var scaledTwist =
        new Twist3d(
            kTimesTwist.get(0, 0),
            kTimesTwist.get(1, 0),
            kTimesTwist.get(2, 0),
            kTimesTwist.get(3, 0),
            kTimesTwist.get(4, 0),
            kTimesTwist.get(5, 0));

    // Step 7: Calculate and record the vision update.
    var visionUpdate = new VisionUpdate(visionSample.get().exp(scaledTwist), odometrySample.get());
    m_visionUpdates.put(timestampSeconds, visionUpdate);

    // Step 8: Remove later vision measurements. (Matches previous behavior)
    m_visionUpdates.tailMap(timestampSeconds, false).entrySet().clear();

    // Step 9: Update latest pose estimate. Since we cleared all updates after this vision update,
    // it's guaranteed to be the latest vision update.
    m_poseEstimate = visionUpdate.compensate(m_odometry.getPoseMeters());
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator3d#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
   * <p>Note that the vision measurement standard deviations passed into this method will continue
   * to apply to future measurements until a subsequent call to {@link
   * PoseEstimator3d#setVisionMeasurementStdDevs(Matrix)} or this method.
   *
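   * <p>For example, weighting a single measurement by its distance to the target (a minimal
   * sketch; the scaling factors, {@code visionPose}, {@code latencySeconds}, and {@code
   * distanceMeters} are hypothetical):
   *
   * <pre>{@code
   * estimator.addVisionMeasurement(
   *     visionPose,
   *     Timer.getFPGATimestamp() - latencySeconds,
   *     VecBuilder.fill(
   *         0.1 * distanceMeters, 0.1 * distanceMeters, 0.1 * distanceMeters, 0.5));
   * }</pre>
   *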
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link #updateWithTime}, then you must use a
   *     timestamp with an epoch since FPGA startup (i.e., the epoch of this timestamp is the same
   *     epoch as {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you
   *     should use {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source in
   *     this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, z position in meters, and angle in radians). Increase
   *     these numbers to trust the vision pose measurement less.
   */
  public void addVisionMeasurement(
      Pose3d visionRobotPoseMeters,
      double timestampSeconds,
      Matrix<N4, N1> visionMeasurementStdDevs) {
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    addVisionMeasurement(visionRobotPoseMeters, timestampSeconds);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose3d update(Rotation3d gyroAngle, T wheelPositions) {
    return updateWithTime(MathSharedStore.getTimestamp(), gyroAngle, wheelPositions);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
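   * <p>For example, with an explicit FPGA-epoch timestamp (a minimal sketch; {@code gyro} and
   * {@code wheelPositions} are hypothetical):
   *
   * <pre>{@code
   * estimator.updateWithTime(Timer.getFPGATimestamp(), gyro.getRotation3d(), wheelPositions);
   * }</pre>
   *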
   * @param currentTimeSeconds Time at which this method was called, in seconds.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose3d updateWithTime(double currentTimeSeconds, Rotation3d gyroAngle, T wheelPositions) {
    var odometryEstimate = m_odometry.update(gyroAngle, wheelPositions);

    m_odometryPoseBuffer.addSample(currentTimeSeconds, odometryEstimate);

    if (m_visionUpdates.isEmpty()) {
      m_poseEstimate = odometryEstimate;
    } else {
      var visionUpdate = m_visionUpdates.get(m_visionUpdates.lastKey());
      m_poseEstimate = visionUpdate.compensate(odometryEstimate);
    }

    return getEstimatedPosition();
  }

  /**
   * Represents a vision update record. The record contains the vision-compensated pose estimate as
   * well as the corresponding odometry pose estimate.
   */
  private static final class VisionUpdate {
    // The vision-compensated pose estimate.
    private final Pose3d visionPose;

    // The pose estimated based solely on odometry.
    private final Pose3d odometryPose;

    /**
     * Constructs a vision update record with the specified parameters.
     *
     * @param visionPose The vision-compensated pose estimate.
     * @param odometryPose The pose estimate based solely on odometry.
     */
    private VisionUpdate(Pose3d visionPose, Pose3d odometryPose) {
      this.visionPose = visionPose;
      this.odometryPose = odometryPose;
    }

    /**
     * Returns the vision-compensated version of the pose. Specifically, changes the pose from being
     * relative to this record's odometry pose to being relative to this record's vision pose.
     *
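     * <p>In other words, returns {@code visionPose + (pose - odometryPose)}: the odometry delta
     * accumulated since this record is replayed on top of the vision-compensated pose.
     *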
     * @param pose The pose to compensate.
     * @return The compensated pose.
     */
    public Pose3d compensate(Pose3d pose) {
      var delta = pose.minus(this.odometryPose);
      return this.visionPose.plus(delta);
    }
  }
}