001// Copyright (c) FIRST and other WPILib contributors.
002// Open Source Software; you can modify and/or share it under the terms of
003// the WPILib BSD license file in the root directory of this project.
004
005package edu.wpi.first.math.estimator;
006
007import edu.wpi.first.math.MathSharedStore;
008import edu.wpi.first.math.MathUsageId;
009import edu.wpi.first.math.MathUtil;
010import edu.wpi.first.math.Matrix;
011import edu.wpi.first.math.geometry.Pose2d;
012import edu.wpi.first.math.geometry.Pose3d;
013import edu.wpi.first.math.geometry.Rotation2d;
014import edu.wpi.first.math.geometry.Rotation3d;
015import edu.wpi.first.math.geometry.Transform3d;
016import edu.wpi.first.math.geometry.Translation2d;
017import edu.wpi.first.math.geometry.Translation3d;
018import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
019import edu.wpi.first.math.kinematics.Kinematics;
020import edu.wpi.first.math.kinematics.Odometry3d;
021import edu.wpi.first.math.numbers.N1;
022import edu.wpi.first.math.numbers.N4;
023import java.util.NavigableMap;
024import java.util.Optional;
025import java.util.TreeMap;
026
027/**
028 * This class wraps {@link Odometry3d} to fuse latency-compensated vision measurements with encoder
 * measurements. Robot code should not use this directly; instead, use the particular type for your
030 * drivetrain (e.g., {@link DifferentialDrivePoseEstimator3d}). It is intended to be a drop-in
031 * replacement for {@link Odometry3d}; in fact, if you never call {@link
032 * PoseEstimator3d#addVisionMeasurement} and only call {@link PoseEstimator3d#update} then this will
033 * behave exactly the same as Odometry3d. It is also intended to be an easy replacement for {@link
034 * PoseEstimator}, only requiring the addition of a standard deviation for Z and appropriate
035 * conversions between 2D and 3D versions of geometry classes. (See {@link Pose3d#Pose3d(Pose2d)},
036 * {@link Rotation3d#Rotation3d(Rotation2d)}, {@link Translation3d#Translation3d(Translation2d)},
037 * and {@link Pose3d#toPose2d()}.)
038 *
039 * <p>{@link PoseEstimator3d#update} should be called every robot loop.
040 *
041 * <p>{@link PoseEstimator3d#addVisionMeasurement} can be called as infrequently as you want; if you
042 * never call it then this class will behave exactly like regular encoder odometry.
043 *
044 * @param <T> Wheel positions type.
045 */
public class PoseEstimator3d<T> {
  private final Odometry3d<T> m_odometry;

  // Diagonal of process noise covariance matrix Q. Each entry is the square of the
  // corresponding state standard deviation (x, y, z, angle).
  private final double[] m_q = new double[] {0.0, 0.0, 0.0, 0.0};

  // Diagonal of Kalman gain matrix K. Six entries: x, y, and z translation gains, plus one
  // gain per rotation vector component (all three rotation components share the angle gain
  // computed from row 3 -- see setVisionMeasurementStdDevs).
  private final double[] m_vision_k = new double[] {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};

  // Length of odometry history kept for latency compensation, in seconds. Vision measurements
  // older than this (relative to the newest odometry sample) are rejected in
  // addVisionMeasurement.
  private static final double kBufferDuration = 1.5;
  // Maps timestamps to odometry-only pose estimates
  private final TimeInterpolatableBuffer<Pose3d> m_odometryPoseBuffer =
      TimeInterpolatableBuffer.createBuffer(kBufferDuration);
  // Maps timestamps to vision updates
  // Always contains one entry before the oldest entry in m_odometryPoseBuffer, unless there have
  // been no vision measurements after the last reset. May contain one entry while
  // m_odometryPoseBuffer is empty to correct for translation/rotation after a call to
  // resetRotation/resetTranslation.
  private final NavigableMap<Double, VisionUpdate> m_visionUpdates = new TreeMap<>();

  // Latest fused (vision-compensated) pose estimate; this is what getEstimatedPosition() returns.
  private Pose3d m_poseEstimate;

  /**
   * Constructs a PoseEstimator3d.
   *
   * @param kinematics A correctly-configured kinematics object for your drivetrain.
   * @param odometry A correctly-configured odometry object for your drivetrain.
   * @param stateStdDevs Standard deviations of the pose estimate (x position in meters, y position
   *     in meters, z position in meters, and angle in radians). Increase these numbers to trust
   *     your state estimate less.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, z position in meters, and angle in radians). Increase
   *     these numbers to trust the vision pose measurement less.
   */
  // NOTE(review): kinematics is unused in this body (hence the PMD suppression); presumably it is
  // kept in the signature for API symmetry with the drivetrain-specific subclasses -- confirm
  // before removing.
  @SuppressWarnings("PMD.UnusedFormalParameter")
  public PoseEstimator3d(
      Kinematics<?, T> kinematics,
      Odometry3d<T> odometry,
      Matrix<N4, N1> stateStdDevs,
      Matrix<N4, N1> visionMeasurementStdDevs) {
    m_odometry = odometry;

    m_poseEstimate = m_odometry.getPoseMeters();

    // Square the state standard deviations to get the variances on the Q diagonal.
    for (int i = 0; i < 4; ++i) {
      m_q[i] = stateStdDevs.get(i, 0) * stateStdDevs.get(i, 0);
    }
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    MathSharedStore.getMathShared().reportUsage(MathUsageId.kEstimator_PoseEstimator3d, 1);
  }

  /**
   * Sets the pose estimator's trust of global measurements. This might be used to change trust in
   * vision measurements after the autonomous period, or to change trust as distance to a vision
   * target increases.
   *
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y, z,
   *     theta]ᵀ, with units in meters and radians.
   */
  public final void setVisionMeasurementStdDevs(Matrix<N4, N1> visionMeasurementStdDevs) {
    // Diagonal of measurement covariance matrix R
    var r = new double[4];
    for (int i = 0; i < 4; ++i) {
      r[i] = visionMeasurementStdDevs.get(i, 0) * visionMeasurementStdDevs.get(i, 0);
    }

    // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
    // and C = I. See wpimath/algorithms.md.
    for (int row = 0; row < 4; ++row) {
      if (m_q[row] == 0.0) {
        // Zero process noise means the state estimate is fully trusted; vision corrections are
        // disabled for this row (this also avoids a 0/0 in the formula below).
        m_vision_k[row] = 0.0;
      } else {
        m_vision_k[row] = m_q[row] / (m_q[row] + Math.sqrt(m_q[row] * r[row]));
      }
    }
    // Fill in the gains for the other components of the rotation vector
    double angle_gain = m_vision_k[3];
    m_vision_k[4] = angle_gain;
    m_vision_k[5] = angle_gain;
  }

  /**
   * Resets the robot's position on the field.
   *
   * <p>The gyroscope angle does not need to be reset here on the user's robot code. The library
   * automatically takes care of offsetting the gyro angle.
   *
   * @param gyroAngle The angle reported by the gyroscope.
   * @param wheelPositions The current encoder readings.
   * @param poseMeters The position on the field that your robot is at.
   */
  public void resetPosition(Rotation3d gyroAngle, T wheelPositions, Pose3d poseMeters) {
    // Reset state estimate and error covariance
    m_odometry.resetPosition(gyroAngle, wheelPositions, poseMeters);
    // Discard all history; prior odometry samples and vision updates no longer apply to the new
    // pose.
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPoseMeters();
  }

  /**
   * Resets the robot's pose.
   *
   * @param pose The pose to reset to.
   */
  public void resetPose(Pose3d pose) {
    m_odometry.resetPose(pose);
    // Discard all history; prior odometry samples and vision updates no longer apply to the new
    // pose.
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPoseMeters();
  }

  /**
   * Resets the robot's translation.
   *
   * @param translation The translation to reset to.
   */
  public void resetTranslation(Translation3d translation) {
    m_odometry.resetTranslation(translation);

    // Remember the newest vision update (if any) before clearing history, so the rotation part of
    // the vision correction can be preserved across the reset.
    final var latestVisionUpdate = m_visionUpdates.lastEntry();
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();

    if (latestVisionUpdate != null) {
      // apply vision compensation to the pose rotation
      // Both poses are re-seeded with the requested translation so compensate() only corrects
      // rotation going forward.
      final var visionUpdate =
          new VisionUpdate(
              new Pose3d(translation, latestVisionUpdate.getValue().visionPose.getRotation()),
              new Pose3d(translation, latestVisionUpdate.getValue().odometryPose.getRotation()));
      m_visionUpdates.put(latestVisionUpdate.getKey(), visionUpdate);
      m_poseEstimate = visionUpdate.compensate(m_odometry.getPoseMeters());
    } else {
      m_poseEstimate = m_odometry.getPoseMeters();
    }
  }

  /**
   * Resets the robot's rotation.
   *
   * @param rotation The rotation to reset to.
   */
  public void resetRotation(Rotation3d rotation) {
    m_odometry.resetRotation(rotation);

    // Remember the newest vision update (if any) before clearing history, so the translation part
    // of the vision correction can be preserved across the reset.
    final var latestVisionUpdate = m_visionUpdates.lastEntry();
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();

    if (latestVisionUpdate != null) {
      // apply vision compensation to the pose translation
      // Both poses are re-seeded with the requested rotation so compensate() only corrects
      // translation going forward.
      final var visionUpdate =
          new VisionUpdate(
              new Pose3d(latestVisionUpdate.getValue().visionPose.getTranslation(), rotation),
              new Pose3d(latestVisionUpdate.getValue().odometryPose.getTranslation(), rotation));
      m_visionUpdates.put(latestVisionUpdate.getKey(), visionUpdate);
      m_poseEstimate = visionUpdate.compensate(m_odometry.getPoseMeters());
    } else {
      m_poseEstimate = m_odometry.getPoseMeters();
    }
  }

  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  public Pose3d getEstimatedPosition() {
    return m_poseEstimate;
  }

  /**
   * Return the pose at a given timestamp, if the buffer is not empty.
   *
   * @param timestampSeconds The pose's timestamp in seconds.
   * @return The pose at the given timestamp (or Optional.empty() if the buffer is empty).
   */
  public Optional<Pose3d> sampleAt(double timestampSeconds) {
    // Step 0: If there are no odometry updates to sample, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return Optional.empty();
    }

    // Step 1: Make sure timestamp matches the sample from the odometry pose buffer. (When sampling,
    // the buffer will always use a timestamp between the first and last timestamps)
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();
    double newestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().lastKey();
    timestampSeconds =
        MathUtil.clamp(timestampSeconds, oldestOdometryTimestamp, newestOdometryTimestamp);

    // Step 2: If there are no applicable vision updates, use the odometry-only information.
    if (m_visionUpdates.isEmpty() || timestampSeconds < m_visionUpdates.firstKey()) {
      return m_odometryPoseBuffer.getSample(timestampSeconds);
    }

    // Step 3: Get the latest vision update from before or at the timestamp to sample at.
    // floorKey is non-null here because Step 2 guarantees timestampSeconds >= firstKey.
    double floorTimestamp = m_visionUpdates.floorKey(timestampSeconds);
    var visionUpdate = m_visionUpdates.get(floorTimestamp);

    // Step 4: Get the pose measured by odometry at the time of the sample.
    var odometryEstimate = m_odometryPoseBuffer.getSample(timestampSeconds);

    // Step 5: Apply the vision compensation to the odometry pose.
    return odometryEstimate.map(odometryPose -> visionUpdate.compensate(odometryPose));
  }

  /** Removes stale vision updates that won't affect sampling. */
  private void cleanUpVisionUpdates() {
    // Step 0: If there are no odometry samples, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return;
    }

    // Step 1: Find the oldest timestamp that needs a vision update.
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();

    // Step 2: If there are no vision updates before that timestamp, skip.
    if (m_visionUpdates.isEmpty() || oldestOdometryTimestamp < m_visionUpdates.firstKey()) {
      return;
    }

    // Step 3: Find the newest vision update timestamp before or at the oldest timestamp.
    // This entry is kept so every odometry sample still has an applicable vision update.
    double newestNeededVisionUpdateTimestamp = m_visionUpdates.floorKey(oldestOdometryTimestamp);

    // Step 4: Remove all entries strictly before the newest timestamp we need.
    m_visionUpdates.headMap(newestNeededVisionUpdateTimestamp, false).clear();
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator3d#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link
   *     PoseEstimator3d#updateWithTime(double,Rotation3d,Object)} then you must use a timestamp
   *     with an epoch since FPGA startup (i.e., the epoch of this timestamp is the same epoch as
   *     {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}.) This means that you should use
   *     {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source or sync the
   *     epochs.
   */
  public void addVisionMeasurement(Pose3d visionRobotPoseMeters, double timestampSeconds) {
    // Step 0: If this measurement is old enough to be outside the pose buffer's timespan, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()
        || m_odometryPoseBuffer.getInternalBuffer().lastKey() - kBufferDuration
            > timestampSeconds) {
      return;
    }

    // Step 1: Clean up any old entries
    cleanUpVisionUpdates();

    // Step 2: Get the pose measured by odometry at the moment the vision measurement was made.
    var odometrySample = m_odometryPoseBuffer.getSample(timestampSeconds);

    if (odometrySample.isEmpty()) {
      return;
    }

    // Step 3: Get the vision-compensated pose estimate at the moment the vision measurement was
    // made.
    var visionSample = sampleAt(timestampSeconds);

    if (visionSample.isEmpty()) {
      return;
    }

    // Step 4: Measure the transform between the old pose estimate and the vision pose.
    var transform = visionRobotPoseMeters.minus(visionSample.get());

    // Step 5: We should not trust the transform entirely, so instead we scale this transform by a
    // Kalman gain matrix representing how much we trust vision measurements compared to our current
    // pose. Then we convert the result back to a Transform3d.
    var scaledTransform =
        new Transform3d(
            m_vision_k[0] * transform.getX(),
            m_vision_k[1] * transform.getY(),
            m_vision_k[2] * transform.getZ(),
            new Rotation3d(
                m_vision_k[3] * transform.getRotation().getX(),
                m_vision_k[4] * transform.getRotation().getY(),
                m_vision_k[5] * transform.getRotation().getZ()));

    // Step 6: Calculate and record the vision update.
    var visionUpdate =
        new VisionUpdate(visionSample.get().plus(scaledTransform), odometrySample.get());
    m_visionUpdates.put(timestampSeconds, visionUpdate);

    // Step 7: Remove later vision measurements. (Matches previous behavior)
    m_visionUpdates.tailMap(timestampSeconds, false).entrySet().clear();

    // Step 8: Update latest pose estimate. Since we cleared all updates after this vision update,
    // it's guaranteed to be the latest vision update.
    m_poseEstimate = visionUpdate.compensate(m_odometry.getPoseMeters());
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator3d#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
   * <p>Note that the vision measurement standard deviations passed into this method will continue
   * to apply to future measurements until a subsequent call to {@link
   * PoseEstimator3d#setVisionMeasurementStdDevs(Matrix)} or this method.
   *
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link #updateWithTime}, then you must use a
   *     timestamp with an epoch since FPGA startup (i.e., the epoch of this timestamp is the same
   *     epoch as {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you
   *     should use {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source in
   *     this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, z position in meters, and angle in radians). Increase
   *     these numbers to trust the vision pose measurement less.
   */
  public void addVisionMeasurement(
      Pose3d visionRobotPoseMeters,
      double timestampSeconds,
      Matrix<N4, N1> visionMeasurementStdDevs) {
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    addVisionMeasurement(visionRobotPoseMeters, timestampSeconds);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose3d update(Rotation3d gyroAngle, T wheelPositions) {
    return updateWithTime(MathSharedStore.getTimestamp(), gyroAngle, wheelPositions);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
   * @param currentTimeSeconds Time at which this method was called, in seconds.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose3d updateWithTime(double currentTimeSeconds, Rotation3d gyroAngle, T wheelPositions) {
    var odometryEstimate = m_odometry.update(gyroAngle, wheelPositions);

    // Record the odometry-only pose so later vision measurements can be latency-compensated.
    m_odometryPoseBuffer.addSample(currentTimeSeconds, odometryEstimate);

    if (m_visionUpdates.isEmpty()) {
      m_poseEstimate = odometryEstimate;
    } else {
      // Apply the newest vision correction to the fresh odometry estimate.
      var visionUpdate = m_visionUpdates.get(m_visionUpdates.lastKey());
      m_poseEstimate = visionUpdate.compensate(odometryEstimate);
    }

    return getEstimatedPosition();
  }

  /**
   * Represents a vision update record. The record contains the vision-compensated pose estimate as
   * well as the corresponding odometry pose estimate.
   */
  private static final class VisionUpdate {
    // The vision-compensated pose estimate.
    private final Pose3d visionPose;

    // The pose estimated based solely on odometry.
    private final Pose3d odometryPose;

    /**
     * Constructs a vision update record with the specified parameters.
     *
     * @param visionPose The vision-compensated pose estimate.
     * @param odometryPose The pose estimate based solely on odometry.
     */
    private VisionUpdate(Pose3d visionPose, Pose3d odometryPose) {
      this.visionPose = visionPose;
      this.odometryPose = odometryPose;
    }

    /**
     * Returns the vision-compensated version of the pose. Specifically, changes the pose from being
     * relative to this record's odometry pose to being relative to this record's vision pose.
     *
     * @param pose The pose to compensate.
     * @return The compensated pose.
     */
    public Pose3d compensate(Pose3d pose) {
      // Re-apply the odometry-relative delta on top of the vision pose.
      var delta = pose.minus(this.odometryPose);
      return this.visionPose.plus(delta);
    }
  }
}