// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

package edu.wpi.first.math.estimator;

import edu.wpi.first.math.MathSharedStore;
import edu.wpi.first.math.MathUsageId;
import edu.wpi.first.math.MathUtil;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.Nat;
import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Rotation3d;
import edu.wpi.first.math.geometry.Transform3d;
import edu.wpi.first.math.geometry.Translation2d;
import edu.wpi.first.math.geometry.Translation3d;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.kinematics.Kinematics;
import edu.wpi.first.math.kinematics.Odometry3d;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N4;
import edu.wpi.first.math.numbers.N6;
import java.util.NavigableMap;
import java.util.Optional;
import java.util.TreeMap;
/**
 * This class wraps {@link Odometry3d} to fuse latency-compensated vision measurements with encoder
 * measurements. Robot code should not use this directly; instead, use the particular type for your
 * drivetrain (e.g., {@link DifferentialDrivePoseEstimator3d}). It is intended to be a drop-in
 * replacement for {@link Odometry3d}; in fact, if you never call {@link
 * PoseEstimator3d#addVisionMeasurement} and only call {@link PoseEstimator3d#update} then this will
 * behave exactly the same as Odometry3d. It is also intended to be an easy replacement for {@link
 * PoseEstimator}, only requiring the addition of a standard deviation for Z and appropriate
 * conversions between 2D and 3D versions of geometry classes. (See {@link Pose3d#Pose3d(Pose2d)},
 * {@link Rotation3d#Rotation3d(Rotation2d)}, {@link Translation3d#Translation3d(Translation2d)},
 * and {@link Pose3d#toPose2d()}.)
 *
 * <p>{@link PoseEstimator3d#update} should be called every robot loop.
 *
 * <p>{@link PoseEstimator3d#addVisionMeasurement} can be called as infrequently as you want; if you
 * never call it then this class will behave exactly like regular encoder odometry.
 *
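 * <p>A minimal usage sketch; {@code m_estimator}, {@code m_gyro}, and {@code getWheelPositions()}
 * are illustrative placeholders rather than part of this API:
 *
 * <pre>{@code
 * // In your periodic loop: advance the estimate with encoder and gyro data.
 * m_estimator.update(m_gyro.getRotation3d(), getWheelPositions());
 *
 * // Whenever a vision result arrives: fuse it, tagged with its capture timestamp.
 * m_estimator.addVisionMeasurement(visionPose, visionTimestampSeconds);
 * }</pre>
 *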
 * @param <T> Wheel positions type.
 */
public class PoseEstimator3d<T> {
  private final Odometry3d<T> m_odometry;
  private final Matrix<N4, N1> m_q = new Matrix<>(Nat.N4(), Nat.N1());
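  // The Kalman gain is 6x6 (x, y, z, and a three-component rotation vector) even though only four
  // standard deviations are supplied; the single angular gain is copied to all three rotation axes
  // in setVisionMeasurementStdDevs.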
  private final Matrix<N6, N6> m_visionK = new Matrix<>(Nat.N6(), Nat.N6());

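  // Length of the pose buffer, in seconds. Vision measurements older than the newest odometry
  // sample by more than this are discarded (see addVisionMeasurement).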
  private static final double kBufferDuration = 1.5;
  // Maps timestamps to odometry-only pose estimates
  private final TimeInterpolatableBuffer<Pose3d> m_odometryPoseBuffer =
      TimeInterpolatableBuffer.createBuffer(kBufferDuration);
  // Maps timestamps to vision updates
  // Always contains one entry before the oldest entry in m_odometryPoseBuffer, unless there have
  // been no vision measurements after the last reset. May contain one entry while
  // m_odometryPoseBuffer is empty to correct for translation/rotation after a call to
  // resetTranslation/resetRotation.
  private final NavigableMap<Double, VisionUpdate> m_visionUpdates = new TreeMap<>();

  private Pose3d m_poseEstimate;

  /**
   * Constructs a PoseEstimator3d.
   *
   * @param kinematics A correctly-configured kinematics object for your drivetrain.
   * @param odometry A correctly-configured odometry object for your drivetrain.
   * @param stateStdDevs Standard deviations of the pose estimate (x position in meters, y position
   *     in meters, z position in meters, and angle in radians). Increase these numbers to trust
   *     your state estimate less.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, z position in meters, and angle in radians). Increase
   *     these numbers to trust the vision pose measurement less.
   */
  @SuppressWarnings("PMD.UnusedFormalParameter")
  public PoseEstimator3d(
      Kinematics<?, T> kinematics,
      Odometry3d<T> odometry,
      Matrix<N4, N1> stateStdDevs,
      Matrix<N4, N1> visionMeasurementStdDevs) {
    m_odometry = odometry;

    m_poseEstimate = m_odometry.getPoseMeters();

    for (int i = 0; i < 4; ++i) {
      m_q.set(i, 0, stateStdDevs.get(i, 0) * stateStdDevs.get(i, 0));
    }
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    MathSharedStore.getMathShared().reportUsage(MathUsageId.kEstimator_PoseEstimator3d, 1);
  }

  /**
   * Sets the pose estimator's trust of global measurements. This might be used to change trust in
   * vision measurements after the autonomous period, or to change trust as distance to a vision
   * target increases.
   *
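   * <p>A hedged sketch of distance-based scaling, where {@code distanceMeters} is the measured
   * distance to a vision target and the {@code 0.1}/{@code 0.5} coefficients are illustrative
   * rather than tuned values:
   *
   * <pre>{@code
   * estimator.setVisionMeasurementStdDevs(
   *     VecBuilder.fill(
   *         0.1 * distanceMeters,
   *         0.1 * distanceMeters,
   *         0.1 * distanceMeters,
   *         0.5 * distanceMeters));
   * }</pre>
   *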
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y, z,
   *     theta]ᵀ, with units in meters and radians.
   */
  public final void setVisionMeasurementStdDevs(Matrix<N4, N1> visionMeasurementStdDevs) {
    var r = new double[4];
    for (int i = 0; i < 4; ++i) {
      r[i] = visionMeasurementStdDevs.get(i, 0) * visionMeasurementStdDevs.get(i, 0);
    }

    // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
    // and C = I. See wpimath/algorithms.md.
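    // Per axis, the scalar gain is K = q / (q + sqrt(q * r)), where q is the state variance and r
    // is the vision measurement variance. K approaches 1 as r approaches 0 (trust vision fully)
    // and approaches 0 as r grows (ignore vision).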
    for (int row = 0; row < 4; ++row) {
      if (m_q.get(row, 0) == 0.0) {
        m_visionK.set(row, row, 0.0);
      } else {
        m_visionK.set(
            row, row, m_q.get(row, 0) / (m_q.get(row, 0) + Math.sqrt(m_q.get(row, 0) * r[row])));
      }
    }
    // Fill in the gains for the other components of the rotation vector
    double angleGain = m_visionK.get(3, 3);
    m_visionK.set(4, 4, angleGain);
    m_visionK.set(5, 5, angleGain);
  }

  /**
   * Resets the robot's position on the field.
   *
   * <p>The gyroscope angle does not need to be reset here in the user's robot code. The library
   * automatically takes care of offsetting the gyro angle.
   *
   * @param gyroAngle The angle reported by the gyroscope.
   * @param wheelPositions The current encoder readings.
   * @param poseMeters The position on the field that your robot is at.
   */
  public void resetPosition(Rotation3d gyroAngle, T wheelPositions, Pose3d poseMeters) {
    // Reset state estimate and error covariance
    m_odometry.resetPosition(gyroAngle, wheelPositions, poseMeters);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPoseMeters();
  }

  /**
   * Resets the robot's pose.
   *
   * @param pose The pose to reset to.
   */
  public void resetPose(Pose3d pose) {
    m_odometry.resetPose(pose);
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.getPoseMeters();
  }

  /**
   * Resets the robot's translation.
   *
   * @param translation The translation to reset to.
   */
  public void resetTranslation(Translation3d translation) {
    m_odometry.resetTranslation(translation);

    final var latestVisionUpdate = m_visionUpdates.lastEntry();
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();

    if (latestVisionUpdate != null) {
      // Apply vision compensation to the pose rotation
      final var visionUpdate =
          new VisionUpdate(
              new Pose3d(translation, latestVisionUpdate.getValue().visionPose.getRotation()),
              new Pose3d(translation, latestVisionUpdate.getValue().odometryPose.getRotation()));
      m_visionUpdates.put(latestVisionUpdate.getKey(), visionUpdate);
      m_poseEstimate = visionUpdate.compensate(m_odometry.getPoseMeters());
    } else {
      m_poseEstimate = m_odometry.getPoseMeters();
    }
  }

  /**
   * Resets the robot's rotation.
   *
   * @param rotation The rotation to reset to.
   */
  public void resetRotation(Rotation3d rotation) {
    m_odometry.resetRotation(rotation);

    final var latestVisionUpdate = m_visionUpdates.lastEntry();
    m_odometryPoseBuffer.clear();
    m_visionUpdates.clear();

    if (latestVisionUpdate != null) {
      // Apply vision compensation to the pose translation
      final var visionUpdate =
          new VisionUpdate(
              new Pose3d(latestVisionUpdate.getValue().visionPose.getTranslation(), rotation),
              new Pose3d(latestVisionUpdate.getValue().odometryPose.getTranslation(), rotation));
      m_visionUpdates.put(latestVisionUpdate.getKey(), visionUpdate);
      m_poseEstimate = visionUpdate.compensate(m_odometry.getPoseMeters());
    } else {
      m_poseEstimate = m_odometry.getPoseMeters();
    }
  }

  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  public Pose3d getEstimatedPosition() {
    return m_poseEstimate;
  }

  /**
   * Returns the pose at a given timestamp, if the buffer is not empty.
   *
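   * <p>A hedged sketch of looking up where the robot was when a delayed measurement was captured;
   * {@code latencySeconds} is an illustrative value from your own sensor pipeline:
   *
   * <pre>{@code
   * Optional<Pose3d> pastPose =
   *     estimator.sampleAt(Timer.getFPGATimestamp() - latencySeconds);
   * }</pre>
   *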
   * @param timestampSeconds The pose's timestamp in seconds.
   * @return The pose at the given timestamp (or Optional.empty() if the buffer is empty).
   */
  public Optional<Pose3d> sampleAt(double timestampSeconds) {
    // Step 0: If there are no odometry updates to sample, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return Optional.empty();
    }

    // Step 1: Make sure timestamp matches the sample from the odometry pose buffer. (When
    // sampling, the buffer will always use a timestamp between the first and last timestamps)
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();
    double newestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().lastKey();
    timestampSeconds =
        MathUtil.clamp(timestampSeconds, oldestOdometryTimestamp, newestOdometryTimestamp);

    // Step 2: If there are no applicable vision updates, use the odometry-only information.
    if (m_visionUpdates.isEmpty() || timestampSeconds < m_visionUpdates.firstKey()) {
      return m_odometryPoseBuffer.getSample(timestampSeconds);
    }

    // Step 3: Get the latest vision update from before or at the timestamp to sample at.
    double floorTimestamp = m_visionUpdates.floorKey(timestampSeconds);
    var visionUpdate = m_visionUpdates.get(floorTimestamp);

    // Step 4: Get the pose measured by odometry at the time of the sample.
    var odometryEstimate = m_odometryPoseBuffer.getSample(timestampSeconds);

    // Step 5: Apply the vision compensation to the odometry pose.
    return odometryEstimate.map(odometryPose -> visionUpdate.compensate(odometryPose));
  }

  /** Removes stale vision updates that won't affect sampling. */
  private void cleanUpVisionUpdates() {
    // Step 0: If there are no odometry samples, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()) {
      return;
    }

    // Step 1: Find the oldest timestamp that needs a vision update.
    double oldestOdometryTimestamp = m_odometryPoseBuffer.getInternalBuffer().firstKey();

    // Step 2: If there are no vision updates before that timestamp, skip.
    if (m_visionUpdates.isEmpty() || oldestOdometryTimestamp < m_visionUpdates.firstKey()) {
      return;
    }

    // Step 3: Find the newest vision update timestamp before or at the oldest timestamp.
    double newestNeededVisionUpdateTimestamp = m_visionUpdates.floorKey(oldestOdometryTimestamp);

    // Step 4: Remove all entries strictly before the newest timestamp we need.
    m_visionUpdates.headMap(newestNeededVisionUpdateTimestamp, false).clear();
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator3d#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
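   * <p>A hedged sketch of latency compensation; {@code camera} and its {@code getLatencySeconds()}
   * method are hypothetical stand-ins for your vision pipeline:
   *
   * <pre>{@code
   * estimator.addVisionMeasurement(
   *     visionPose, Timer.getFPGATimestamp() - camera.getLatencySeconds());
   * }</pre>
   *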
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link
   *     PoseEstimator3d#updateWithTime(double,Rotation3d,Object)} then you must use a timestamp
   *     with an epoch since FPGA startup (i.e., the epoch of this timestamp is the same epoch as
   *     {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}.) This means that you should use
   *     {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source or sync the
   *     epochs.
   */
  public void addVisionMeasurement(Pose3d visionRobotPoseMeters, double timestampSeconds) {
    // Step 0: If this measurement is old enough to be outside the pose buffer's timespan, skip.
    if (m_odometryPoseBuffer.getInternalBuffer().isEmpty()
        || m_odometryPoseBuffer.getInternalBuffer().lastKey() - kBufferDuration
            > timestampSeconds) {
      return;
    }

    // Step 1: Clean up any old entries
    cleanUpVisionUpdates();

    // Step 2: Get the pose measured by odometry at the moment the vision measurement was made.
    var odometrySample = m_odometryPoseBuffer.getSample(timestampSeconds);

    if (odometrySample.isEmpty()) {
      return;
    }

    // Step 3: Get the vision-compensated pose estimate at the moment the vision measurement was
    // made.
    var visionSample = sampleAt(timestampSeconds);

    if (visionSample.isEmpty()) {
      return;
    }

    // Step 4: Measure the transform between the old pose estimate and the vision pose.
    var transform = visionRobotPoseMeters.minus(visionSample.get());

    // Step 5: We should not trust the transform entirely, so instead we scale this transform by a
    // Kalman gain matrix representing how much we trust vision measurements compared to our
    // current pose.
    var kTimesTransform =
        m_visionK.times(
            VecBuilder.fill(
                transform.getX(),
                transform.getY(),
                transform.getZ(),
                transform.getRotation().getX(),
                transform.getRotation().getY(),
                transform.getRotation().getZ()));

    // Step 6: Convert back to Transform3d.
    var scaledTransform =
        new Transform3d(
            kTimesTransform.get(0, 0),
            kTimesTransform.get(1, 0),
            kTimesTransform.get(2, 0),
            new Rotation3d(
                kTimesTransform.get(3, 0),
                kTimesTransform.get(4, 0),
                kTimesTransform.get(5, 0)));

    // Step 7: Calculate and record the vision update.
    var visionUpdate =
        new VisionUpdate(visionSample.get().plus(scaledTransform), odometrySample.get());
    m_visionUpdates.put(timestampSeconds, visionUpdate);

    // Step 8: Remove later vision measurements. (Matches previous behavior)
    m_visionUpdates.tailMap(timestampSeconds, false).entrySet().clear();

    // Step 9: Update latest pose estimate. Since we cleared all updates after this vision update,
    // it's guaranteed to be the latest vision update.
    m_poseEstimate = visionUpdate.compensate(m_odometry.getPoseMeters());
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator3d#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
   * <p>Note that the vision measurement standard deviations passed into this method will continue
   * to apply to future measurements until a subsequent call to {@link
   * PoseEstimator3d#setVisionMeasurementStdDevs(Matrix)} or this method.
   *
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link #updateWithTime}, then you must use a
   *     timestamp with an epoch since FPGA startup (i.e., the epoch of this timestamp is the same
   *     epoch as {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you
   *     should use {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source in
   *     this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, z position in meters, and angle in radians). Increase
   *     these numbers to trust the vision pose measurement less.
   */
  public void addVisionMeasurement(
      Pose3d visionRobotPoseMeters,
      double timestampSeconds,
      Matrix<N4, N1> visionMeasurementStdDevs) {
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    addVisionMeasurement(visionRobotPoseMeters, timestampSeconds);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called
   * every loop.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose3d update(Rotation3d gyroAngle, T wheelPositions) {
    return updateWithTime(MathSharedStore.getTimestamp(), gyroAngle, wheelPositions);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called
   * every loop.
   *
   * @param currentTimeSeconds Time at which this method was called, in seconds.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose3d updateWithTime(double currentTimeSeconds, Rotation3d gyroAngle, T wheelPositions) {
    var odometryEstimate = m_odometry.update(gyroAngle, wheelPositions);

    m_odometryPoseBuffer.addSample(currentTimeSeconds, odometryEstimate);

    if (m_visionUpdates.isEmpty()) {
      m_poseEstimate = odometryEstimate;
    } else {
      var visionUpdate = m_visionUpdates.get(m_visionUpdates.lastKey());
      m_poseEstimate = visionUpdate.compensate(odometryEstimate);
    }

    return getEstimatedPosition();
  }

  /**
   * Represents a vision update record. The record contains the vision-compensated pose estimate as
   * well as the corresponding odometry pose estimate.
   */
  private static final class VisionUpdate {
    // The vision-compensated pose estimate.
    private final Pose3d visionPose;

    // The pose estimated based solely on odometry.
    private final Pose3d odometryPose;

    /**
     * Constructs a vision update record with the specified parameters.
     *
     * @param visionPose The vision-compensated pose estimate.
     * @param odometryPose The pose estimate based solely on odometry.
     */
    private VisionUpdate(Pose3d visionPose, Pose3d odometryPose) {
      this.visionPose = visionPose;
      this.odometryPose = odometryPose;
    }

    /**
     * Returns the vision-compensated version of the pose. Specifically, changes the pose from
     * being relative to this record's odometry pose to being relative to this record's vision
     * pose.
     *
     * @param pose The pose to compensate.
     * @return The compensated pose.
     */
    public Pose3d compensate(Pose3d pose) {
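      // delta is the odometry motion accumulated since this record was made; replaying it on top
      // of the vision-compensated pose yields visionPose * (odometryPose^-1 * pose).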
      var delta = pose.minus(this.odometryPose);
      return this.visionPose.plus(delta);
    }
  }
}