// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

package edu.wpi.first.math.estimator;

import edu.wpi.first.math.MathSharedStore;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.Nat;
import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Twist2d;
import edu.wpi.first.math.interpolation.Interpolatable;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.kinematics.Kinematics;
import edu.wpi.first.math.kinematics.Odometry;
import edu.wpi.first.math.kinematics.WheelPositions;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N3;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.Optional;

/**
 * This class wraps {@link Odometry} to fuse latency-compensated vision measurements with encoder
 * measurements. Robot code should not use this directly; instead, use the particular type for your
 * drivetrain (e.g., {@link DifferentialDrivePoseEstimator}). It is intended to be a drop-in
 * replacement for {@link Odometry}; in fact, if you never call {@link
 * PoseEstimator#addVisionMeasurement} and only call {@link PoseEstimator#update}, then this will
 * behave exactly the same as Odometry.
 *
 * <p>{@link PoseEstimator#update} should be called every robot loop.
 *
 * <p>{@link PoseEstimator#addVisionMeasurement} can be called as infrequently as you want; if you
 * never call it, then this class will behave exactly like regular encoder odometry.
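 *
 * <p>A minimal usage sketch (here {@code estimator} is an instance of a drivetrain-specific
 * subclass, and {@code camera.getLatencySeconds()} is a hypothetical stand-in for your own
 * vision pipeline's measured latency):
 *
 * <pre>{@code
 * // Every robot loop: feed the estimator the latest gyro and encoder readings.
 * estimator.update(gyro.getRotation2d(), wheelPositions);
 *
 * // When a vision result arrives: timestamp it against the FPGA epoch, accounting for latency.
 * estimator.addVisionMeasurement(
 *     visionPose, Timer.getFPGATimestamp() - camera.getLatencySeconds());
 * }</pre>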
 *
 * @param <T> Wheel positions type.
 */
public class PoseEstimator<T extends WheelPositions<T>> {
  private final Kinematics<?, T> m_kinematics;
  private final Odometry<T> m_odometry;
  private final Matrix<N3, N1> m_q = new Matrix<>(Nat.N3(), Nat.N1());
  private final Matrix<N3, N3> m_visionK = new Matrix<>(Nat.N3(), Nat.N3());

  private static final double kBufferDuration = 1.5;
  private final TimeInterpolatableBuffer<InterpolationRecord> m_poseBuffer =
      TimeInterpolatableBuffer.createBuffer(kBufferDuration);

  /**
   * Constructs a PoseEstimator.
   *
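   * <p>As an illustrative sketch (the numbers below are placeholders, not tuned values), the
   * standard-deviation vectors can be built with {@link VecBuilder}:
   *
   * <pre>{@code
   * var stateStdDevs = VecBuilder.fill(0.02, 0.02, 0.01); // x (m), y (m), heading (rad)
   * var visionStdDevs = VecBuilder.fill(0.1, 0.1, 0.1);
   * }</pre>
   *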
   * @param kinematics A correctly-configured kinematics object for your drivetrain.
   * @param odometry A correctly-configured odometry object for your drivetrain.
   * @param stateStdDevs Standard deviations of the pose estimate (x position in meters, y position
   *     in meters, and heading in radians). Increase these numbers to trust your state estimate
   *     less.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  public PoseEstimator(
      Kinematics<?, T> kinematics,
      Odometry<T> odometry,
      Matrix<N3, N1> stateStdDevs,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    m_kinematics = kinematics;
    m_odometry = odometry;

    for (int i = 0; i < 3; ++i) {
      m_q.set(i, 0, stateStdDevs.get(i, 0) * stateStdDevs.get(i, 0));
    }
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
  }

  /**
   * Sets the pose estimator's trust of global measurements. This might be used to change trust in
   * vision measurements after the autonomous period, or to change trust as distance to a vision
   * target increases.
   *
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y,
   *     theta]ᵀ, with units in meters and radians.
   */
  public final void setVisionMeasurementStdDevs(Matrix<N3, N1> visionMeasurementStdDevs) {
    var r = new double[3];
    for (int i = 0; i < 3; ++i) {
      r[i] = visionMeasurementStdDevs.get(i, 0) * visionMeasurementStdDevs.get(i, 0);
    }

    // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
    // and C = I. See wpimath/algorithms.md.
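    // Per axis, the gain is k = q / (q + sqrt(q * r)). As r -> 0 the gain approaches 1
    // (vision is trusted completely), and as r -> infinity it approaches 0 (vision is ignored).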
    for (int row = 0; row < 3; ++row) {
      if (m_q.get(row, 0) == 0.0) {
        m_visionK.set(row, row, 0.0);
      } else {
        m_visionK.set(
            row, row, m_q.get(row, 0) / (m_q.get(row, 0) + Math.sqrt(m_q.get(row, 0) * r[row])));
      }
    }
  }

  /**
   * Resets the robot's position on the field.
   *
   * <p>The gyroscope angle does not need to be reset in the user's robot code. The library
   * automatically takes care of offsetting the gyro angle.
   *
   * @param gyroAngle The angle reported by the gyroscope.
   * @param wheelPositions The current encoder readings.
   * @param poseMeters The position on the field that your robot is at.
   */
  public void resetPosition(Rotation2d gyroAngle, T wheelPositions, Pose2d poseMeters) {
    // Reset state estimate and error covariance
    m_odometry.resetPosition(gyroAngle, wheelPositions, poseMeters);
    m_poseBuffer.clear();
  }

  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  public Pose2d getEstimatedPosition() {
    return m_odometry.getPoseMeters();
  }

  /**
   * Returns the pose at a given timestamp, if the buffer is not empty.
   *
   * @param timestampSeconds The pose's timestamp in seconds.
   * @return The pose at the given timestamp (or Optional.empty() if the buffer is empty).
   */
  public Optional<Pose2d> sampleAt(double timestampSeconds) {
    return m_poseBuffer.getSample(timestampSeconds).map(record -> record.poseMeters);
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
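   *
   * <p>One way to apply that recommendation (a sketch; {@code estimator}, {@code visionPose},
   * and {@code captureTimeSeconds} are your own objects and values):
   *
   * <pre>{@code
   * var delta = visionPose.minus(estimator.getEstimatedPosition());
   * if (Math.hypot(delta.getX(), delta.getY()) < 1.0) {
   *   estimator.addVisionMeasurement(visionPose, captureTimeSeconds);
   * }
   * }</pre>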
   *
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link
   *     PoseEstimator#updateWithTime(double,Rotation2d,WheelPositions)}, then you must use a
   *     timestamp with an epoch since FPGA startup (i.e., the epoch of this timestamp is the same
   *     epoch as {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you
   *     should use {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source or
   *     sync the epochs.
   */
  public void addVisionMeasurement(Pose2d visionRobotPoseMeters, double timestampSeconds) {
    // Step 0: If this measurement is old enough to be outside the pose buffer's timespan, skip.
    try {
      if (m_poseBuffer.getInternalBuffer().lastKey() - kBufferDuration > timestampSeconds) {
        return;
      }
    } catch (NoSuchElementException ex) {
      return;
    }

    // Step 1: Get the odometry pose measured at the moment the vision measurement was made.
    var sample = m_poseBuffer.getSample(timestampSeconds);

    if (sample.isEmpty()) {
      return;
    }

    // Step 2: Measure the twist between the odometry pose and the vision pose.
    var twist = sample.get().poseMeters.log(visionRobotPoseMeters);
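    // Pose2d.log() is the inverse of Pose2d.exp(): it yields the twist that would map the
    // odometry sample pose exactly onto the vision pose if applied with a gain of one.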

    // Step 3: We should not trust the twist entirely, so instead we scale this twist by a Kalman
    // gain matrix representing how much we trust vision measurements compared to our current pose.
    var kTimesTwist = m_visionK.times(VecBuilder.fill(twist.dx, twist.dy, twist.dtheta));

    // Step 4: Convert back to Twist2d.
    var scaledTwist =
        new Twist2d(kTimesTwist.get(0, 0), kTimesTwist.get(1, 0), kTimesTwist.get(2, 0));

    // Step 5: Reset Odometry to state at sample with vision adjustment.
    m_odometry.resetPosition(
        sample.get().gyroAngle,
        sample.get().wheelPositions,
        sample.get().poseMeters.exp(scaledTwist));

    // Step 6: Record the current pose to allow multiple measurements from the same timestamp.
    m_poseBuffer.addSample(
        timestampSeconds,
        new InterpolationRecord(
            getEstimatedPosition(), sample.get().gyroAngle, sample.get().wheelPositions));

    // Step 7: Replay odometry inputs between sample time and latest recorded sample to update the
    // pose buffer and correct odometry.
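    // Note: tailMap(fromKey) is inclusive, so the replay starts at the vision-corrected sample
    // itself and then walks forward through every newer odometry record.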
    for (Map.Entry<Double, InterpolationRecord> entry :
        m_poseBuffer.getInternalBuffer().tailMap(timestampSeconds).entrySet()) {
      updateWithTime(entry.getKey(), entry.getValue().gyroAngle, entry.getValue().wheelPositions);
    }
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
   * <p>Note that the vision measurement standard deviations passed into this method will continue
   * to apply to future measurements until a subsequent call to {@link
   * PoseEstimator#setVisionMeasurementStdDevs(Matrix)} or this method.
   *
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link #updateWithTime}, then you must use a
   *     timestamp with an epoch since FPGA startup (i.e., the epoch of this timestamp is the same
   *     epoch as {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you
   *     should use {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source in
   *     this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  public void addVisionMeasurement(
      Pose2d visionRobotPoseMeters,
      double timestampSeconds,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    addVisionMeasurement(visionRobotPoseMeters, timestampSeconds);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called
   * every loop.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d update(Rotation2d gyroAngle, T wheelPositions) {
    return updateWithTime(MathSharedStore.getTimestamp(), gyroAngle, wheelPositions);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called
   * every loop.
   *
   * @param currentTimeSeconds Time at which this method was called, in seconds.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d updateWithTime(double currentTimeSeconds, Rotation2d gyroAngle, T wheelPositions) {
    m_odometry.update(gyroAngle, wheelPositions);
    m_poseBuffer.addSample(
        currentTimeSeconds,
        new InterpolationRecord(getEstimatedPosition(), gyroAngle, wheelPositions.copy()));

    return getEstimatedPosition();
  }

  /**
   * Represents an odometry record. The record contains the inputs provided at a given timestamp as
   * well as the pose that was observed based on these inputs.
   */
  private class InterpolationRecord implements Interpolatable<InterpolationRecord> {
    // The pose observed given the current sensor inputs and the previous pose.
    private final Pose2d poseMeters;

    // The current gyro angle.
    private final Rotation2d gyroAngle;

    // The current encoder readings.
    private final T wheelPositions;

    /**
     * Constructs an InterpolationRecord with the specified parameters.
     *
     * @param poseMeters The pose observed given the current sensor inputs and the previous pose.
     * @param gyro The current gyro angle.
     * @param wheelPositions The current encoder readings.
     */
    private InterpolationRecord(Pose2d poseMeters, Rotation2d gyro, T wheelPositions) {
      this.poseMeters = poseMeters;
      this.gyroAngle = gyro;
      this.wheelPositions = wheelPositions;
    }

297
298    /**
299     * Return the interpolated record. This object is assumed to be the starting position, or lower
300     * bound.
301     *
302     * @param endValue The upper bound, or end.
303     * @param t How far between the lower and upper bound we are. This should be bounded in [0, 1].
304     * @return The interpolated value.
305     */
306    @Override
307    public InterpolationRecord interpolate(InterpolationRecord endValue, double t) {
308      if (t < 0) {
309        return this;
310      } else if (t >= 1) {
311        return endValue;
312      } else {
313        // Find the new wheel distances.
314        var wheelLerp = wheelPositions.interpolate(endValue.wheelPositions, t);
315
316        // Find the new gyro angle.
317        var gyroLerp = gyroAngle.interpolate(endValue.gyroAngle, t);
318
319        // Create a twist to represent the change based on the interpolated sensor inputs.
320        Twist2d twist = m_kinematics.toTwist2d(wheelPositions, wheelLerp);
321        twist.dtheta = gyroLerp.minus(gyroAngle).getRadians();
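        // The gyro heading delta replaces the encoder-derived dtheta, since the gyro is
        // generally a more accurate heading source than the wheel deltas.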

        return new InterpolationRecord(poseMeters.exp(twist), gyroLerp, wheelLerp);
      }
    }

    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      }
      if (!(obj instanceof PoseEstimator.InterpolationRecord)) {
        return false;
      }
      var record = (PoseEstimator<?>.InterpolationRecord) obj;
      return Objects.equals(gyroAngle, record.gyroAngle)
          && Objects.equals(wheelPositions, record.wheelPositions)
          && Objects.equals(poseMeters, record.poseMeters);
    }

    @Override
    public int hashCode() {
      return Objects.hash(gyroAngle, wheelPositions, poseMeters);
    }
  }
}