// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

package edu.wpi.first.math.estimator;

import edu.wpi.first.math.MathSharedStore;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.Nat;
import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Twist2d;
import edu.wpi.first.math.interpolation.Interpolatable;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.kinematics.Kinematics;
import edu.wpi.first.math.kinematics.Odometry;
import edu.wpi.first.math.kinematics.WheelPositions;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N3;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Objects;

/**
 * This class wraps {@link Odometry} to fuse latency-compensated vision measurements with encoder
 * measurements. Robot code should not use this directly; instead, use the particular type for your
 * drivetrain (e.g., {@link DifferentialDrivePoseEstimator}). It is intended to be a drop-in
 * replacement for {@link Odometry}; in fact, if you never call {@link
 * PoseEstimator#addVisionMeasurement} and only call {@link PoseEstimator#update}, then this will
 * behave exactly the same as Odometry.
 *
 * <p>{@link PoseEstimator#update} should be called every robot loop.
 *
 * <p>{@link PoseEstimator#addVisionMeasurement} can be called as infrequently as you want; if you
 * never call it then this class will behave exactly like regular encoder odometry.
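 *
 * <p>A minimal usage sketch (the gyro, encoder, and vision names here are illustrative
 * placeholders, not part of this API):
 *
 * <pre>{@code
 * // Every robot loop, feed the estimator the latest odometry inputs.
 * estimator.update(gyro.getRotation2d(), wheelPositions);
 *
 * // Whenever a vision pose arrives, fuse it using its capture timestamp.
 * estimator.addVisionMeasurement(visionPoseMeters, captureTimestampSeconds);
 * }</pre>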
 *
 * @param <T> Wheel positions type.
 */
public class PoseEstimator<T extends WheelPositions<T>> {
  private final Kinematics<?, T> m_kinematics;
  private final Odometry<T> m_odometry;
  private final Matrix<N3, N1> m_q = new Matrix<>(Nat.N3(), Nat.N1());
  private final Matrix<N3, N3> m_visionK = new Matrix<>(Nat.N3(), Nat.N3());

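  // Seconds of odometry history retained for latency-compensating vision measurements.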
  private static final double kBufferDuration = 1.5;
  private final TimeInterpolatableBuffer<InterpolationRecord> m_poseBuffer =
      TimeInterpolatableBuffer.createBuffer(kBufferDuration);

  /**
   * Constructs a PoseEstimator.
   *
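   * <p>For example, the standard deviation vectors might be built like this (the values are
   * illustrative, not tuned recommendations; smaller numbers mean more trust):
   *
   * <pre>{@code
   * var stateStdDevs = VecBuilder.fill(0.02, 0.02, 0.01); // x (m), y (m), heading (rad)
   * var visionStdDevs = VecBuilder.fill(0.1, 0.1, 0.1); // x (m), y (m), heading (rad)
   * }</pre>
   *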
   * @param kinematics A correctly-configured kinematics object for your drivetrain.
   * @param odometry A correctly-configured odometry object for your drivetrain.
   * @param stateStdDevs Standard deviations of the pose estimate (x position in meters, y position
   *     in meters, and heading in radians). Increase these numbers to trust your state estimate
   *     less.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  public PoseEstimator(
      Kinematics<?, T> kinematics,
      Odometry<T> odometry,
      Matrix<N3, N1> stateStdDevs,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    m_kinematics = kinematics;
    m_odometry = odometry;

    for (int i = 0; i < 3; ++i) {
      m_q.set(i, 0, stateStdDevs.get(i, 0) * stateStdDevs.get(i, 0));
    }
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
  }

  /**
   * Sets the pose estimator's trust of global measurements. This might be used to change trust in
   * vision measurements after the autonomous period, or to change trust as distance to a vision
   * target increases.
   *
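   * <p>For example, to trust vision less as the distance to a target grows (the scale factors are
   * illustrative, not tuned recommendations):
   *
   * <pre>{@code
   * estimator.setVisionMeasurementStdDevs(
   *     VecBuilder.fill(0.1 * distanceMeters, 0.1 * distanceMeters, 0.2));
   * }</pre>
   *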
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y,
   *     theta]ᵀ, with units in meters and radians.
   */
  public final void setVisionMeasurementStdDevs(Matrix<N3, N1> visionMeasurementStdDevs) {
    var r = new double[3];
    for (int i = 0; i < 3; ++i) {
      r[i] = visionMeasurementStdDevs.get(i, 0) * visionMeasurementStdDevs.get(i, 0);
    }

    // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
    // and C = I. See wpimath/algorithms.md.
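    // Per axis, the gain below is k = q / (q + sqrt(q * r)): it approaches 1 as the vision
    // variance r approaches 0 (trust vision fully) and approaches 0 as r grows (ignore vision).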
    for (int row = 0; row < 3; ++row) {
      if (m_q.get(row, 0) == 0.0) {
        m_visionK.set(row, row, 0.0);
      } else {
        m_visionK.set(
            row, row, m_q.get(row, 0) / (m_q.get(row, 0) + Math.sqrt(m_q.get(row, 0) * r[row])));
      }
    }
  }

  /**
   * Resets the robot's position on the field.
   *
   * <p>The gyroscope angle does not need to be reset in the user's robot code. The library
   * automatically takes care of offsetting the gyro angle.
   *
   * @param gyroAngle The angle reported by the gyroscope.
   * @param wheelPositions The current encoder readings.
   * @param poseMeters The robot's position on the field.
   */
  public void resetPosition(Rotation2d gyroAngle, T wheelPositions, Pose2d poseMeters) {
    // Reset state estimate and error covariance
    m_odometry.resetPosition(gyroAngle, wheelPositions, poseMeters);
    m_poseBuffer.clear();
  }

  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  public Pose2d getEstimatedPosition() {
    return m_odometry.getPoseMeters();
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
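   * <p>For example (the latency variable is a placeholder for whatever latency your vision
   * pipeline reports):
   *
   * <pre>{@code
   * double captureTime = Timer.getFPGATimestamp() - pipelineLatencySeconds;
   * estimator.addVisionMeasurement(visionPoseMeters, captureTime);
   * }</pre>
   *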
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link
   *     PoseEstimator#updateWithTime(double,Rotation2d,WheelPositions)}, then you must use a
   *     timestamp with an epoch since FPGA startup (i.e., the epoch of this timestamp is the same
   *     epoch as {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you
   *     should use {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source or
   *     sync the epochs.
   */
  public void addVisionMeasurement(Pose2d visionRobotPoseMeters, double timestampSeconds) {
    // Step 0: If this measurement is old enough to be outside the pose buffer's timespan, skip.
    try {
      if (m_poseBuffer.getInternalBuffer().lastKey() - kBufferDuration > timestampSeconds) {
        return;
      }
    } catch (NoSuchElementException ex) {
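      // The pose buffer is empty, so there is no odometry history to correct against.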
      return;
    }

    // Step 1: Get the pose odometry measured at the moment the vision measurement was made.
    var sample = m_poseBuffer.getSample(timestampSeconds);

    if (sample.isEmpty()) {
      return;
    }

    // Step 2: Measure the twist between the odometry pose and the vision pose.
    var twist = sample.get().poseMeters.log(visionRobotPoseMeters);

    // Step 3: We should not trust the twist entirely, so instead we scale this twist by a Kalman
    // gain matrix representing how much we trust vision measurements compared to our current pose.
    var kTimesTwist = m_visionK.times(VecBuilder.fill(twist.dx, twist.dy, twist.dtheta));

    // Step 4: Convert back to Twist2d.
    var scaledTwist =
        new Twist2d(kTimesTwist.get(0, 0), kTimesTwist.get(1, 0), kTimesTwist.get(2, 0));

    // Step 5: Reset Odometry to state at sample with vision adjustment.
    m_odometry.resetPosition(
        sample.get().gyroAngle,
        sample.get().wheelPositions,
        sample.get().poseMeters.exp(scaledTwist));

    // Step 6: Record the current pose to allow multiple measurements from the same timestamp.
    m_poseBuffer.addSample(
        timestampSeconds,
        new InterpolationRecord(
            getEstimatedPosition(), sample.get().gyroAngle, sample.get().wheelPositions));

    // Step 7: Replay odometry inputs between sample time and latest recorded sample to update the
    // pose buffer and correct odometry.
    for (Map.Entry<Double, InterpolationRecord> entry :
        m_poseBuffer.getInternalBuffer().tailMap(timestampSeconds).entrySet()) {
      updateWithTime(entry.getKey(), entry.getValue().gyroAngle, entry.getValue().wheelPositions);
    }
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
   * <p>Note that the vision measurement standard deviations passed into this method will continue
   * to apply to future measurements until a subsequent call to {@link
   * PoseEstimator#setVisionMeasurementStdDevs(Matrix)} or this method.
   *
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link #updateWithTime}, then you must use a
   *     timestamp with an epoch since FPGA startup (i.e., the epoch of this timestamp is the same
   *     epoch as {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you
   *     should use {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source in
   *     this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  public void addVisionMeasurement(
      Pose2d visionRobotPoseMeters,
      double timestampSeconds,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    addVisionMeasurement(visionRobotPoseMeters, timestampSeconds);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d update(Rotation2d gyroAngle, T wheelPositions) {
    return updateWithTime(MathSharedStore.getTimestamp(), gyroAngle, wheelPositions);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
   * @param currentTimeSeconds Time at which this method was called, in seconds.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d updateWithTime(double currentTimeSeconds, Rotation2d gyroAngle, T wheelPositions) {
    m_odometry.update(gyroAngle, wheelPositions);
    m_poseBuffer.addSample(
        currentTimeSeconds,
        new InterpolationRecord(getEstimatedPosition(), gyroAngle, wheelPositions.copy()));

    return getEstimatedPosition();
  }

  /**
   * Represents an odometry record. The record contains the sensor inputs provided as well as the
   * pose that was observed based on these inputs.
   */
  private class InterpolationRecord implements Interpolatable<InterpolationRecord> {
    // The pose observed given the current sensor inputs and the previous pose.
    private final Pose2d poseMeters;

    // The current gyro angle.
    private final Rotation2d gyroAngle;

    // The current encoder readings.
    private final T wheelPositions;

    /**
     * Constructs an InterpolationRecord with the specified parameters.
     *
     * @param poseMeters The pose observed given the current sensor inputs and the previous pose.
     * @param gyro The current gyro angle.
     * @param wheelPositions The current encoder readings.
     */
    private InterpolationRecord(Pose2d poseMeters, Rotation2d gyro, T wheelPositions) {
      this.poseMeters = poseMeters;
      this.gyroAngle = gyro;
      this.wheelPositions = wheelPositions;
    }

    /**
     * Returns the interpolated record. This object is assumed to be the starting position, or
     * lower bound.
     *
     * @param endValue The upper bound, or end.
     * @param t How far between the lower and upper bound we are. This should be bounded in [0, 1].
     * @return The interpolated value.
     */
    @Override
    public InterpolationRecord interpolate(InterpolationRecord endValue, double t) {
      if (t < 0) {
        return this;
      } else if (t >= 1) {
        return endValue;
      } else {
        // Find the new wheel distances.
        var wheelLerp = wheelPositions.interpolate(endValue.wheelPositions, t);

        // Find the new gyro angle.
        var gyroLerp = gyroAngle.interpolate(endValue.gyroAngle, t);

        // Create a twist to represent the change based on the interpolated sensor inputs.
        Twist2d twist = m_kinematics.toTwist2d(wheelPositions, wheelLerp);
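        // Replace the twist's wheel-derived heading change with the gyro's, since the gyro is
        // typically a more accurate heading source than wheel odometry.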
        twist.dtheta = gyroLerp.minus(gyroAngle).getRadians();

        return new InterpolationRecord(poseMeters.exp(twist), gyroLerp, wheelLerp);
      }
    }

    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      }
      if (!(obj instanceof PoseEstimator.InterpolationRecord)) {
        return false;
      }
      var record = (PoseEstimator<?>.InterpolationRecord) obj;
      return Objects.equals(gyroAngle, record.gyroAngle)
          && Objects.equals(wheelPositions, record.wheelPositions)
          && Objects.equals(poseMeters, record.poseMeters);
    }

    @Override
    public int hashCode() {
      return Objects.hash(gyroAngle, wheelPositions, poseMeters);
    }
  }
}