WPILibC++ 2027.0.0-alpha-4
Loading...
Searching...
No Matches
PoseEstimator.hpp
Go to the documentation of this file.
// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

#pragma once

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <map>
#include <optional>
#include <utility>
#include <vector>

#include <Eigen/Core>

// NOTE(review): the Doxygen extraction this file was recovered from dropped
// several project includes here (geometry, kinematics, odometry,
// interpolation buffer, MathShared) — restore them from the upstream
// PoseEstimator.hpp.
#include "wpi/units/time.hpp"
#include "wpi/util/array.hpp"

26namespace wpi::math {
27
28/**
29 * This class wraps odometry to fuse latency-compensated
30 * vision measurements with encoder measurements. Robot code should not use this
31 * directly- Instead, use the particular type for your drivetrain (e.g.,
32 * DifferentialDrivePoseEstimator). It will correct for noisy vision
33 * measurements and encoder drift. It is intended to be an easy drop-in for
34 * Odometry.
35 *
36 * Update() should be called every robot loop.
37 *
38 * AddVisionMeasurement() can be called as infrequently as you want; if you
39 * never call it, then this class will behave like regular encoder odometry.
40 *
41 * @tparam WheelPositions Wheel positions type.
42 * @tparam WheelVelocities Wheel velocities type.
43 * @tparam WheelAccelerations Wheel accelerations type.
44 */
45template <typename WheelPositions, typename WheelVelocities,
46 typename WheelAccelerations>
48 public:
49 /**
50 * Constructs a PoseEstimator.
51 *
52 * @warning The initial pose estimate will always be the default pose,
53 * regardless of the odometry's current pose.
54 *
55 * @param kinematics A correctly-configured kinematics object for your
56 * drivetrain.
57 * @param odometry A correctly-configured odometry object for your drivetrain.
58 * @param stateStdDevs Standard deviations of the pose estimate (x position in
59 * meters, y position in meters, and heading in radians). Increase these
60 * numbers to trust your state estimate less.
61 * @param visionMeasurementStdDevs Standard deviations of the vision pose
62 * measurement (x position in meters, y position in meters, and heading in
63 * radians). Increase these numbers to trust the vision pose measurement
64 * less.
65 */
68 kinematics,
70 const wpi::util::array<double, 3>& stateStdDevs,
71 const wpi::util::array<double, 3>& visionMeasurementStdDevs)
72 : m_odometry(odometry) {
73 for (size_t i = 0; i < 3; ++i) {
74 m_q[i] = stateStdDevs[i] * stateStdDevs[i];
75 }
76
77 SetVisionMeasurementStdDevs(visionMeasurementStdDevs);
78 wpi::math::MathSharedStore::ReportUsage("PoseEstimator", "");
79 }
80
81 /**
82 * Sets the pose estimator's trust in vision measurements. This might be used
83 * to change trust in vision measurements after the autonomous period, or to
84 * change trust as distance to a vision target increases.
85 *
86 * @param visionMeasurementStdDevs Standard deviations of the vision pose
87 * measurement (x position in meters, y position in meters, and heading in
88 * radians). Increase these numbers to trust the vision pose measurement
89 * less.
90 */
92 const wpi::util::array<double, 3>& visionMeasurementStdDevs) {
93 // Diagonal of measurement covariance matrix R
95 for (size_t i = 0; i < 3; ++i) {
96 r[i] = visionMeasurementStdDevs[i] * visionMeasurementStdDevs[i];
97 }
98
99 // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
100 // and C = I. See wpimath/algorithms.md.
101 for (size_t row = 0; row < 3; ++row) {
102 if (m_q[row] == 0.0) {
103 m_vision_K.diagonal()[row] = 0.0;
104 } else {
105 m_vision_K.diagonal()[row] =
106 m_q[row] / (m_q[row] + std::sqrt(m_q[row] * r[row]));
107 }
108 }
109 }
110
111 /**
112 * Resets the robot's position on the field.
113 *
114 * The gyroscope angle does not need to be reset in the user's robot code.
115 * The library automatically takes care of offsetting the gyro angle.
116 *
117 * @param gyroAngle The current gyro angle.
118 * @param wheelPositions The distances traveled by the encoders.
119 * @param pose The estimated pose of the robot on the field.
120 */
121 void ResetPosition(const Rotation2d& gyroAngle,
122 const WheelPositions& wheelPositions, const Pose2d& pose) {
123 // Reset state estimate and error covariance
124 m_odometry.ResetPosition(gyroAngle, wheelPositions, pose);
125 m_odometryPoseBuffer.Clear();
126 m_visionUpdates.clear();
127 m_poseEstimate = m_odometry.GetPose();
128 }
129
130 /**
131 * Resets the robot's pose.
132 *
133 * @param pose The pose to reset to.
134 */
135 void ResetPose(const Pose2d& pose) {
136 m_odometry.ResetPose(pose);
137 m_odometryPoseBuffer.Clear();
138 m_visionUpdates.clear();
139 m_poseEstimate = m_odometry.GetPose();
140 }
141
142 /**
143 * Resets the robot's translation.
144 *
145 * @param translation The pose to translation to.
146 */
147#if defined(__GNUC__) && !defined(__clang__)
148#pragma GCC diagnostic push
149#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
150#endif // defined(__GNUC__) && !defined(__clang__)
151 void ResetTranslation(const Translation2d& translation) {
152 m_odometry.ResetTranslation(translation);
153
154 const std::optional<std::pair<units::second_t, VisionUpdate>>
155 latestVisionUpdate =
156 m_visionUpdates.empty() ? std::nullopt
157 : std::optional{*m_visionUpdates.crbegin()};
158 m_odometryPoseBuffer.Clear();
159 m_visionUpdates.clear();
160
161 if (latestVisionUpdate) {
162 // apply vision compensation to the pose rotation
163 const VisionUpdate visionUpdate{
164 Pose2d{translation, latestVisionUpdate->second.visionPose.Rotation()},
165 Pose2d{translation,
166 latestVisionUpdate->second.odometryPose.Rotation()}};
167 m_visionUpdates[latestVisionUpdate->first] = visionUpdate;
168 m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
169 } else {
170 m_poseEstimate = m_odometry.GetPose();
171 }
172 }
173#if defined(__GNUC__) && !defined(__clang__)
174#pragma GCC diagnostic pop
175#endif // defined(__GNUC__) && !defined(__clang__)
176
177 /**
178 * Resets the robot's rotation.
179 *
180 * @param rotation The rotation to reset to.
181 */
182#if defined(__GNUC__) && !defined(__clang__)
183#pragma GCC diagnostic push
184#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
185#endif // defined(__GNUC__) && !defined(__clang__)
186 void ResetRotation(const Rotation2d& rotation) {
187 m_odometry.ResetRotation(rotation);
188
189 const std::optional<std::pair<units::second_t, VisionUpdate>>
190 latestVisionUpdate =
191 m_visionUpdates.empty() ? std::nullopt
192 : std::optional{*m_visionUpdates.crbegin()};
193 m_odometryPoseBuffer.Clear();
194 m_visionUpdates.clear();
195
196 if (latestVisionUpdate) {
197 // apply vision compensation to the pose translation
198 const VisionUpdate visionUpdate{
199 Pose2d{latestVisionUpdate->second.visionPose.Translation(), rotation},
200 Pose2d{latestVisionUpdate->second.odometryPose.Translation(),
201 rotation}};
202 m_visionUpdates[latestVisionUpdate->first] = visionUpdate;
203 m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
204 } else {
205 m_poseEstimate = m_odometry.GetPose();
206 }
207 }
208#if defined(__GNUC__) && !defined(__clang__)
209#pragma GCC diagnostic pop
210#endif // defined(__GNUC__) && !defined(__clang__)
211
212 /**
213 * Gets the estimated robot pose.
214 *
215 * @return The estimated robot pose in meters.
216 */
217 Pose2d GetEstimatedPosition() const { return m_poseEstimate; }
218
219 /**
220 * Return the pose at a given timestamp, if the buffer is not empty.
221 *
222 * @param timestamp The pose's timestamp.
223 * @return The pose at the given timestamp (or std::nullopt if the buffer is
224 * empty).
225 */
226 std::optional<Pose2d> SampleAt(wpi::units::second_t timestamp) const {
227 // Step 0: If there are no odometry updates to sample, skip.
228 if (m_odometryPoseBuffer.GetInternalBuffer().empty()) {
229 return std::nullopt;
230 }
231
232 // Step 1: Make sure timestamp matches the sample from the odometry pose
233 // buffer. (When sampling, the buffer will always use a timestamp
234 // between the first and last timestamps)
235 wpi::units::second_t oldestOdometryTimestamp =
236 m_odometryPoseBuffer.GetInternalBuffer().front().first;
237 wpi::units::second_t newestOdometryTimestamp =
238 m_odometryPoseBuffer.GetInternalBuffer().back().first;
239 timestamp =
240 std::clamp(timestamp, oldestOdometryTimestamp, newestOdometryTimestamp);
241
242 // Step 2: If there are no applicable vision updates, use the odometry-only
243 // information.
244 if (m_visionUpdates.empty() || timestamp < m_visionUpdates.begin()->first) {
245 return m_odometryPoseBuffer.Sample(timestamp);
246 }
247
248 // Step 3: Get the latest vision update from before or at the timestamp to
249 // sample at.
250 // First, find the iterator past the sample timestamp, then go back one.
251 // Note that upper_bound() won't return begin() because we check begin()
252 // earlier.
253 auto floorIter = m_visionUpdates.upper_bound(timestamp);
254 --floorIter;
255 auto visionUpdate = floorIter->second;
256
257 // Step 4: Get the pose measured by odometry at the time of the sample.
258 auto odometryEstimate = m_odometryPoseBuffer.Sample(timestamp);
259
260 // Step 5: Apply the vision compensation to the odometry pose.
261 // TODO Replace with std::optional::transform() in C++23
262 if (odometryEstimate) {
263 return visionUpdate.Compensate(*odometryEstimate);
264 }
265 return std::nullopt;
266 }
267
268 /**
269 * Adds a vision measurement to the Kalman Filter. This will correct
270 * the odometry pose estimate while still accounting for measurement noise.
271 *
272 * This method can be called as infrequently as you want, as long as you are
273 * calling Update() every loop.
274 *
275 * To promote stability of the pose estimate and make it robust to bad vision
276 * data, we recommend only adding vision measurements that are already within
277 * one meter or so of the current pose estimate.
278 *
279 * @param visionRobotPose The pose of the robot as measured by the vision
280 * camera.
281 * @param timestamp The timestamp of the vision measurement in seconds. Note
282 * that if you don't use your own time source by calling UpdateWithTime(),
283 * then you must use a timestamp with an epoch since FPGA startup (i.e.,
284 * the epoch of this timestamp is the same epoch as
285 * wpi::math::Timer::GetTimestamp(). This means that you should use
286 * wpi::math::Timer::GetTimestamp() as your time source in this case.
287 */
288 void AddVisionMeasurement(const Pose2d& visionRobotPose,
289 wpi::units::second_t timestamp) {
290 // Step 0: If this measurement is old enough to be outside the pose buffer's
291 // timespan, skip.
292 if (m_odometryPoseBuffer.GetInternalBuffer().empty() ||
293 m_odometryPoseBuffer.GetInternalBuffer().front().first -
294 kBufferDuration >
295 timestamp) {
296 return;
297 }
298
299 // Step 1: Clean up any old entries
300 CleanUpVisionUpdates();
301
302 // Step 2: Get the pose measured by odometry at the moment the vision
303 // measurement was made.
304 auto odometrySample = m_odometryPoseBuffer.Sample(timestamp);
305
306 if (!odometrySample) {
307 return;
308 }
309
310 // Step 3: Get the vision-compensated pose estimate at the moment the vision
311 // measurement was made.
312 auto visionSample = SampleAt(timestamp);
313
314 if (!visionSample) {
315 return;
316 }
317
318 // Step 4: Measure the transform between the old pose estimate and the
319 // vision transform.
320 auto transform = visionRobotPose - visionSample.value();
321
322 // Step 5: We should not trust the transform entirely, so instead we scale
323 // this transform by a Kalman gain matrix representing how much we trust
324 // vision measurements compared to our current pose.
325 Eigen::Vector3d k_times_transform =
326 m_vision_K * Eigen::Vector3d{transform.X().value(),
327 transform.Y().value(),
328 transform.Rotation().Radians().value()};
329
330 // Step 6: Convert back to Transform2d.
331 Transform2d scaledTransform{
332 wpi::units::meter_t{k_times_transform(0)},
333 wpi::units::meter_t{k_times_transform(1)},
334 Rotation2d{wpi::units::radian_t{k_times_transform(2)}}};
335
336 // Step 7: Calculate and record the vision update.
337 VisionUpdate visionUpdate{*visionSample + scaledTransform, *odometrySample};
338 m_visionUpdates[timestamp] = visionUpdate;
339
340 // Step 8: Remove later vision measurements. (Matches previous behavior)
341 auto firstAfter = m_visionUpdates.upper_bound(timestamp);
342 m_visionUpdates.erase(firstAfter, m_visionUpdates.end());
343
344 // Step 9: Update latest pose estimate. Since we cleared all updates after
345 // this vision update, it's guaranteed to be the latest vision update.
346 m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
347 }
348
349 /**
350 * Adds a vision measurement to the Kalman Filter. This will correct
351 * the odometry pose estimate while still accounting for measurement noise.
352 *
353 * This method can be called as infrequently as you want, as long as you are
354 * calling Update() every loop.
355 *
356 * To promote stability of the pose estimate and make it robust to bad vision
357 * data, we recommend only adding vision measurements that are already within
358 * one meter or so of the current pose estimate.
359 *
360 * Note that the vision measurement standard deviations passed into this
361 * method will continue to apply to future measurements until a subsequent
362 * call to SetVisionMeasurementStdDevs() or this method.
363 *
364 * @param visionRobotPose The pose of the robot as measured by the vision
365 * camera.
366 * @param timestamp The timestamp of the vision measurement in seconds. Note
367 * that if you don't use your own time source by calling UpdateWithTime(),
368 * then you must use a timestamp with an epoch since FPGA startup (i.e.,
369 * the epoch of this timestamp is the same epoch as
370 * wpi::math::Timer::GetTimestamp(). This means that you should use
371 * wpi::math::Timer::GetTimestamp() as your time source in this case.
372 * @param visionMeasurementStdDevs Standard deviations of the vision pose
373 * measurement (x position in meters, y position in meters, and heading in
374 * radians). Increase these numbers to trust the vision pose measurement
375 * less.
376 */
378 const Pose2d& visionRobotPose, wpi::units::second_t timestamp,
379 const wpi::util::array<double, 3>& visionMeasurementStdDevs) {
380 SetVisionMeasurementStdDevs(visionMeasurementStdDevs);
381 AddVisionMeasurement(visionRobotPose, timestamp);
382 }
383
384 /**
385 * Updates the pose estimator with wheel encoder and gyro information. This
386 * should be called every loop.
387 *
388 * @param gyroAngle The current gyro angle.
389 * @param wheelPositions The distances traveled by the encoders.
390 *
391 * @return The estimated pose of the robot in meters.
392 */
393 Pose2d Update(const Rotation2d& gyroAngle,
394 const WheelPositions& wheelPositions) {
396 wheelPositions);
397 }
398
399 /**
400 * Updates the pose estimator with wheel encoder and gyro information. This
401 * should be called every loop.
402 *
403 * @param currentTime The time at which this method was called.
404 * @param gyroAngle The current gyro angle.
405 * @param wheelPositions The distances traveled by the encoders.
406 *
407 * @return The estimated pose of the robot in meters.
408 */
409 Pose2d UpdateWithTime(wpi::units::second_t currentTime,
410 const Rotation2d& gyroAngle,
411 const WheelPositions& wheelPositions) {
412 auto odometryEstimate = m_odometry.Update(gyroAngle, wheelPositions);
413
414 m_odometryPoseBuffer.AddSample(currentTime, odometryEstimate);
415
416 if (m_visionUpdates.empty()) {
417 m_poseEstimate = odometryEstimate;
418 } else {
419 auto visionUpdate = m_visionUpdates.rbegin()->second;
420 m_poseEstimate = visionUpdate.Compensate(odometryEstimate);
421 }
422
423 return GetEstimatedPosition();
424 }
425
426 private:
427 /**
428 * Removes stale vision updates that won't affect sampling.
429 */
430 void CleanUpVisionUpdates() {
431 // Step 0: If there are no odometry samples, skip.
432 if (m_odometryPoseBuffer.GetInternalBuffer().empty()) {
433 return;
434 }
435
436 // Step 1: Find the oldest timestamp that needs a vision update.
437 wpi::units::second_t oldestOdometryTimestamp =
438 m_odometryPoseBuffer.GetInternalBuffer().front().first;
439
440 // Step 2: If there are no vision updates before that timestamp, skip.
441 if (m_visionUpdates.empty() ||
442 oldestOdometryTimestamp < m_visionUpdates.begin()->first) {
443 return;
444 }
445
446 // Step 3: Find the newest vision update timestamp before or at the oldest
447 // timestamp.
448 // First, find the iterator past the oldest odometry timestamp, then go
449 // back one. Note that upper_bound() won't return begin() because we check
450 // begin() earlier.
451 auto newestNeededVisionUpdate =
452 m_visionUpdates.upper_bound(oldestOdometryTimestamp);
453 --newestNeededVisionUpdate;
454
455 // Step 4: Remove all entries strictly before the newest timestamp we need.
456 m_visionUpdates.erase(m_visionUpdates.begin(), newestNeededVisionUpdate);
457 }
458
459 struct VisionUpdate {
460 // The vision-compensated pose estimate
461 Pose2d visionPose;
462
463 // The pose estimated based solely on odometry
464 Pose2d odometryPose;
465
466 /**
467 * Returns the vision-compensated version of the pose. Specifically, changes
468 * the pose from being relative to this record's odometry pose to being
469 * relative to this record's vision pose.
470 *
471 * @param pose The pose to compensate.
472 * @return The compensated pose.
473 */
474 Pose2d Compensate(const Pose2d& pose) const {
475 auto delta = pose - odometryPose;
476 return visionPose + delta;
477 }
478 };
479
480 static constexpr wpi::units::second_t kBufferDuration = 1.5_s;
481
482 Odometry<WheelPositions, WheelVelocities, WheelAccelerations>& m_odometry;
483
484 // Diagonal of process noise covariance matrix Q
485 wpi::util::array<double, 3> m_q{wpi::util::empty_array};
486
487 // Kalman gain matrix K
488 Eigen::DiagonalMatrix<double, 3> m_vision_K =
489 Eigen::DiagonalMatrix<double, 3>::Zero();
490
491 // Maps timestamps to odometry-only pose estimates
492 TimeInterpolatableBuffer<Pose2d> m_odometryPoseBuffer{kBufferDuration};
493 // Maps timestamps to vision updates
494 // Always contains one entry before the oldest entry in m_odometryPoseBuffer,
495 // unless there have been no vision measurements after the last reset. May
496 // contain one entry while m_odometryPoseBuffer is empty to correct for
497 // translation/rotation after a call to ResetRotation/ResetTranslation.
498 std::map<wpi::units::second_t, VisionUpdate> m_visionUpdates;
499
500 Pose2d m_poseEstimate;
501};
502
503} // namespace wpi::math
#define WPILIB_DLLEXPORT
Definition SymbolExports.hpp:36
Helper class that converts a chassis velocity (dx, dy, and dtheta components) into individual wheel v...
Definition Kinematics.hpp:26
static void ReportUsage(std::string_view resource, std::string_view data)
Definition MathShared.hpp:61
static wpi::units::second_t GetTimestamp()
Definition MathShared.hpp:65
Class for odometry.
Definition Odometry.hpp:31
Represents a 2D pose containing translational and rotational elements.
Definition Pose2d.hpp:27
Pose2d UpdateWithTime(wpi::units::second_t currentTime, const Rotation2d &gyroAngle, const WheelPositions &wheelPositions)
Updates the pose estimator with wheel encoder and gyro information.
Definition PoseEstimator.hpp:409
void AddVisionMeasurement(const Pose2d &visionRobotPose, wpi::units::second_t timestamp, const wpi::util::array< double, 3 > &visionMeasurementStdDevs)
Adds a vision measurement to the Kalman Filter.
Definition PoseEstimator.hpp:377
void ResetPosition(const Rotation2d &gyroAngle, const WheelPositions &wheelPositions, const Pose2d &pose)
Resets the robot's position on the field.
Definition PoseEstimator.hpp:121
Pose2d Update(const Rotation2d &gyroAngle, const WheelPositions &wheelPositions)
Updates the pose estimator with wheel encoder and gyro information.
Definition PoseEstimator.hpp:393
void ResetTranslation(const Translation2d &translation)
Resets the robot's translation.
Definition PoseEstimator.hpp:151
Pose2d GetEstimatedPosition() const
Gets the estimated robot pose.
Definition PoseEstimator.hpp:217
std::optional< Pose2d > SampleAt(wpi::units::second_t timestamp) const
Return the pose at a given timestamp, if the buffer is not empty.
Definition PoseEstimator.hpp:226
PoseEstimator(Kinematics< WheelPositions, WheelVelocities, WheelAccelerations > &kinematics, Odometry< WheelPositions, WheelVelocities, WheelAccelerations > &odometry, const wpi::util::array< double, 3 > &stateStdDevs, const wpi::util::array< double, 3 > &visionMeasurementStdDevs)
Constructs a PoseEstimator.
Definition PoseEstimator.hpp:66
void AddVisionMeasurement(const Pose2d &visionRobotPose, wpi::units::second_t timestamp)
Adds a vision measurement to the Kalman Filter.
Definition PoseEstimator.hpp:288
void ResetRotation(const Rotation2d &rotation)
Resets the robot's rotation.
Definition PoseEstimator.hpp:186
void SetVisionMeasurementStdDevs(const wpi::util::array< double, 3 > &visionMeasurementStdDevs)
Sets the pose estimator's trust in vision measurements.
Definition PoseEstimator.hpp:91
void ResetPose(const Pose2d &pose)
Resets the robot's pose.
Definition PoseEstimator.hpp:135
A rotation in a 2D coordinate frame represented by a point on the unit circle (cosine and sine).
Definition Rotation2d.hpp:26
Represents a transformation for a Pose2d in the pose's frame.
Definition Transform2d.hpp:21
Represents a translation in 2D space.
Definition Translation2d.hpp:30
This class is a wrapper around std::array that does compile time size checking.
Definition array.hpp:26
Definition LinearSystem.hpp:20
constexpr empty_array_t empty_array
Definition array.hpp:16