WPILibC++ 2027.0.0-alpha-5
Loading...
Searching...
No Matches
PoseEstimator.hpp
Go to the documentation of this file.
1// Copyright (c) FIRST and other WPILib contributors.
2// Open Source Software; you can modify and/or share it under the terms of
3// the WPILib BSD license file in the root directory of this project.
4
5#pragma once
6
#include <algorithm>
#include <cmath>
#include <map>
#include <optional>
#include <utility>
#include <vector>

12#include <Eigen/Core>
13
22#include "wpi/units/time.hpp"
24#include "wpi/util/array.hpp"
25
26namespace wpi::math {
27
28/**
29 * This class wraps odometry to fuse latency-compensated
30 * vision measurements with encoder measurements. Robot code should not use this
31 * directly- Instead, use the particular type for your drivetrain (e.g.,
32 * DifferentialDrivePoseEstimator). It will correct for noisy vision
33 * measurements and encoder drift. It is intended to be an easy drop-in for
34 * Odometry.
35 *
36 * Update() should be called every robot loop.
37 *
38 * AddVisionMeasurement() can be called as infrequently as you want; if you
39 * never call it, then this class will behave like regular encoder odometry.
40 *
41 * @tparam WheelPositions Wheel positions type.
42 * @tparam WheelVelocities Wheel velocities type.
43 * @tparam WheelAccelerations Wheel accelerations type.
44 */
45template <typename WheelPositions, typename WheelVelocities,
46 typename WheelAccelerations>
48 public:
49 /**
50 * Constructs a PoseEstimator.
51 *
52 * @warning The initial pose estimate will always be the default pose,
53 * regardless of the odometry's current pose.
54 *
55 * @param kinematics A correctly-configured kinematics object for your
56 * drivetrain.
57 * @param odometry A correctly-configured odometry object for your drivetrain.
58 * @param stateStdDevs Standard deviations of the pose estimate (x position in
59 * meters, y position in meters, and heading in radians). Increase these
60 * numbers to trust your state estimate less.
61 * @param visionMeasurementStdDevs Standard deviations of the vision pose
62 * measurement (x position in meters, y position in meters, and heading in
63 * radians). Increase these numbers to trust the vision pose measurement
64 * less.
65 */
68 kinematics,
70 const wpi::util::array<double, 3>& stateStdDevs,
71 const wpi::util::array<double, 3>& visionMeasurementStdDevs)
72 : m_odometry(odometry) {
73 for (size_t i = 0; i < 3; ++i) {
74 m_q[i] = stateStdDevs[i] * stateStdDevs[i];
75 }
76
77 SetVisionMeasurementStdDevs(visionMeasurementStdDevs);
78 wpi::math::MathSharedStore::ReportUsage("PoseEstimator", "");
79 }
80
81 /**
82 * Sets the pose estimator's trust in vision measurements. This might be used
83 * to change trust in vision measurements after the autonomous period, or to
84 * change trust as distance to a vision target increases.
85 *
86 * @param visionMeasurementStdDevs Standard deviations of the vision pose
87 * measurement (x position in meters, y position in meters, and heading in
88 * radians). Increase these numbers to trust the vision pose measurement
89 * less.
90 */
92 const wpi::util::array<double, 3>& visionMeasurementStdDevs) {
93 // Diagonal of measurement covariance matrix R
95 for (size_t i = 0; i < 3; ++i) {
96 r[i] = visionMeasurementStdDevs[i] * visionMeasurementStdDevs[i];
97 }
98
99 // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
100 // and C = I. See wpimath/algorithms.md.
101 for (size_t row = 0; row < 3; ++row) {
102 if (m_q[row] == 0.0) {
103 m_vision_K.diagonal()[row] = 0.0;
104 } else {
105 m_vision_K.diagonal()[row] =
106 m_q[row] / (m_q[row] + std::sqrt(m_q[row] * r[row]));
107 }
108 }
109 }
110
111 /**
112 * Resets the robot's position on the field.
113 *
114 * The gyroscope angle does not need to be reset in the user's robot code.
115 * The library automatically takes care of offsetting the gyro angle.
116 *
117 * @param gyroAngle The current gyro angle.
118 * @param wheelPositions The distances traveled by the encoders.
119 * @param pose The estimated pose of the robot on the field.
120 */
121 void ResetPosition(const Rotation2d& gyroAngle,
122 const WheelPositions& wheelPositions, const Pose2d& pose) {
123 // Reset state estimate and error covariance
124 m_odometry.ResetPosition(gyroAngle, wheelPositions, pose);
125 m_odometryPoseBuffer.Clear();
126 m_visionUpdates.clear();
127 m_poseEstimate = m_odometry.GetPose();
128 }
129
130 /**
131 * Resets the robot's pose.
132 *
133 * @param pose The pose to reset to.
134 */
135 void ResetPose(const Pose2d& pose) {
136 m_odometry.ResetPose(pose);
137 m_odometryPoseBuffer.Clear();
138 m_visionUpdates.clear();
139 m_poseEstimate = m_odometry.GetPose();
140 }
141
142 /**
143 * Resets the robot's translation.
144 *
145 * @param translation The pose to translation to.
146 */
147#if defined(__GNUC__) && !defined(__clang__)
148#pragma GCC diagnostic push
149#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
150#endif // defined(__GNUC__) && !defined(__clang__)
151 void ResetTranslation(const Translation2d& translation) {
152 m_odometry.ResetTranslation(translation);
153
154 const std::optional<std::pair<units::second_t, VisionUpdate>>
155 latestVisionUpdate =
156 m_visionUpdates.empty() ? std::nullopt
157 : std::optional{*m_visionUpdates.crbegin()};
158 m_odometryPoseBuffer.Clear();
159 m_visionUpdates.clear();
160
161 if (latestVisionUpdate) {
162 // apply vision compensation to the pose rotation
163 const VisionUpdate visionUpdate{
164 Pose2d{translation, latestVisionUpdate->second.visionPose.Rotation()},
165 Pose2d{translation,
166 latestVisionUpdate->second.odometryPose.Rotation()}};
167 m_visionUpdates[latestVisionUpdate->first] = visionUpdate;
168 m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
169 } else {
170 m_poseEstimate = m_odometry.GetPose();
171 }
172 }
173#if defined(__GNUC__) && !defined(__clang__)
174#pragma GCC diagnostic pop
175#endif // defined(__GNUC__) && !defined(__clang__)
176
177 /**
178 * Resets the robot's rotation.
179 *
180 * @param rotation The rotation to reset to.
181 */
182#if defined(__GNUC__) && !defined(__clang__)
183#pragma GCC diagnostic push
184#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
185#endif // defined(__GNUC__) && !defined(__clang__)
186 void ResetRotation(const Rotation2d& rotation) {
187 m_odometry.ResetRotation(rotation);
188
189 const std::optional<std::pair<units::second_t, VisionUpdate>>
190 latestVisionUpdate =
191 m_visionUpdates.empty() ? std::nullopt
192 : std::optional{*m_visionUpdates.crbegin()};
193 m_odometryPoseBuffer.Clear();
194 m_visionUpdates.clear();
195
196 if (latestVisionUpdate) {
197 // apply vision compensation to the pose translation
198 const VisionUpdate visionUpdate{
199 Pose2d{latestVisionUpdate->second.visionPose.Translation(), rotation},
200 Pose2d{latestVisionUpdate->second.odometryPose.Translation(),
201 rotation}};
202 m_visionUpdates[latestVisionUpdate->first] = visionUpdate;
203 m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
204 } else {
205 m_poseEstimate = m_odometry.GetPose();
206 }
207 }
208#if defined(__GNUC__) && !defined(__clang__)
209#pragma GCC diagnostic pop
210#endif // defined(__GNUC__) && !defined(__clang__)
211
212 /**
213 * Gets the estimated robot pose.
214 *
215 * @return The estimated robot pose in meters.
216 */
217 Pose2d GetEstimatedPosition() const { return m_poseEstimate; }
218
219 /**
220 * Return the pose at a given timestamp, if the buffer is not empty.
221 *
222 * @param timestamp The pose's timestamp.
223 * @return The pose at the given timestamp (or std::nullopt if the buffer is
224 * empty).
225 */
226 std::optional<Pose2d> SampleAt(wpi::units::second_t timestamp) const {
227 // Step 0: If there are no odometry updates to sample, skip.
228 if (m_odometryPoseBuffer.GetInternalBuffer().empty()) {
229 return std::nullopt;
230 }
231
232 // Step 1: Make sure timestamp matches the sample from the odometry pose
233 // buffer. (When sampling, the buffer will always use a timestamp
234 // between the first and last timestamps)
235 wpi::units::second_t oldestOdometryTimestamp =
236 m_odometryPoseBuffer.GetInternalBuffer().front().first;
237 wpi::units::second_t newestOdometryTimestamp =
238 m_odometryPoseBuffer.GetInternalBuffer().back().first;
239 timestamp =
240 std::clamp(timestamp, oldestOdometryTimestamp, newestOdometryTimestamp);
241
242 // Step 2: If there are no applicable vision updates, use the odometry-only
243 // information.
244 if (m_visionUpdates.empty() || timestamp < m_visionUpdates.begin()->first) {
245 return m_odometryPoseBuffer.Sample(timestamp);
246 }
247
248 // Step 3: Get the latest vision update from before or at the timestamp to
249 // sample at.
250 // First, find the iterator past the sample timestamp, then go back one.
251 // Note that upper_bound() won't return begin() because we check begin()
252 // earlier.
253 auto floorIter = m_visionUpdates.upper_bound(timestamp);
254 --floorIter;
255 auto visionUpdate = floorIter->second;
256
257 // Step 4: Get the pose measured by odometry at the time of the sample.
258 auto odometryEstimate = m_odometryPoseBuffer.Sample(timestamp);
259
260 // Step 5: Apply the vision compensation to the odometry pose.
261 // TODO Replace with std::optional::transform() in C++23
262 if (odometryEstimate) {
263 return visionUpdate.Compensate(*odometryEstimate);
264 }
265 return std::nullopt;
266 }
267
268 /**
269 * Adds a vision measurement to the Kalman Filter. This will correct
270 * the odometry pose estimate while still accounting for measurement noise.
271 *
272 * This method can be called as infrequently as you want, as long as you are
273 * calling Update() every loop.
274 *
275 * To promote stability of the pose estimate and make it robust to bad vision
276 * data, we recommend only adding vision measurements that are already within
277 * one meter or so of the current pose estimate.
278 *
279 * @param visionRobotPose The pose of the robot as measured by the vision
280 * camera.
281 * @param timestamp The timestamp of the vision measurement in seconds. Note
282 * that if you don't use your own time source by calling UpdateWithTime(),
283 * then you must use a timestamp with the same epoch as
284 * wpi::Timer::GetMonotonicTimestamp(). This means that you should use
285 * wpi::Timer::GetMonotonicTimestamp() as your time source in this case.
286 */
287 void AddVisionMeasurement(const Pose2d& visionRobotPose,
288 wpi::units::second_t timestamp) {
289 // Step 0: If this measurement is old enough to be outside the pose buffer's
290 // timespan, skip.
291 if (m_odometryPoseBuffer.GetInternalBuffer().empty() ||
292 m_odometryPoseBuffer.GetInternalBuffer().front().first -
293 kBufferDuration >
294 timestamp) {
295 return;
296 }
297
298 // Step 1: Clean up any old entries
299 CleanUpVisionUpdates();
300
301 // Step 2: Get the pose measured by odometry at the moment the vision
302 // measurement was made.
303 auto odometrySample = m_odometryPoseBuffer.Sample(timestamp);
304
305 if (!odometrySample) {
306 return;
307 }
308
309 // Step 3: Get the vision-compensated pose estimate at the moment the vision
310 // measurement was made.
311 auto visionSample = SampleAt(timestamp);
312
313 if (!visionSample) {
314 return;
315 }
316
317 // Step 4: Measure the transform between the old pose estimate and the
318 // vision transform.
319 auto transform = visionRobotPose - visionSample.value();
320
321 // Step 5: We should not trust the transform entirely, so instead we scale
322 // this transform by a Kalman gain matrix representing how much we trust
323 // vision measurements compared to our current pose.
324 Eigen::Vector3d k_times_transform =
325 m_vision_K * Eigen::Vector3d{transform.X().value(),
326 transform.Y().value(),
327 transform.Rotation().Radians().value()};
328
329 // Step 6: Convert back to Transform2d.
330 Transform2d scaledTransform{
331 wpi::units::meter_t{k_times_transform(0)},
332 wpi::units::meter_t{k_times_transform(1)},
333 Rotation2d{wpi::units::radian_t{k_times_transform(2)}}};
334
335 // Step 7: Calculate and record the vision update.
336 VisionUpdate visionUpdate{*visionSample + scaledTransform, *odometrySample};
337 m_visionUpdates[timestamp] = visionUpdate;
338
339 // Step 8: Remove later vision measurements. (Matches previous behavior)
340 auto firstAfter = m_visionUpdates.upper_bound(timestamp);
341 m_visionUpdates.erase(firstAfter, m_visionUpdates.end());
342
343 // Step 9: Update latest pose estimate. Since we cleared all updates after
344 // this vision update, it's guaranteed to be the latest vision update.
345 m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
346 }
347
348 /**
349 * Adds a vision measurement to the Kalman Filter. This will correct
350 * the odometry pose estimate while still accounting for measurement noise.
351 *
352 * This method can be called as infrequently as you want, as long as you are
353 * calling Update() every loop.
354 *
355 * To promote stability of the pose estimate and make it robust to bad vision
356 * data, we recommend only adding vision measurements that are already within
357 * one meter or so of the current pose estimate.
358 *
359 * Note that the vision measurement standard deviations passed into this
360 * method will continue to apply to future measurements until a subsequent
361 * call to SetVisionMeasurementStdDevs() or this method.
362 *
363 * @param visionRobotPose The pose of the robot as measured by the vision
364 * camera.
365 * @param timestamp The timestamp of the vision measurement in seconds. Note
366 * that if you don't use your own time source by calling UpdateWithTime(),
367 * then you must use a timestamp with the same epoch as
368 * wpi::Timer::GetMonotonicTimestamp(). This means that you should use
369 * wpi::Timer::GetMonotonicTimestamp() as your time source in this case.
370 * @param visionMeasurementStdDevs Standard deviations of the vision pose
371 * measurement (x position in meters, y position in meters, and heading in
372 * radians). Increase these numbers to trust the vision pose measurement
373 * less.
374 */
376 const Pose2d& visionRobotPose, wpi::units::second_t timestamp,
377 const wpi::util::array<double, 3>& visionMeasurementStdDevs) {
378 SetVisionMeasurementStdDevs(visionMeasurementStdDevs);
379 AddVisionMeasurement(visionRobotPose, timestamp);
380 }
381
382 /**
383 * Updates the pose estimator with wheel encoder and gyro information. This
384 * should be called every loop.
385 *
386 * @param gyroAngle The current gyro angle.
387 * @param wheelPositions The distances traveled by the encoders.
388 *
389 * @return The estimated pose of the robot in meters.
390 */
391 Pose2d Update(const Rotation2d& gyroAngle,
392 const WheelPositions& wheelPositions) {
394 wheelPositions);
395 }
396
397 /**
398 * Updates the pose estimator with wheel encoder and gyro information. This
399 * should be called every loop.
400 *
401 * @param currentTime The time at which this method was called.
402 * @param gyroAngle The current gyro angle.
403 * @param wheelPositions The distances traveled by the encoders.
404 *
405 * @return The estimated pose of the robot in meters.
406 */
407 Pose2d UpdateWithTime(wpi::units::second_t currentTime,
408 const Rotation2d& gyroAngle,
409 const WheelPositions& wheelPositions) {
410 auto odometryEstimate = m_odometry.Update(gyroAngle, wheelPositions);
411
412 m_odometryPoseBuffer.AddSample(currentTime, odometryEstimate);
413
414 if (m_visionUpdates.empty()) {
415 m_poseEstimate = odometryEstimate;
416 } else {
417 auto visionUpdate = m_visionUpdates.rbegin()->second;
418 m_poseEstimate = visionUpdate.Compensate(odometryEstimate);
419 }
420
421 return GetEstimatedPosition();
422 }
423
424 private:
425 /**
426 * Removes stale vision updates that won't affect sampling.
427 */
428 void CleanUpVisionUpdates() {
429 // Step 0: If there are no odometry samples, skip.
430 if (m_odometryPoseBuffer.GetInternalBuffer().empty()) {
431 return;
432 }
433
434 // Step 1: Find the oldest timestamp that needs a vision update.
435 wpi::units::second_t oldestOdometryTimestamp =
436 m_odometryPoseBuffer.GetInternalBuffer().front().first;
437
438 // Step 2: If there are no vision updates before that timestamp, skip.
439 if (m_visionUpdates.empty() ||
440 oldestOdometryTimestamp < m_visionUpdates.begin()->first) {
441 return;
442 }
443
444 // Step 3: Find the newest vision update timestamp before or at the oldest
445 // timestamp.
446 // First, find the iterator past the oldest odometry timestamp, then go
447 // back one. Note that upper_bound() won't return begin() because we check
448 // begin() earlier.
449 auto newestNeededVisionUpdate =
450 m_visionUpdates.upper_bound(oldestOdometryTimestamp);
451 --newestNeededVisionUpdate;
452
453 // Step 4: Remove all entries strictly before the newest timestamp we need.
454 m_visionUpdates.erase(m_visionUpdates.begin(), newestNeededVisionUpdate);
455 }
456
457 struct VisionUpdate {
458 // The vision-compensated pose estimate
459 Pose2d visionPose;
460
461 // The pose estimated based solely on odometry
462 Pose2d odometryPose;
463
464 /**
465 * Returns the vision-compensated version of the pose. Specifically, changes
466 * the pose from being relative to this record's odometry pose to being
467 * relative to this record's vision pose.
468 *
469 * @param pose The pose to compensate.
470 * @return The compensated pose.
471 */
472 Pose2d Compensate(const Pose2d& pose) const {
473 auto delta = pose - odometryPose;
474 return visionPose + delta;
475 }
476 };
477
478 static constexpr wpi::units::second_t kBufferDuration = 1.5_s;
479
480 Odometry<WheelPositions, WheelVelocities, WheelAccelerations>& m_odometry;
481
482 // Diagonal of process noise covariance matrix Q
483 wpi::util::array<double, 3> m_q{wpi::util::empty_array};
484
485 // Kalman gain matrix K
486 Eigen::DiagonalMatrix<double, 3> m_vision_K =
487 Eigen::DiagonalMatrix<double, 3>::Zero();
488
489 // Maps timestamps to odometry-only pose estimates
490 TimeInterpolatableBuffer<Pose2d> m_odometryPoseBuffer{kBufferDuration};
491 // Maps timestamps to vision updates
492 // Always contains one entry before the oldest entry in m_odometryPoseBuffer,
493 // unless there have been no vision measurements after the last reset. May
494 // contain one entry while m_odometryPoseBuffer is empty to correct for
495 // translation/rotation after a call to ResetRotation/ResetTranslation.
496 std::map<wpi::units::second_t, VisionUpdate> m_visionUpdates;
497
498 Pose2d m_poseEstimate;
499};
500
501} // namespace wpi::math
#define WPILIB_DLLEXPORT
Definition SymbolExports.hpp:36
Helper class that converts a chassis velocity (dx, dy, and dtheta components) into individual wheel v...
Definition Kinematics.hpp:26
static void ReportUsage(std::string_view resource, std::string_view data)
Definition MathShared.hpp:61
static wpi::units::second_t GetTimestamp()
Definition MathShared.hpp:65
Class for odometry.
Definition Odometry.hpp:31
Represents a 2D pose containing translational and rotational elements.
Definition Pose2d.hpp:30
Pose2d UpdateWithTime(wpi::units::second_t currentTime, const Rotation2d &gyroAngle, const WheelPositions &wheelPositions)
Updates the pose estimator with wheel encoder and gyro information.
Definition PoseEstimator.hpp:407
void AddVisionMeasurement(const Pose2d &visionRobotPose, wpi::units::second_t timestamp, const wpi::util::array< double, 3 > &visionMeasurementStdDevs)
Adds a vision measurement to the Kalman Filter.
Definition PoseEstimator.hpp:375
void ResetPosition(const Rotation2d &gyroAngle, const WheelPositions &wheelPositions, const Pose2d &pose)
Resets the robot's position on the field.
Definition PoseEstimator.hpp:121
Pose2d Update(const Rotation2d &gyroAngle, const WheelPositions &wheelPositions)
Updates the pose estimator with wheel encoder and gyro information.
Definition PoseEstimator.hpp:391
void ResetTranslation(const Translation2d &translation)
Resets the robot's translation.
Definition PoseEstimator.hpp:151
Pose2d GetEstimatedPosition() const
Gets the estimated robot pose.
Definition PoseEstimator.hpp:217
std::optional< Pose2d > SampleAt(wpi::units::second_t timestamp) const
Return the pose at a given timestamp, if the buffer is not empty.
Definition PoseEstimator.hpp:226
PoseEstimator(Kinematics< WheelPositions, WheelVelocities, WheelAccelerations > &kinematics, Odometry< WheelPositions, WheelVelocities, WheelAccelerations > &odometry, const wpi::util::array< double, 3 > &stateStdDevs, const wpi::util::array< double, 3 > &visionMeasurementStdDevs)
Constructs a PoseEstimator.
Definition PoseEstimator.hpp:66
void AddVisionMeasurement(const Pose2d &visionRobotPose, wpi::units::second_t timestamp)
Adds a vision measurement to the Kalman Filter.
Definition PoseEstimator.hpp:287
void ResetRotation(const Rotation2d &rotation)
Resets the robot's rotation.
Definition PoseEstimator.hpp:186
void SetVisionMeasurementStdDevs(const wpi::util::array< double, 3 > &visionMeasurementStdDevs)
Sets the pose estimator's trust in vision measurements.
Definition PoseEstimator.hpp:91
void ResetPose(const Pose2d &pose)
Resets the robot's pose.
Definition PoseEstimator.hpp:135
A rotation in a 2D coordinate frame represented by a point on the unit circle (cosine and sine).
Definition Rotation2d.hpp:29
Represents a transformation for a Pose2d in the pose's frame.
Definition Transform2d.hpp:21
Represents a translation in 2D space.
Definition Translation2d.hpp:33
This class is a wrapper around std::array that does compile time size checking.
Definition array.hpp:26
Definition LinearSystem.hpp:20
constexpr empty_array_t empty_array
Definition array.hpp:16