WPILibC++ 2027.0.0-alpha-5
Loading...
Searching...
No Matches
PoseEstimator3d.hpp
Go to the documentation of this file.
1// Copyright (c) FIRST and other WPILib contributors.
2// Open Source Software; you can modify and/or share it under the terms of
3// the WPILib BSD license file in the root directory of this project.
4
5#pragma once
6
7#include <cmath>
8#include <map>
9#include <optional>
10#include <vector>
11
12#include <Eigen/Core>
13
23#include "wpi/units/time.hpp"
25#include "wpi/util/array.hpp"
26
27namespace wpi::math {
28
29/**
30 * This class wraps odometry to fuse latency-compensated
31 * vision measurements with encoder measurements. Robot code should not use this
 32 * directly. Instead, use the particular type for your drivetrain (e.g.,
33 * DifferentialDrivePoseEstimator3d). It will correct for noisy vision
34 * measurements and encoder drift. It is intended to be an easy drop-in for
35 * Odometry3d. It is also intended to be an easy replacement for PoseEstimator,
36 * only requiring the addition of a standard deviation for Z and appropriate
37 * conversions between 2D and 3D versions of geometry classes. (See
38 * Pose3d(Pose2d), Rotation3d(Rotation2d), Translation3d(Translation2d), and
39 * Pose3d.ToPose2d().)
40 *
41 * Update() should be called every robot loop.
42 *
43 * AddVisionMeasurement() can be called as infrequently as you want; if you
44 * never call it, then this class will behave like regular encoder odometry.
45 *
46 * @tparam WheelPositions Wheel positions type.
47 * @tparam WheelVelocities Wheel velocities type.
48 * @tparam WheelAccelerations Wheel accelerations type.
49 */
50template <typename WheelPositions, typename WheelVelocities,
51 typename WheelAccelerations>
53 public:
54 /**
55 * Constructs a PoseEstimator3d.
56 *
57 * @warning The initial pose estimate will always be the default pose,
58 * regardless of the odometry's current pose.
59 *
60 * @param kinematics A correctly-configured kinematics object for your
61 * drivetrain.
62 * @param odometry A correctly-configured odometry object for your drivetrain.
63 * @param stateStdDevs Standard deviations of the pose estimate (x position in
64 * meters, y position in meters, and heading in radians). Increase these
65 * numbers to trust your state estimate less.
66 * @param visionMeasurementStdDevs Standard deviations of the vision pose
67 * measurement (x position in meters, y position in meters, and z position
68 * in meters, and angle in radians). Increase these numbers to trust the
69 * vision pose measurement less.
70 */
73 kinematics,
75 const wpi::util::array<double, 4>& stateStdDevs,
76 const wpi::util::array<double, 4>& visionMeasurementStdDevs)
77 : m_odometry(odometry) {
78 for (size_t i = 0; i < 4; ++i) {
79 m_q[i] = stateStdDevs[i] * stateStdDevs[i];
80 }
81
82 SetVisionMeasurementStdDevs(visionMeasurementStdDevs);
83 wpi::math::MathSharedStore::ReportUsage("PoseEstimator3d", "");
84 }
85
86 /**
87 * Sets the pose estimator's trust in vision measurements. This might be used
88 * to change trust in vision measurements after the autonomous period, or to
89 * change trust as distance to a vision target increases.
90 *
91 * @param visionMeasurementStdDevs Standard deviations of the vision pose
92 * measurement (x position in meters, y position in meters, and heading in
93 * radians). Increase these numbers to trust the vision pose measurement
94 * less.
95 */
97 const wpi::util::array<double, 4>& visionMeasurementStdDevs) {
98 // Diagonal of measurement noise covariance matrix R
100 for (size_t i = 0; i < 4; ++i) {
101 r[i] = visionMeasurementStdDevs[i] * visionMeasurementStdDevs[i];
102 }
103
104 // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
105 // and C = I. See wpimath/algorithms.md.
106 for (size_t row = 0; row < 4; ++row) {
107 if (m_q[row] == 0.0) {
108 m_vision_K.diagonal()[row] = 0.0;
109 } else {
110 m_vision_K.diagonal()[row] =
111 m_q[row] / (m_q[row] + std::sqrt(m_q[row] * r[row]));
112 }
113 }
114 double angle_gain = m_vision_K.diagonal()[3];
115 m_vision_K.diagonal()[4] = angle_gain;
116 m_vision_K.diagonal()[5] = angle_gain;
117 }
118
  /**
   * Resets the robot's position on the field.
   *
   * The gyroscope angle does not need to be reset in the user's robot code.
   * The library automatically takes care of offsetting the gyro angle.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The distances traveled by the encoders.
   * @param pose The estimated pose of the robot on the field.
   */
  void ResetPosition(const Rotation3d& gyroAngle,
                     const WheelPositions& wheelPositions, const Pose3d& pose) {
    // Reset state estimate and error covariance
    m_odometry.ResetPosition(gyroAngle, wheelPositions, pose);
    // Discard all buffered history; the new pose is the sole source of truth,
    // so stale odometry samples and vision updates must not influence it.
    m_odometryPoseBuffer.Clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.GetPose();
  }
137
  /**
   * Resets the robot's pose.
   *
   * @param pose The pose to reset to.
   */
  void ResetPose(const Pose3d& pose) {
    m_odometry.ResetPose(pose);
    // Discard all buffered history so stale odometry samples and vision
    // updates cannot pull the estimate away from the new pose.
    m_odometryPoseBuffer.Clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.GetPose();
  }
149
150 /**
151 * Resets the robot's translation.
152 *
153 * @param translation The pose to translation to.
154 */
155#if defined(__GNUC__) && !defined(__clang__)
156#pragma GCC diagnostic push
157#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
158#endif // defined(__GNUC__) && !defined(__clang__)
159 void ResetTranslation(const Translation3d& translation) {
160 m_odometry.ResetTranslation(translation);
161
162 const std::optional<std::pair<units::second_t, VisionUpdate>>
163 latestVisionUpdate =
164 m_visionUpdates.empty() ? std::nullopt
165 : std::optional{*m_visionUpdates.crbegin()};
166 m_odometryPoseBuffer.Clear();
167 m_visionUpdates.clear();
168
169 if (latestVisionUpdate) {
170 // apply vision compensation to the pose rotation
171 const VisionUpdate visionUpdate{
172 Pose3d{translation, latestVisionUpdate->second.visionPose.Rotation()},
173 Pose3d{translation,
174 latestVisionUpdate->second.odometryPose.Rotation()}};
175 m_visionUpdates[latestVisionUpdate->first] = visionUpdate;
176 m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
177 } else {
178 m_poseEstimate = m_odometry.GetPose();
179 }
180 }
181#if defined(__GNUC__) && !defined(__clang__)
182#pragma GCC diagnostic pop
183#endif // defined(__GNUC__) && !defined(__clang__)
184
185 /**
186 * Resets the robot's rotation.
187 *
188 * @param rotation The rotation to reset to.
189 */
190#if defined(__GNUC__) && !defined(__clang__)
191#pragma GCC diagnostic push
192#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
193#endif // defined(__GNUC__) && !defined(__clang__)
194 void ResetRotation(const Rotation3d& rotation) {
195 m_odometry.ResetRotation(rotation);
196
197 const std::optional<std::pair<units::second_t, VisionUpdate>>
198 latestVisionUpdate =
199 m_visionUpdates.empty() ? std::nullopt
200 : std::optional{*m_visionUpdates.crbegin()};
201 m_odometryPoseBuffer.Clear();
202 m_visionUpdates.clear();
203
204 if (latestVisionUpdate) {
205 // apply vision compensation to the pose translation
206 const VisionUpdate visionUpdate{
207 Pose3d{latestVisionUpdate->second.visionPose.Translation(), rotation},
208 Pose3d{latestVisionUpdate->second.odometryPose.Translation(),
209 rotation}};
210 m_visionUpdates[latestVisionUpdate->first] = visionUpdate;
211 m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
212 } else {
213 m_poseEstimate = m_odometry.GetPose();
214 }
215 }
216#if defined(__GNUC__) && !defined(__clang__)
217#pragma GCC diagnostic pop
218#endif // defined(__GNUC__) && !defined(__clang__)
219
  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  Pose3d GetEstimatedPosition() const { return m_poseEstimate; }
226
227 /**
228 * Return the pose at a given timestamp, if the buffer is not empty.
229 *
230 * @param timestamp The pose's timestamp.
231 * @return The pose at the given timestamp (or std::nullopt if the buffer is
232 * empty).
233 */
234 std::optional<Pose3d> SampleAt(wpi::units::second_t timestamp) const {
235 // Step 0: If there are no odometry updates to sample, skip.
236 if (m_odometryPoseBuffer.GetInternalBuffer().empty()) {
237 return std::nullopt;
238 }
239
240 // Step 1: Make sure timestamp matches the sample from the odometry pose
241 // buffer. (When sampling, the buffer will always use a timestamp
242 // between the first and last timestamps)
243 wpi::units::second_t oldestOdometryTimestamp =
244 m_odometryPoseBuffer.GetInternalBuffer().front().first;
245 wpi::units::second_t newestOdometryTimestamp =
246 m_odometryPoseBuffer.GetInternalBuffer().back().first;
247 timestamp =
248 std::clamp(timestamp, oldestOdometryTimestamp, newestOdometryTimestamp);
249
250 // Step 2: If there are no applicable vision updates, use the odometry-only
251 // information.
252 if (m_visionUpdates.empty() || timestamp < m_visionUpdates.begin()->first) {
253 return m_odometryPoseBuffer.Sample(timestamp);
254 }
255
256 // Step 3: Get the latest vision update from before or at the timestamp to
257 // sample at.
258 // First, find the iterator past the sample timestamp, then go back one.
259 // Note that upper_bound() won't return begin() because we check begin()
260 // earlier.
261 auto floorIter = m_visionUpdates.upper_bound(timestamp);
262 --floorIter;
263 auto visionUpdate = floorIter->second;
264
265 // Step 4: Get the pose measured by odometry at the time of the sample.
266 auto odometryEstimate = m_odometryPoseBuffer.Sample(timestamp);
267
268 // Step 5: Apply the vision compensation to the odometry pose.
269 // TODO Replace with std::optional::transform() in C++23
270 if (odometryEstimate) {
271 return visionUpdate.Compensate(*odometryEstimate);
272 }
273 return std::nullopt;
274 }
275
276 /**
277 * Adds a vision measurement to the Kalman Filter. This will correct
278 * the odometry pose estimate while still accounting for measurement noise.
279 *
280 * This method can be called as infrequently as you want, as long as you are
281 * calling Update() every loop.
282 *
283 * To promote stability of the pose estimate and make it robust to bad vision
284 * data, we recommend only adding vision measurements that are already within
285 * one meter or so of the current pose estimate.
286 *
287 * @param visionRobotPose The pose of the robot as measured by the vision
288 * camera.
289 * @param timestamp The timestamp of the vision measurement in seconds. Note
290 * that if you don't use your own time source by calling UpdateWithTime(),
291 * then you must use a timestamp with the same epoch as
292 * wpi::Timer::GetMonotonicTimestamp(). This means that you should use
293 * wpi::Timer::GetMonotonicTimestamp() as your time source in this case.
294 */
295 void AddVisionMeasurement(const Pose3d& visionRobotPose,
296 wpi::units::second_t timestamp) {
297 // Step 0: If this measurement is old enough to be outside the pose buffer's
298 // timespan, skip.
299 if (m_odometryPoseBuffer.GetInternalBuffer().empty() ||
300 m_odometryPoseBuffer.GetInternalBuffer().front().first -
301 kBufferDuration >
302 timestamp) {
303 return;
304 }
305
306 // Step 1: Clean up any old entries
307 CleanUpVisionUpdates();
308
309 // Step 2: Get the pose measured by odometry at the moment the vision
310 // measurement was made.
311 auto odometrySample = m_odometryPoseBuffer.Sample(timestamp);
312
313 if (!odometrySample) {
314 return;
315 }
316
317 // Step 3: Get the vision-compensated pose estimate at the moment the vision
318 // measurement was made.
319 auto visionSample = SampleAt(timestamp);
320
321 if (!visionSample) {
322 return;
323 }
324
325 // Step 4: Measure the transform between the old pose estimate and the
326 // vision pose.
327 auto transform = visionRobotPose - visionSample.value();
328
329 // Step 5: We should not trust the transform entirely, so instead we scale
330 // this transform by a Kalman gain matrix representing how much we trust
331 // vision measurements compared to our current pose.
332 wpi::math::Vectord<6> k_times_transform =
333 m_vision_K * wpi::math::Vectord<6>{transform.X().value(),
334 transform.Y().value(),
335 transform.Z().value(),
336 transform.Rotation().X().value(),
337 transform.Rotation().Y().value(),
338 transform.Rotation().Z().value()};
339
340 // Step 6: Convert back to Transform3d.
341 Transform3d scaledTransform{
342 wpi::units::meter_t{k_times_transform(0)},
343 wpi::units::meter_t{k_times_transform(1)},
344 wpi::units::meter_t{k_times_transform(2)},
345 Rotation3d{wpi::units::radian_t{k_times_transform(3)},
346 wpi::units::radian_t{k_times_transform(4)},
347 wpi::units::radian_t{k_times_transform(5)}}};
348
349 // Step 7: Calculate and record the vision update.
350 VisionUpdate visionUpdate{*visionSample + scaledTransform, *odometrySample};
351 m_visionUpdates[timestamp] = visionUpdate;
352
353 // Step 8: Remove later vision measurements. (Matches previous behavior)
354 auto firstAfter = m_visionUpdates.upper_bound(timestamp);
355 m_visionUpdates.erase(firstAfter, m_visionUpdates.end());
356
357 // Step 9: Update latest pose estimate. Since we cleared all updates after
358 // this vision update, it's guaranteed to be the latest vision update.
359 m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
360 }
361
362 /**
363 * Adds a vision measurement to the Kalman Filter. This will correct
364 * the odometry pose estimate while still accounting for measurement noise.
365 *
366 * This method can be called as infrequently as you want, as long as you are
367 * calling Update() every loop.
368 *
369 * To promote stability of the pose estimate and make it robust to bad vision
370 * data, we recommend only adding vision measurements that are already within
371 * one meter or so of the current pose estimate.
372 *
373 * Note that the vision measurement standard deviations passed into this
374 * method will continue to apply to future measurements until a subsequent
375 * call to SetVisionMeasurementStdDevs() or this method.
376 *
377 * @param visionRobotPose The pose of the robot as measured by the vision
378 * camera.
379 * @param timestamp The timestamp of the vision measurement in seconds. Note
380 * that if you don't use your own time source by calling UpdateWithTime(),
381 * then you must use a timestamp with the same epoch as
382 * wpi::Timer::GetMonotonicTimestamp(). This means that you should use
383 * wpi::Timer::GetMonotonicTimestamp() as your time source in this case.
384 * @param visionMeasurementStdDevs Standard deviations of the vision pose
385 * measurement (x position in meters, y position in meters, and heading in
386 * radians). Increase these numbers to trust the vision pose measurement
387 * less.
388 */
390 const Pose3d& visionRobotPose, wpi::units::second_t timestamp,
391 const wpi::util::array<double, 4>& visionMeasurementStdDevs) {
392 SetVisionMeasurementStdDevs(visionMeasurementStdDevs);
393 AddVisionMeasurement(visionRobotPose, timestamp);
394 }
395
396 /**
397 * Updates the pose estimator with wheel encoder and gyro information. This
398 * should be called every loop.
399 *
400 * @param gyroAngle The current gyro angle.
401 * @param wheelPositions The distances traveled by the encoders.
402 *
403 * @return The estimated pose of the robot in meters.
404 */
405 Pose3d Update(const Rotation3d& gyroAngle,
406 const WheelPositions& wheelPositions) {
408 wheelPositions);
409 }
410
411 /**
412 * Updates the pose estimator with wheel encoder and gyro information. This
413 * should be called every loop.
414 *
415 * @param currentTime The time at which this method was called.
416 * @param gyroAngle The current gyro angle.
417 * @param wheelPositions The distances traveled by the encoders.
418 *
419 * @return The estimated pose of the robot in meters.
420 */
421 Pose3d UpdateWithTime(wpi::units::second_t currentTime,
422 const Rotation3d& gyroAngle,
423 const WheelPositions& wheelPositions) {
424 auto odometryEstimate = m_odometry.Update(gyroAngle, wheelPositions);
425
426 m_odometryPoseBuffer.AddSample(currentTime, odometryEstimate);
427
428 if (m_visionUpdates.empty()) {
429 m_poseEstimate = odometryEstimate;
430 } else {
431 auto visionUpdate = m_visionUpdates.rbegin()->second;
432 m_poseEstimate = visionUpdate.Compensate(odometryEstimate);
433 }
434
435 return GetEstimatedPosition();
436 }
437
438 private:
439 /**
440 * Removes stale vision updates that won't affect sampling.
441 */
442 void CleanUpVisionUpdates() {
443 // Step 0: If there are no odometry samples, skip.
444 if (m_odometryPoseBuffer.GetInternalBuffer().empty()) {
445 return;
446 }
447
448 // Step 1: Find the oldest timestamp that needs a vision update.
449 wpi::units::second_t oldestOdometryTimestamp =
450 m_odometryPoseBuffer.GetInternalBuffer().front().first;
451
452 // Step 2: If there are no vision updates before that timestamp, skip.
453 if (m_visionUpdates.empty() ||
454 oldestOdometryTimestamp < m_visionUpdates.begin()->first) {
455 return;
456 }
457
458 // Step 3: Find the newest vision update timestamp before or at the oldest
459 // timestamp.
460 // First, find the iterator past the oldest odometry timestamp, then go
461 // back one. Note that upper_bound() won't return begin() because we check
462 // begin() earlier.
463 auto newestNeededVisionUpdate =
464 m_visionUpdates.upper_bound(oldestOdometryTimestamp);
465 --newestNeededVisionUpdate;
466
467 // Step 4: Remove all entries strictly before the newest timestamp we need.
468 m_visionUpdates.erase(m_visionUpdates.begin(), newestNeededVisionUpdate);
469 }
470
471 struct VisionUpdate {
472 // The vision-compensated pose estimate
473 Pose3d visionPose;
474
475 // The pose estimated based solely on odometry
476 Pose3d odometryPose;
477
478 /**
479 * Returns the vision-compensated version of the pose. Specifically, changes
480 * the pose from being relative to this record's odometry pose to being
481 * relative to this record's vision pose.
482 *
483 * @param pose The pose to compensate.
484 * @return The compensated pose.
485 */
486 Pose3d Compensate(const Pose3d& pose) const {
487 auto delta = pose - odometryPose;
488 return visionPose + delta;
489 }
490 };
491
  // How long odometry samples are retained for latency compensation of vision
  // measurements.
  static constexpr wpi::units::second_t kBufferDuration = 1.5_s;

  // The wrapped odometry source; owned by the caller, must outlive this
  // estimator.
  Odometry3d<WheelPositions, WheelVelocities, WheelAccelerations>& m_odometry;

  // Diagonal of process noise covariance matrix Q
  wpi::util::array<double, 4> m_q{wpi::util::empty_array};

  // Kalman gain matrix K
  Eigen::DiagonalMatrix<double, 6> m_vision_K =
      Eigen::DiagonalMatrix<double, 6>::Zero();

  // Maps timestamps to odometry-only pose estimates
  TimeInterpolatableBuffer<Pose3d> m_odometryPoseBuffer{kBufferDuration};
  // Maps timestamps to vision updates
  // Always contains one entry before the oldest entry in m_odometryPoseBuffer,
  // unless there have been no vision measurements after the last reset. May
  // contain one entry while m_odometryPoseBuffer is empty to correct for
  // translation/rotation after a call to ResetRotation/ResetTranslation.
  std::map<wpi::units::second_t, VisionUpdate> m_visionUpdates;

  // Latest fused estimate returned by GetEstimatedPosition().
  Pose3d m_poseEstimate;
513};
514
515} // namespace wpi::math
#define WPILIB_DLLEXPORT
Definition SymbolExports.hpp:36
Helper class that converts a chassis velocity (dx, dy, and dtheta components) into individual wheel v...
Definition Kinematics.hpp:26
static void ReportUsage(std::string_view resource, std::string_view data)
Definition MathShared.hpp:61
static wpi::units::second_t GetTimestamp()
Definition MathShared.hpp:65
Class for odometry.
Definition Odometry3d.hpp:31
Represents a 3D pose containing translational and rotational elements.
Definition Pose3d.hpp:31
void AddVisionMeasurement(const Pose3d &visionRobotPose, wpi::units::second_t timestamp, const wpi::util::array< double, 4 > &visionMeasurementStdDevs)
Adds a vision measurement to the Kalman Filter.
Definition PoseEstimator3d.hpp:389
PoseEstimator3d(Kinematics< WheelPositions, WheelVelocities, WheelAccelerations > &kinematics, Odometry3d< WheelPositions, WheelVelocities, WheelAccelerations > &odometry, const wpi::util::array< double, 4 > &stateStdDevs, const wpi::util::array< double, 4 > &visionMeasurementStdDevs)
Constructs a PoseEstimator3d.
Definition PoseEstimator3d.hpp:71
void ResetTranslation(const Translation3d &translation)
Resets the robot's translation.
Definition PoseEstimator3d.hpp:159
void ResetPosition(const Rotation3d &gyroAngle, const WheelPositions &wheelPositions, const Pose3d &pose)
Resets the robot's position on the field.
Definition PoseEstimator3d.hpp:129
void SetVisionMeasurementStdDevs(const wpi::util::array< double, 4 > &visionMeasurementStdDevs)
Sets the pose estimator's trust in vision measurements.
Definition PoseEstimator3d.hpp:96
Pose3d Update(const Rotation3d &gyroAngle, const WheelPositions &wheelPositions)
Updates the pose estimator with wheel encoder and gyro information.
Definition PoseEstimator3d.hpp:405
void ResetPose(const Pose3d &pose)
Resets the robot's pose.
Definition PoseEstimator3d.hpp:143
Pose3d UpdateWithTime(wpi::units::second_t currentTime, const Rotation3d &gyroAngle, const WheelPositions &wheelPositions)
Updates the pose estimator with wheel encoder and gyro information.
Definition PoseEstimator3d.hpp:421
Pose3d GetEstimatedPosition() const
Gets the estimated robot pose.
Definition PoseEstimator3d.hpp:225
void AddVisionMeasurement(const Pose3d &visionRobotPose, wpi::units::second_t timestamp)
Adds a vision measurement to the Kalman Filter.
Definition PoseEstimator3d.hpp:295
std::optional< Pose3d > SampleAt(wpi::units::second_t timestamp) const
Return the pose at a given timestamp, if the buffer is not empty.
Definition PoseEstimator3d.hpp:234
void ResetRotation(const Rotation3d &rotation)
Resets the robot's rotation.
Definition PoseEstimator3d.hpp:194
A rotation in a 3D coordinate frame, represented by a quaternion.
Definition Rotation3d.hpp:72
Represents a transformation for a Pose3d in the pose's frame.
Definition Transform3d.hpp:23
Represents a translation in 3D space.
Definition Translation3d.hpp:34
This class is a wrapper around std::array that does compile time size checking.
Definition array.hpp:26
Definition LinearSystem.hpp:20
Eigen::Vector< double, Size > Vectord
Definition EigenCore.hpp:12
constexpr empty_array_t empty_array
Definition array.hpp:16