WPILibC++ 2027.0.0-alpha-4
Loading...
Searching...
No Matches
PoseEstimator3d.hpp
Go to the documentation of this file.
1// Copyright (c) FIRST and other WPILib contributors.
2// Open Source Software; you can modify and/or share it under the terms of
3// the WPILib BSD license file in the root directory of this project.
4
5#pragma once
6
7#include <cmath>
8#include <map>
9#include <optional>
10#include <vector>
11
12#include <Eigen/Core>
13
23#include "wpi/units/time.hpp"
25#include "wpi/util/array.hpp"
26
27namespace wpi::math {
28
29/**
30 * This class wraps odometry to fuse latency-compensated
31 * vision measurements with encoder measurements. Robot code should not use this
32 * directly- Instead, use the particular type for your drivetrain (e.g.,
33 * DifferentialDrivePoseEstimator3d). It will correct for noisy vision
34 * measurements and encoder drift. It is intended to be an easy drop-in for
35 * Odometry3d. It is also intended to be an easy replacement for PoseEstimator,
36 * only requiring the addition of a standard deviation for Z and appropriate
37 * conversions between 2D and 3D versions of geometry classes. (See
38 * Pose3d(Pose2d), Rotation3d(Rotation2d), Translation3d(Translation2d), and
39 * Pose3d.ToPose2d().)
40 *
41 * Update() should be called every robot loop.
42 *
43 * AddVisionMeasurement() can be called as infrequently as you want; if you
44 * never call it, then this class will behave like regular encoder odometry.
45 *
46 * @tparam WheelPositions Wheel positions type.
47 * @tparam WheelVelocities Wheel velocities type.
48 * @tparam WheelAccelerations Wheel accelerations type.
49 */
50template <typename WheelPositions, typename WheelVelocities,
51 typename WheelAccelerations>
53 public:
54 /**
55 * Constructs a PoseEstimator3d.
56 *
57 * @warning The initial pose estimate will always be the default pose,
58 * regardless of the odometry's current pose.
59 *
60 * @param kinematics A correctly-configured kinematics object for your
61 * drivetrain.
62 * @param odometry A correctly-configured odometry object for your drivetrain.
 * @param stateStdDevs Standard deviations of the pose estimate (x position in
 * meters, y position in meters, z position in meters, and angle in radians).
 * Increase these numbers to trust your state estimate less.
66 * @param visionMeasurementStdDevs Standard deviations of the vision pose
67 * measurement (x position in meters, y position in meters, and z position
68 * in meters, and angle in radians). Increase these numbers to trust the
69 * vision pose measurement less.
70 */
73 kinematics,
75 const wpi::util::array<double, 4>& stateStdDevs,
76 const wpi::util::array<double, 4>& visionMeasurementStdDevs)
77 : m_odometry(odometry) {
78 for (size_t i = 0; i < 4; ++i) {
79 m_q[i] = stateStdDevs[i] * stateStdDevs[i];
80 }
81
82 SetVisionMeasurementStdDevs(visionMeasurementStdDevs);
83 wpi::math::MathSharedStore::ReportUsage("PoseEstimator3d", "");
84 }
85
86 /**
87 * Sets the pose estimator's trust in vision measurements. This might be used
88 * to change trust in vision measurements after the autonomous period, or to
89 * change trust as distance to a vision target increases.
90 *
 * @param visionMeasurementStdDevs Standard deviations of the vision pose
 * measurement (x position in meters, y position in meters, z position in
 * meters, and angle in radians). Increase these numbers to trust the vision
 * pose measurement less.
95 */
97 const wpi::util::array<double, 4>& visionMeasurementStdDevs) {
98 // Diagonal of measurement noise covariance matrix R
100 for (size_t i = 0; i < 4; ++i) {
101 r[i] = visionMeasurementStdDevs[i] * visionMeasurementStdDevs[i];
102 }
103
104 // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
105 // and C = I. See wpimath/algorithms.md.
106 for (size_t row = 0; row < 4; ++row) {
107 if (m_q[row] == 0.0) {
108 m_vision_K.diagonal()[row] = 0.0;
109 } else {
110 m_vision_K.diagonal()[row] =
111 m_q[row] / (m_q[row] + std::sqrt(m_q[row] * r[row]));
112 }
113 }
114 double angle_gain = m_vision_K.diagonal()[3];
115 m_vision_K.diagonal()[4] = angle_gain;
116 m_vision_K.diagonal()[5] = angle_gain;
117 }
118
119 /**
120 * Resets the robot's position on the field.
121 *
122 * The gyroscope angle does not need to be reset in the user's robot code.
123 * The library automatically takes care of offsetting the gyro angle.
124 *
125 * @param gyroAngle The current gyro angle.
126 * @param wheelPositions The distances traveled by the encoders.
127 * @param pose The estimated pose of the robot on the field.
128 */
129 void ResetPosition(const Rotation3d& gyroAngle,
130 const WheelPositions& wheelPositions, const Pose3d& pose) {
131 // Reset state estimate and error covariance
132 m_odometry.ResetPosition(gyroAngle, wheelPositions, pose);
133 m_odometryPoseBuffer.Clear();
134 m_visionUpdates.clear();
135 m_poseEstimate = m_odometry.GetPose();
136 }
137
138 /**
139 * Resets the robot's pose.
140 *
141 * @param pose The pose to reset to.
142 */
143 void ResetPose(const Pose3d& pose) {
144 m_odometry.ResetPose(pose);
145 m_odometryPoseBuffer.Clear();
146 m_visionUpdates.clear();
147 m_poseEstimate = m_odometry.GetPose();
148 }
149
150 /**
151 * Resets the robot's translation.
152 *
 * @param translation The translation to reset the robot's position to.
154 */
155#if defined(__GNUC__) && !defined(__clang__)
156#pragma GCC diagnostic push
157#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
158#endif // defined(__GNUC__) && !defined(__clang__)
159 void ResetTranslation(const Translation3d& translation) {
160 m_odometry.ResetTranslation(translation);
161
162 const std::optional<std::pair<units::second_t, VisionUpdate>>
163 latestVisionUpdate =
164 m_visionUpdates.empty() ? std::nullopt
165 : std::optional{*m_visionUpdates.crbegin()};
166 m_odometryPoseBuffer.Clear();
167 m_visionUpdates.clear();
168
169 if (latestVisionUpdate) {
170 // apply vision compensation to the pose rotation
171 const VisionUpdate visionUpdate{
172 Pose3d{translation, latestVisionUpdate->second.visionPose.Rotation()},
173 Pose3d{translation,
174 latestVisionUpdate->second.odometryPose.Rotation()}};
175 m_visionUpdates[latestVisionUpdate->first] = visionUpdate;
176 m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
177 } else {
178 m_poseEstimate = m_odometry.GetPose();
179 }
180 }
181#if defined(__GNUC__) && !defined(__clang__)
182#pragma GCC diagnostic pop
183#endif // defined(__GNUC__) && !defined(__clang__)
184
185 /**
186 * Resets the robot's rotation.
187 *
188 * @param rotation The rotation to reset to.
189 */
190#if defined(__GNUC__) && !defined(__clang__)
191#pragma GCC diagnostic push
192#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
193#endif // defined(__GNUC__) && !defined(__clang__)
194 void ResetRotation(const Rotation3d& rotation) {
195 m_odometry.ResetRotation(rotation);
196
197 const std::optional<std::pair<units::second_t, VisionUpdate>>
198 latestVisionUpdate =
199 m_visionUpdates.empty() ? std::nullopt
200 : std::optional{*m_visionUpdates.crbegin()};
201 m_odometryPoseBuffer.Clear();
202 m_visionUpdates.clear();
203
204 if (latestVisionUpdate) {
205 // apply vision compensation to the pose translation
206 const VisionUpdate visionUpdate{
207 Pose3d{latestVisionUpdate->second.visionPose.Translation(), rotation},
208 Pose3d{latestVisionUpdate->second.odometryPose.Translation(),
209 rotation}};
210 m_visionUpdates[latestVisionUpdate->first] = visionUpdate;
211 m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
212 } else {
213 m_poseEstimate = m_odometry.GetPose();
214 }
215 }
216#if defined(__GNUC__) && !defined(__clang__)
217#pragma GCC diagnostic pop
218#endif // defined(__GNUC__) && !defined(__clang__)
219
220 /**
221 * Gets the estimated robot pose.
222 *
223 * @return The estimated robot pose in meters.
224 */
225 Pose3d GetEstimatedPosition() const { return m_poseEstimate; }
226
227 /**
228 * Return the pose at a given timestamp, if the buffer is not empty.
229 *
230 * @param timestamp The pose's timestamp.
231 * @return The pose at the given timestamp (or std::nullopt if the buffer is
232 * empty).
233 */
234 std::optional<Pose3d> SampleAt(wpi::units::second_t timestamp) const {
235 // Step 0: If there are no odometry updates to sample, skip.
236 if (m_odometryPoseBuffer.GetInternalBuffer().empty()) {
237 return std::nullopt;
238 }
239
240 // Step 1: Make sure timestamp matches the sample from the odometry pose
241 // buffer. (When sampling, the buffer will always use a timestamp
242 // between the first and last timestamps)
243 wpi::units::second_t oldestOdometryTimestamp =
244 m_odometryPoseBuffer.GetInternalBuffer().front().first;
245 wpi::units::second_t newestOdometryTimestamp =
246 m_odometryPoseBuffer.GetInternalBuffer().back().first;
247 timestamp =
248 std::clamp(timestamp, oldestOdometryTimestamp, newestOdometryTimestamp);
249
250 // Step 2: If there are no applicable vision updates, use the odometry-only
251 // information.
252 if (m_visionUpdates.empty() || timestamp < m_visionUpdates.begin()->first) {
253 return m_odometryPoseBuffer.Sample(timestamp);
254 }
255
256 // Step 3: Get the latest vision update from before or at the timestamp to
257 // sample at.
258 // First, find the iterator past the sample timestamp, then go back one.
259 // Note that upper_bound() won't return begin() because we check begin()
260 // earlier.
261 auto floorIter = m_visionUpdates.upper_bound(timestamp);
262 --floorIter;
263 auto visionUpdate = floorIter->second;
264
265 // Step 4: Get the pose measured by odometry at the time of the sample.
266 auto odometryEstimate = m_odometryPoseBuffer.Sample(timestamp);
267
268 // Step 5: Apply the vision compensation to the odometry pose.
269 // TODO Replace with std::optional::transform() in C++23
270 if (odometryEstimate) {
271 return visionUpdate.Compensate(*odometryEstimate);
272 }
273 return std::nullopt;
274 }
275
276 /**
277 * Adds a vision measurement to the Kalman Filter. This will correct
278 * the odometry pose estimate while still accounting for measurement noise.
279 *
280 * This method can be called as infrequently as you want, as long as you are
281 * calling Update() every loop.
282 *
283 * To promote stability of the pose estimate and make it robust to bad vision
284 * data, we recommend only adding vision measurements that are already within
285 * one meter or so of the current pose estimate.
286 *
287 * @param visionRobotPose The pose of the robot as measured by the vision
288 * camera.
289 * @param timestamp The timestamp of the vision measurement in seconds. Note
290 * that if you don't use your own time source by calling UpdateWithTime(),
291 * then you must use a timestamp with an epoch since FPGA startup (i.e.,
292 * the epoch of this timestamp is the same epoch as
293 * wpi::math::Timer::GetFPGATimestamp(). This means that you should use
294 * wpi::math::Timer::GetFPGATimestamp() as your time source in this case.
295 */
296 void AddVisionMeasurement(const Pose3d& visionRobotPose,
297 wpi::units::second_t timestamp) {
298 // Step 0: If this measurement is old enough to be outside the pose buffer's
299 // timespan, skip.
300 if (m_odometryPoseBuffer.GetInternalBuffer().empty() ||
301 m_odometryPoseBuffer.GetInternalBuffer().front().first -
302 kBufferDuration >
303 timestamp) {
304 return;
305 }
306
307 // Step 1: Clean up any old entries
308 CleanUpVisionUpdates();
309
310 // Step 2: Get the pose measured by odometry at the moment the vision
311 // measurement was made.
312 auto odometrySample = m_odometryPoseBuffer.Sample(timestamp);
313
314 if (!odometrySample) {
315 return;
316 }
317
318 // Step 3: Get the vision-compensated pose estimate at the moment the vision
319 // measurement was made.
320 auto visionSample = SampleAt(timestamp);
321
322 if (!visionSample) {
323 return;
324 }
325
326 // Step 4: Measure the transform between the old pose estimate and the
327 // vision pose.
328 auto transform = visionRobotPose - visionSample.value();
329
330 // Step 5: We should not trust the transform entirely, so instead we scale
331 // this transform by a Kalman gain matrix representing how much we trust
332 // vision measurements compared to our current pose.
333 wpi::math::Vectord<6> k_times_transform =
334 m_vision_K * wpi::math::Vectord<6>{transform.X().value(),
335 transform.Y().value(),
336 transform.Z().value(),
337 transform.Rotation().X().value(),
338 transform.Rotation().Y().value(),
339 transform.Rotation().Z().value()};
340
341 // Step 6: Convert back to Transform3d.
342 Transform3d scaledTransform{
343 wpi::units::meter_t{k_times_transform(0)},
344 wpi::units::meter_t{k_times_transform(1)},
345 wpi::units::meter_t{k_times_transform(2)},
346 Rotation3d{wpi::units::radian_t{k_times_transform(3)},
347 wpi::units::radian_t{k_times_transform(4)},
348 wpi::units::radian_t{k_times_transform(5)}}};
349
350 // Step 7: Calculate and record the vision update.
351 VisionUpdate visionUpdate{*visionSample + scaledTransform, *odometrySample};
352 m_visionUpdates[timestamp] = visionUpdate;
353
354 // Step 8: Remove later vision measurements. (Matches previous behavior)
355 auto firstAfter = m_visionUpdates.upper_bound(timestamp);
356 m_visionUpdates.erase(firstAfter, m_visionUpdates.end());
357
358 // Step 9: Update latest pose estimate. Since we cleared all updates after
359 // this vision update, it's guaranteed to be the latest vision update.
360 m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
361 }
362
363 /**
364 * Adds a vision measurement to the Kalman Filter. This will correct
365 * the odometry pose estimate while still accounting for measurement noise.
366 *
367 * This method can be called as infrequently as you want, as long as you are
368 * calling Update() every loop.
369 *
370 * To promote stability of the pose estimate and make it robust to bad vision
371 * data, we recommend only adding vision measurements that are already within
372 * one meter or so of the current pose estimate.
373 *
374 * Note that the vision measurement standard deviations passed into this
375 * method will continue to apply to future measurements until a subsequent
376 * call to SetVisionMeasurementStdDevs() or this method.
377 *
378 * @param visionRobotPose The pose of the robot as measured by the vision
379 * camera.
380 * @param timestamp The timestamp of the vision measurement in seconds. Note
381 * that if you don't use your own time source by calling UpdateWithTime(),
382 * then you must use a timestamp with an epoch since FPGA startup (i.e.,
383 * the epoch of this timestamp is the same epoch as
384 * wpi::math::Timer::GetFPGATimestamp(). This means that you should use
385 * wpi::math::Timer::GetFPGATimestamp() as your time source in this case.
 * @param visionMeasurementStdDevs Standard deviations of the vision pose
 * measurement (x position in meters, y position in meters, z position in
 * meters, and angle in radians). Increase these numbers to trust the vision
 * pose measurement less.
390 */
392 const Pose3d& visionRobotPose, wpi::units::second_t timestamp,
393 const wpi::util::array<double, 4>& visionMeasurementStdDevs) {
394 SetVisionMeasurementStdDevs(visionMeasurementStdDevs);
395 AddVisionMeasurement(visionRobotPose, timestamp);
396 }
397
398 /**
399 * Updates the pose estimator with wheel encoder and gyro information. This
400 * should be called every loop.
401 *
402 * @param gyroAngle The current gyro angle.
403 * @param wheelPositions The distances traveled by the encoders.
404 *
405 * @return The estimated pose of the robot in meters.
406 */
407 Pose3d Update(const Rotation3d& gyroAngle,
408 const WheelPositions& wheelPositions) {
410 wheelPositions);
411 }
412
413 /**
414 * Updates the pose estimator with wheel encoder and gyro information. This
415 * should be called every loop.
416 *
417 * @param currentTime The time at which this method was called.
418 * @param gyroAngle The current gyro angle.
419 * @param wheelPositions The distances traveled by the encoders.
420 *
421 * @return The estimated pose of the robot in meters.
422 */
423 Pose3d UpdateWithTime(wpi::units::second_t currentTime,
424 const Rotation3d& gyroAngle,
425 const WheelPositions& wheelPositions) {
426 auto odometryEstimate = m_odometry.Update(gyroAngle, wheelPositions);
427
428 m_odometryPoseBuffer.AddSample(currentTime, odometryEstimate);
429
430 if (m_visionUpdates.empty()) {
431 m_poseEstimate = odometryEstimate;
432 } else {
433 auto visionUpdate = m_visionUpdates.rbegin()->second;
434 m_poseEstimate = visionUpdate.Compensate(odometryEstimate);
435 }
436
437 return GetEstimatedPosition();
438 }
439
440 private:
441 /**
442 * Removes stale vision updates that won't affect sampling.
443 */
444 void CleanUpVisionUpdates() {
445 // Step 0: If there are no odometry samples, skip.
446 if (m_odometryPoseBuffer.GetInternalBuffer().empty()) {
447 return;
448 }
449
450 // Step 1: Find the oldest timestamp that needs a vision update.
451 wpi::units::second_t oldestOdometryTimestamp =
452 m_odometryPoseBuffer.GetInternalBuffer().front().first;
453
454 // Step 2: If there are no vision updates before that timestamp, skip.
455 if (m_visionUpdates.empty() ||
456 oldestOdometryTimestamp < m_visionUpdates.begin()->first) {
457 return;
458 }
459
460 // Step 3: Find the newest vision update timestamp before or at the oldest
461 // timestamp.
462 // First, find the iterator past the oldest odometry timestamp, then go
463 // back one. Note that upper_bound() won't return begin() because we check
464 // begin() earlier.
465 auto newestNeededVisionUpdate =
466 m_visionUpdates.upper_bound(oldestOdometryTimestamp);
467 --newestNeededVisionUpdate;
468
469 // Step 4: Remove all entries strictly before the newest timestamp we need.
470 m_visionUpdates.erase(m_visionUpdates.begin(), newestNeededVisionUpdate);
471 }
472
473 struct VisionUpdate {
474 // The vision-compensated pose estimate
475 Pose3d visionPose;
476
477 // The pose estimated based solely on odometry
478 Pose3d odometryPose;
479
480 /**
481 * Returns the vision-compensated version of the pose. Specifically, changes
482 * the pose from being relative to this record's odometry pose to being
483 * relative to this record's vision pose.
484 *
485 * @param pose The pose to compensate.
486 * @return The compensated pose.
487 */
488 Pose3d Compensate(const Pose3d& pose) const {
489 auto delta = pose - odometryPose;
490 return visionPose + delta;
491 }
492 };
493
494 static constexpr wpi::units::second_t kBufferDuration = 1.5_s;
495
496 Odometry3d<WheelPositions, WheelVelocities, WheelAccelerations>& m_odometry;
497
498 // Diagonal of process noise covariance matrix Q
499 wpi::util::array<double, 4> m_q{wpi::util::empty_array};
500
501 // Kalman gain matrix K
502 Eigen::DiagonalMatrix<double, 6> m_vision_K =
503 Eigen::DiagonalMatrix<double, 6>::Zero();
504
505 // Maps timestamps to odometry-only pose estimates
506 TimeInterpolatableBuffer<Pose3d> m_odometryPoseBuffer{kBufferDuration};
507 // Maps timestamps to vision updates
508 // Always contains one entry before the oldest entry in m_odometryPoseBuffer,
509 // unless there have been no vision measurements after the last reset. May
510 // contain one entry while m_odometryPoseBuffer is empty to correct for
511 // translation/rotation after a call to ResetRotation/ResetTranslation.
512 std::map<wpi::units::second_t, VisionUpdate> m_visionUpdates;
513
514 Pose3d m_poseEstimate;
515};
516
517} // namespace wpi::math
#define WPILIB_DLLEXPORT
Definition SymbolExports.hpp:36
Helper class that converts a chassis velocity (dx, dy, and dtheta components) into individual wheel v...
Definition Kinematics.hpp:26
static void ReportUsage(std::string_view resource, std::string_view data)
Definition MathShared.hpp:61
static wpi::units::second_t GetTimestamp()
Definition MathShared.hpp:65
Class for odometry.
Definition Odometry3d.hpp:31
Represents a 3D pose containing translational and rotational elements.
Definition Pose3d.hpp:28
void AddVisionMeasurement(const Pose3d &visionRobotPose, wpi::units::second_t timestamp, const wpi::util::array< double, 4 > &visionMeasurementStdDevs)
Adds a vision measurement to the Kalman Filter.
Definition PoseEstimator3d.hpp:391
PoseEstimator3d(Kinematics< WheelPositions, WheelVelocities, WheelAccelerations > &kinematics, Odometry3d< WheelPositions, WheelVelocities, WheelAccelerations > &odometry, const wpi::util::array< double, 4 > &stateStdDevs, const wpi::util::array< double, 4 > &visionMeasurementStdDevs)
Constructs a PoseEstimator3d.
Definition PoseEstimator3d.hpp:71
void ResetTranslation(const Translation3d &translation)
Resets the robot's translation.
Definition PoseEstimator3d.hpp:159
void ResetPosition(const Rotation3d &gyroAngle, const WheelPositions &wheelPositions, const Pose3d &pose)
Resets the robot's position on the field.
Definition PoseEstimator3d.hpp:129
void SetVisionMeasurementStdDevs(const wpi::util::array< double, 4 > &visionMeasurementStdDevs)
Sets the pose estimator's trust in vision measurements.
Definition PoseEstimator3d.hpp:96
Pose3d Update(const Rotation3d &gyroAngle, const WheelPositions &wheelPositions)
Updates the pose estimator with wheel encoder and gyro information.
Definition PoseEstimator3d.hpp:407
void ResetPose(const Pose3d &pose)
Resets the robot's pose.
Definition PoseEstimator3d.hpp:143
Pose3d UpdateWithTime(wpi::units::second_t currentTime, const Rotation3d &gyroAngle, const WheelPositions &wheelPositions)
Updates the pose estimator with wheel encoder and gyro information.
Definition PoseEstimator3d.hpp:423
Pose3d GetEstimatedPosition() const
Gets the estimated robot pose.
Definition PoseEstimator3d.hpp:225
void AddVisionMeasurement(const Pose3d &visionRobotPose, wpi::units::second_t timestamp)
Adds a vision measurement to the Kalman Filter.
Definition PoseEstimator3d.hpp:296
std::optional< Pose3d > SampleAt(wpi::units::second_t timestamp) const
Return the pose at a given timestamp, if the buffer is not empty.
Definition PoseEstimator3d.hpp:234
void ResetRotation(const Rotation3d &rotation)
Resets the robot's rotation.
Definition PoseEstimator3d.hpp:194
A rotation in a 3D coordinate frame, represented by a quaternion.
Definition Rotation3d.hpp:69
Represents a transformation for a Pose3d in the pose's frame.
Definition Transform3d.hpp:23
Represents a translation in 3D space.
Definition Translation3d.hpp:31
This class is a wrapper around std::array that does compile time size checking.
Definition array.hpp:26
Definition LinearSystem.hpp:20
Eigen::Vector< double, Size > Vectord
Definition EigenCore.hpp:12
constexpr empty_array_t empty_array
Definition array.hpp:16