WPILibC++ 2026.2.2
Loading...
Searching...
No Matches
PoseEstimator3d.h
Go to the documentation of this file.
1// Copyright (c) FIRST and other WPILib contributors.
2// Open Source Software; you can modify and/or share it under the terms of
3// the WPILib BSD license file in the root directory of this project.
4
5#pragma once
6
#include <algorithm>
#include <cmath>
#include <map>
#include <optional>
#include <utility>
#include <vector>

#include <Eigen/Core>
#include <wpi/SymbolExports.h>
#include <wpi/array.h>

#include "frc/EigenCore.h"
#include "frc/geometry/Pose3d.h"
#include "frc/geometry/Rotation3d.h"
#include "frc/geometry/Transform3d.h"
#include "frc/geometry/Translation3d.h"
#include "frc/interpolation/TimeInterpolatableBuffer.h"
#include "frc/kinematics/Kinematics.h"
#include "frc/kinematics/Odometry3d.h"
#include "units/time.h"
#include "wpimath/MathShared.h"
26
27namespace frc {
28
29/**
30 * This class wraps odometry to fuse latency-compensated
31 * vision measurements with encoder measurements. Robot code should not use this
 * directly. Instead, use the particular type for your drivetrain (e.g.,
33 * DifferentialDrivePoseEstimator3d). It will correct for noisy vision
34 * measurements and encoder drift. It is intended to be an easy drop-in for
35 * Odometry3d. It is also intended to be an easy replacement for PoseEstimator,
36 * only requiring the addition of a standard deviation for Z and appropriate
37 * conversions between 2D and 3D versions of geometry classes. (See
38 * Pose3d(Pose2d), Rotation3d(Rotation2d), Translation3d(Translation2d), and
39 * Pose3d.ToPose2d().)
40 *
41 * Update() should be called every robot loop.
42 *
43 * AddVisionMeasurement() can be called as infrequently as you want; if you
44 * never call it, then this class will behave like regular encoder odometry.
45 *
46 * @tparam WheelSpeeds Wheel speeds type.
47 * @tparam WheelPositions Wheel positions type.
48 */
49template <typename WheelSpeeds, typename WheelPositions>
51 public:
52 /**
53 * Constructs a PoseEstimator3d.
54 *
55 * @warning The initial pose estimate will always be the default pose,
56 * regardless of the odometry's current pose.
57 *
58 * @param kinematics A correctly-configured kinematics object for your
59 * drivetrain.
60 * @param odometry A correctly-configured odometry object for your drivetrain.
61 * @param stateStdDevs Standard deviations of the pose estimate (x position in
62 * meters, y position in meters, and heading in radians). Increase these
63 * numbers to trust your state estimate less.
64 * @param visionMeasurementStdDevs Standard deviations of the vision pose
65 * measurement (x position in meters, y position in meters, and z position
66 * in meters, and angle in radians). Increase these numbers to trust the
67 * vision pose measurement less.
68 */
71 const wpi::array<double, 4>& stateStdDevs,
72 const wpi::array<double, 4>& visionMeasurementStdDevs)
73 : m_odometry(odometry) {
74 for (size_t i = 0; i < 4; ++i) {
75 m_q[i] = stateStdDevs[i] * stateStdDevs[i];
76 }
77
78 SetVisionMeasurementStdDevs(visionMeasurementStdDevs);
81 }
82
83 /**
84 * Sets the pose estimator's trust in vision measurements. This might be used
85 * to change trust in vision measurements after the autonomous period, or to
86 * change trust as distance to a vision target increases.
87 *
88 * @param visionMeasurementStdDevs Standard deviations of the vision pose
89 * measurement (x position in meters, y position in meters, and heading in
90 * radians). Increase these numbers to trust the vision pose measurement
91 * less.
92 */
94 const wpi::array<double, 4>& visionMeasurementStdDevs) {
95 // Diagonal of measurement noise covariance matrix R
97 for (size_t i = 0; i < 4; ++i) {
98 r[i] = visionMeasurementStdDevs[i] * visionMeasurementStdDevs[i];
99 }
100
101 // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
102 // and C = I. See wpimath/algorithms.md.
103 for (size_t row = 0; row < 4; ++row) {
104 if (m_q[row] == 0.0) {
105 m_vision_K.diagonal()[row] = 0.0;
106 } else {
107 m_vision_K.diagonal()[row] =
108 m_q[row] / (m_q[row] + std::sqrt(m_q[row] * r[row]));
109 }
110 }
111 double angle_gain = m_vision_K.diagonal()[3];
112 m_vision_K.diagonal()[4] = angle_gain;
113 m_vision_K.diagonal()[5] = angle_gain;
114 }
115
116 /**
117 * Resets the robot's position on the field.
118 *
119 * The gyroscope angle does not need to be reset in the user's robot code.
120 * The library automatically takes care of offsetting the gyro angle.
121 *
122 * @param gyroAngle The current gyro angle.
123 * @param wheelPositions The distances traveled by the encoders.
124 * @param pose The estimated pose of the robot on the field.
125 */
126 void ResetPosition(const Rotation3d& gyroAngle,
127 const WheelPositions& wheelPositions, const Pose3d& pose) {
128 // Reset state estimate and error covariance
129 m_odometry.ResetPosition(gyroAngle, wheelPositions, pose);
130 m_odometryPoseBuffer.Clear();
131 m_visionUpdates.clear();
132 m_poseEstimate = m_odometry.GetPose();
133 }
134
135 /**
136 * Resets the robot's pose.
137 *
138 * @param pose The pose to reset to.
139 */
140 void ResetPose(const Pose3d& pose) {
141 m_odometry.ResetPose(pose);
142 m_odometryPoseBuffer.Clear();
143 m_visionUpdates.clear();
144 m_poseEstimate = m_odometry.GetPose();
145 }
146
147 /**
148 * Resets the robot's translation.
149 *
150 * @param translation The pose to translation to.
151 */
152#if defined(__GNUC__) && !defined(__clang__)
153#pragma GCC diagnostic push
154#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
155#endif // defined(__GNUC__) && !defined(__clang__)
156 void ResetTranslation(const Translation3d& translation) {
157 m_odometry.ResetTranslation(translation);
158
159 const std::optional<std::pair<units::second_t, VisionUpdate>>
160 latestVisionUpdate =
161 m_visionUpdates.empty() ? std::nullopt
162 : std::optional{*m_visionUpdates.crbegin()};
163 m_odometryPoseBuffer.Clear();
164 m_visionUpdates.clear();
165
166 if (latestVisionUpdate) {
167 // apply vision compensation to the pose rotation
168 const VisionUpdate visionUpdate{
169 Pose3d{translation, latestVisionUpdate->second.visionPose.Rotation()},
170 Pose3d{translation,
171 latestVisionUpdate->second.odometryPose.Rotation()}};
172 m_visionUpdates[latestVisionUpdate->first] = visionUpdate;
173 m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
174 } else {
175 m_poseEstimate = m_odometry.GetPose();
176 }
177 }
178#if defined(__GNUC__) && !defined(__clang__)
179#pragma GCC diagnostic pop
180#endif // defined(__GNUC__) && !defined(__clang__)
181
182 /**
183 * Resets the robot's rotation.
184 *
185 * @param rotation The rotation to reset to.
186 */
187#if defined(__GNUC__) && !defined(__clang__)
188#pragma GCC diagnostic push
189#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
190#endif // defined(__GNUC__) && !defined(__clang__)
191 void ResetRotation(const Rotation3d& rotation) {
192 m_odometry.ResetRotation(rotation);
193
194 const std::optional<std::pair<units::second_t, VisionUpdate>>
195 latestVisionUpdate =
196 m_visionUpdates.empty() ? std::nullopt
197 : std::optional{*m_visionUpdates.crbegin()};
198 m_odometryPoseBuffer.Clear();
199 m_visionUpdates.clear();
200
201 if (latestVisionUpdate) {
202 // apply vision compensation to the pose translation
203 const VisionUpdate visionUpdate{
204 Pose3d{latestVisionUpdate->second.visionPose.Translation(), rotation},
205 Pose3d{latestVisionUpdate->second.odometryPose.Translation(),
206 rotation}};
207 m_visionUpdates[latestVisionUpdate->first] = visionUpdate;
208 m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
209 } else {
210 m_poseEstimate = m_odometry.GetPose();
211 }
212 }
213#if defined(__GNUC__) && !defined(__clang__)
214#pragma GCC diagnostic pop
215#endif // defined(__GNUC__) && !defined(__clang__)
216
217 /**
218 * Gets the estimated robot pose.
219 *
220 * @return The estimated robot pose in meters.
221 */
222 Pose3d GetEstimatedPosition() const { return m_poseEstimate; }
223
224 /**
225 * Return the pose at a given timestamp, if the buffer is not empty.
226 *
227 * @param timestamp The pose's timestamp.
228 * @return The pose at the given timestamp (or std::nullopt if the buffer is
229 * empty).
230 */
231 std::optional<Pose3d> SampleAt(units::second_t timestamp) const {
232 // Step 0: If there are no odometry updates to sample, skip.
233 if (m_odometryPoseBuffer.GetInternalBuffer().empty()) {
234 return std::nullopt;
235 }
236
237 // Step 1: Make sure timestamp matches the sample from the odometry pose
238 // buffer. (When sampling, the buffer will always use a timestamp
239 // between the first and last timestamps)
240 units::second_t oldestOdometryTimestamp =
241 m_odometryPoseBuffer.GetInternalBuffer().front().first;
242 units::second_t newestOdometryTimestamp =
243 m_odometryPoseBuffer.GetInternalBuffer().back().first;
244 timestamp =
245 std::clamp(timestamp, oldestOdometryTimestamp, newestOdometryTimestamp);
246
247 // Step 2: If there are no applicable vision updates, use the odometry-only
248 // information.
249 if (m_visionUpdates.empty() || timestamp < m_visionUpdates.begin()->first) {
250 return m_odometryPoseBuffer.Sample(timestamp);
251 }
252
253 // Step 3: Get the latest vision update from before or at the timestamp to
254 // sample at.
255 // First, find the iterator past the sample timestamp, then go back one.
256 // Note that upper_bound() won't return begin() because we check begin()
257 // earlier.
258 auto floorIter = m_visionUpdates.upper_bound(timestamp);
259 --floorIter;
260 auto visionUpdate = floorIter->second;
261
262 // Step 4: Get the pose measured by odometry at the time of the sample.
263 auto odometryEstimate = m_odometryPoseBuffer.Sample(timestamp);
264
265 // Step 5: Apply the vision compensation to the odometry pose.
266 // TODO Replace with std::optional::transform() in C++23
267 if (odometryEstimate) {
268 return visionUpdate.Compensate(*odometryEstimate);
269 }
270 return std::nullopt;
271 }
272
  /**
   * Adds a vision measurement to the Kalman Filter. This will correct
   * the odometry pose estimate while still accounting for measurement noise.
   *
   * This method can be called as infrequently as you want, as long as you are
   * calling Update() every loop.
   *
   * To promote stability of the pose estimate and make it robust to bad vision
   * data, we recommend only adding vision measurements that are already within
   * one meter or so of the current pose estimate.
   *
   * @param visionRobotPose The pose of the robot as measured by the vision
   *   camera.
   * @param timestamp The timestamp of the vision measurement in seconds. Note
   *   that if you don't use your own time source by calling UpdateWithTime(),
   *   then you must use a timestamp with an epoch since FPGA startup (i.e.,
   *   the epoch of this timestamp is the same epoch as
   *   frc::Timer::GetFPGATimestamp()). This means that you should use
   *   frc::Timer::GetFPGATimestamp() as your time source in this case.
   */
  void AddVisionMeasurement(const Pose3d& visionRobotPose,
                            units::second_t timestamp) {
    // Step 0: If this measurement is old enough to be outside the pose buffer's
    // timespan, skip.
    if (m_odometryPoseBuffer.GetInternalBuffer().empty() ||
        m_odometryPoseBuffer.GetInternalBuffer().front().first -
                kBufferDuration >
            timestamp) {
      return;
    }

    // Step 1: Clean up any old entries
    CleanUpVisionUpdates();

    // Step 2: Get the pose measured by odometry at the moment the vision
    // measurement was made.
    auto odometrySample = m_odometryPoseBuffer.Sample(timestamp);

    if (!odometrySample) {
      return;
    }

    // Step 3: Get the vision-compensated pose estimate at the moment the vision
    // measurement was made.
    auto visionSample = SampleAt(timestamp);

    if (!visionSample) {
      return;
    }

    // Step 4: Measure the transform between the old pose estimate and the
    // vision pose.
    auto transform = visionRobotPose - visionSample.value();

    // Step 5: We should not trust the transform entirely, so instead we scale
    // this transform by a Kalman gain matrix representing how much we trust
    // vision measurements compared to our current pose.
    frc::Vectord<6> k_times_transform =
        m_vision_K * frc::Vectord<6>{transform.X().value(),
                                     transform.Y().value(),
                                     transform.Z().value(),
                                     transform.Rotation().X().value(),
                                     transform.Rotation().Y().value(),
                                     transform.Rotation().Z().value()};

    // Step 6: Convert back to Transform3d.
    Transform3d scaledTransform{
        units::meter_t{k_times_transform(0)},
        units::meter_t{k_times_transform(1)},
        units::meter_t{k_times_transform(2)},
        Rotation3d{units::radian_t{k_times_transform(3)},
                   units::radian_t{k_times_transform(4)},
                   units::radian_t{k_times_transform(5)}}};

    // Step 7: Calculate and record the vision update.
    VisionUpdate visionUpdate{*visionSample + scaledTransform, *odometrySample};
    m_visionUpdates[timestamp] = visionUpdate;

    // Step 8: Remove later vision measurements. (Matches previous behavior)
    auto firstAfter = m_visionUpdates.upper_bound(timestamp);
    m_visionUpdates.erase(firstAfter, m_visionUpdates.end());

    // Step 9: Update latest pose estimate. Since we cleared all updates after
    // this vision update, it's guaranteed to be the latest vision update.
    m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
  }
359
360 /**
361 * Adds a vision measurement to the Kalman Filter. This will correct
362 * the odometry pose estimate while still accounting for measurement noise.
363 *
364 * This method can be called as infrequently as you want, as long as you are
365 * calling Update() every loop.
366 *
367 * To promote stability of the pose estimate and make it robust to bad vision
368 * data, we recommend only adding vision measurements that are already within
369 * one meter or so of the current pose estimate.
370 *
371 * Note that the vision measurement standard deviations passed into this
372 * method will continue to apply to future measurements until a subsequent
373 * call to SetVisionMeasurementStdDevs() or this method.
374 *
375 * @param visionRobotPose The pose of the robot as measured by the vision
376 * camera.
377 * @param timestamp The timestamp of the vision measurement in seconds. Note
378 * that if you don't use your own time source by calling UpdateWithTime(),
379 * then you must use a timestamp with an epoch since FPGA startup (i.e.,
380 * the epoch of this timestamp is the same epoch as
381 * frc::Timer::GetFPGATimestamp(). This means that you should use
382 * frc::Timer::GetFPGATimestamp() as your time source in this case.
383 * @param visionMeasurementStdDevs Standard deviations of the vision pose
384 * measurement (x position in meters, y position in meters, and heading in
385 * radians). Increase these numbers to trust the vision pose measurement
386 * less.
387 */
389 const Pose3d& visionRobotPose, units::second_t timestamp,
390 const wpi::array<double, 4>& visionMeasurementStdDevs) {
391 SetVisionMeasurementStdDevs(visionMeasurementStdDevs);
392 AddVisionMeasurement(visionRobotPose, timestamp);
393 }
394
395 /**
396 * Updates the pose estimator with wheel encoder and gyro information. This
397 * should be called every loop.
398 *
399 * @param gyroAngle The current gyro angle.
400 * @param wheelPositions The distances traveled by the encoders.
401 *
402 * @return The estimated pose of the robot in meters.
403 */
404 Pose3d Update(const Rotation3d& gyroAngle,
405 const WheelPositions& wheelPositions) {
406 return UpdateWithTime(wpi::math::MathSharedStore::GetTimestamp(), gyroAngle,
407 wheelPositions);
408 }
409
410 /**
411 * Updates the pose estimator with wheel encoder and gyro information. This
412 * should be called every loop.
413 *
414 * @param currentTime The time at which this method was called.
415 * @param gyroAngle The current gyro angle.
416 * @param wheelPositions The distances traveled by the encoders.
417 *
418 * @return The estimated pose of the robot in meters.
419 */
420 Pose3d UpdateWithTime(units::second_t currentTime,
421 const Rotation3d& gyroAngle,
422 const WheelPositions& wheelPositions) {
423 auto odometryEstimate = m_odometry.Update(gyroAngle, wheelPositions);
424
425 m_odometryPoseBuffer.AddSample(currentTime, odometryEstimate);
426
427 if (m_visionUpdates.empty()) {
428 m_poseEstimate = odometryEstimate;
429 } else {
430 auto visionUpdate = m_visionUpdates.rbegin()->second;
431 m_poseEstimate = visionUpdate.Compensate(odometryEstimate);
432 }
433
434 return GetEstimatedPosition();
435 }
436
437 private:
438 /**
439 * Removes stale vision updates that won't affect sampling.
440 */
441 void CleanUpVisionUpdates() {
442 // Step 0: If there are no odometry samples, skip.
443 if (m_odometryPoseBuffer.GetInternalBuffer().empty()) {
444 return;
445 }
446
447 // Step 1: Find the oldest timestamp that needs a vision update.
448 units::second_t oldestOdometryTimestamp =
449 m_odometryPoseBuffer.GetInternalBuffer().front().first;
450
451 // Step 2: If there are no vision updates before that timestamp, skip.
452 if (m_visionUpdates.empty() ||
453 oldestOdometryTimestamp < m_visionUpdates.begin()->first) {
454 return;
455 }
456
457 // Step 3: Find the newest vision update timestamp before or at the oldest
458 // timestamp.
459 // First, find the iterator past the oldest odometry timestamp, then go
460 // back one. Note that upper_bound() won't return begin() because we check
461 // begin() earlier.
462 auto newestNeededVisionUpdate =
463 m_visionUpdates.upper_bound(oldestOdometryTimestamp);
464 --newestNeededVisionUpdate;
465
466 // Step 4: Remove all entries strictly before the newest timestamp we need.
467 m_visionUpdates.erase(m_visionUpdates.begin(), newestNeededVisionUpdate);
468 }
469
470 struct VisionUpdate {
471 // The vision-compensated pose estimate
472 Pose3d visionPose;
473
474 // The pose estimated based solely on odometry
475 Pose3d odometryPose;
476
477 /**
478 * Returns the vision-compensated version of the pose. Specifically, changes
479 * the pose from being relative to this record's odometry pose to being
480 * relative to this record's vision pose.
481 *
482 * @param pose The pose to compensate.
483 * @return The compensated pose.
484 */
485 Pose3d Compensate(const Pose3d& pose) const {
486 auto delta = pose - odometryPose;
487 return visionPose + delta;
488 }
489 };
490
491 static constexpr units::second_t kBufferDuration = 1.5_s;
492
493 Odometry3d<WheelSpeeds, WheelPositions>& m_odometry;
494
495 // Diagonal of process noise covariance matrix Q
497
498 // Kalman gain matrix K
499 Eigen::DiagonalMatrix<double, 6> m_vision_K =
500 Eigen::DiagonalMatrix<double, 6>::Zero();
501
502 // Maps timestamps to odometry-only pose estimates
503 TimeInterpolatableBuffer<Pose3d> m_odometryPoseBuffer{kBufferDuration};
504 // Maps timestamps to vision updates
505 // Always contains one entry before the oldest entry in m_odometryPoseBuffer,
506 // unless there have been no vision measurements after the last reset. May
507 // contain one entry while m_odometryPoseBuffer is empty to correct for
508 // translation/rotation after a call to ResetRotation/ResetTranslation.
509 std::map<units::second_t, VisionUpdate> m_visionUpdates;
510
511 Pose3d m_poseEstimate;
512};
513
514} // namespace frc
#define WPILIB_DLLEXPORT
Definition SymbolExports.h:36
Helper class that converts a chassis velocity (dx, dy, and dtheta components) into individual wheel s...
Definition Kinematics.h:25
Class for odometry.
Definition Odometry3d.h:33
Represents a 3D pose containing translational and rotational elements.
Definition Pose3d.h:31
This class wraps odometry to fuse latency-compensated vision measurements with encoder measurements.
Definition PoseEstimator3d.h:50
void ResetRotation(const Rotation3d &rotation)
Resets the robot's rotation.
Definition PoseEstimator3d.h:191
PoseEstimator3d(Kinematics< WheelSpeeds, WheelPositions > &kinematics, Odometry3d< WheelSpeeds, WheelPositions > &odometry, const wpi::array< double, 4 > &stateStdDevs, const wpi::array< double, 4 > &visionMeasurementStdDevs)
Constructs a PoseEstimator3d.
Definition PoseEstimator3d.h:69
Pose3d UpdateWithTime(units::second_t currentTime, const Rotation3d &gyroAngle, const WheelPositions &wheelPositions)
Updates the pose estimator with wheel encoder and gyro information.
Definition PoseEstimator3d.h:420
void ResetTranslation(const Translation3d &translation)
Resets the robot's translation.
Definition PoseEstimator3d.h:156
std::optional< Pose3d > SampleAt(units::second_t timestamp) const
Return the pose at a given timestamp, if the buffer is not empty.
Definition PoseEstimator3d.h:231
void SetVisionMeasurementStdDevs(const wpi::array< double, 4 > &visionMeasurementStdDevs)
Sets the pose estimator's trust in vision measurements.
Definition PoseEstimator3d.h:93
void AddVisionMeasurement(const Pose3d &visionRobotPose, units::second_t timestamp)
Adds a vision measurement to the Kalman Filter.
Definition PoseEstimator3d.h:293
void ResetPosition(const Rotation3d &gyroAngle, const WheelPositions &wheelPositions, const Pose3d &pose)
Resets the robot's position on the field.
Definition PoseEstimator3d.h:126
Pose3d Update(const Rotation3d &gyroAngle, const WheelPositions &wheelPositions)
Updates the pose estimator with wheel encoder and gyro information.
Definition PoseEstimator3d.h:404
void ResetPose(const Pose3d &pose)
Resets the robot's pose.
Definition PoseEstimator3d.h:140
void AddVisionMeasurement(const Pose3d &visionRobotPose, units::second_t timestamp, const wpi::array< double, 4 > &visionMeasurementStdDevs)
Adds a vision measurement to the Kalman Filter.
Definition PoseEstimator3d.h:388
Pose3d GetEstimatedPosition() const
Gets the estimated robot pose.
Definition PoseEstimator3d.h:222
A rotation in a 3D coordinate frame, represented by a quaternion.
Definition Rotation3d.h:72
Represents a transformation for a Pose3d in the pose's frame.
Definition Transform3d.h:23
Represents a translation in 3D space.
Definition Translation3d.h:31
This class is a wrapper around std::array that does compile time size checking.
Definition array.h:26
static units::second_t GetTimestamp()
Definition MathShared.h:84
static void ReportUsage(MathUsageId id, int count)
Definition MathShared.h:80
Definition CAN.h:11
Eigen::Vector< double, Size > Vectord
Definition EigenCore.h:12
constexpr empty_array_t empty_array
Definition array.h:16