WPILibC++ 2026.1.1
PoseEstimator3d.h
// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

#pragma once

#include <algorithm>
#include <cmath>
#include <map>
#include <optional>
#include <utility>
#include <vector>

#include <Eigen/Core>
#include <wpi/SymbolExports.h>
#include <wpi/array.h>

#include "frc/EigenCore.h"
#include "frc/geometry/Pose3d.h"
#include "frc/geometry/Rotation3d.h"
#include "frc/geometry/Transform3d.h"
#include "frc/geometry/Translation3d.h"
#include "frc/interpolation/TimeInterpolatableBuffer.h"
#include "frc/kinematics/Kinematics.h"
#include "frc/kinematics/Odometry3d.h"
#include "units/time.h"
#include "wpimath/MathShared.h"

namespace frc {

/**
 * This class wraps odometry to fuse latency-compensated vision measurements
 * with encoder measurements. Robot code should not use this class directly;
 * instead, use the particular type for your drivetrain (e.g.,
 * DifferentialDrivePoseEstimator3d). It will correct for noisy vision
 * measurements and encoder drift. It is intended to be an easy drop-in
 * replacement for Odometry3d. It is also intended to be an easy replacement
 * for PoseEstimator, only requiring the addition of a standard deviation for Z
 * and appropriate conversions between 2D and 3D versions of geometry classes.
 * (See Pose3d(Pose2d), Rotation3d(Rotation2d), Translation3d(Translation2d),
 * and Pose3d.ToPose2d().)
 *
 * Update() should be called every robot loop.
 *
 * AddVisionMeasurement() can be called as infrequently as you want; if you
 * never call it, then this class will behave like regular encoder odometry.
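 *
 * A minimal loop sketch (a hedged example; the gyro object and the
 * GetWheelPositions()/GetVisionPoseEstimate() helpers are hypothetical
 * stand-ins for your own sensor code):
 * @code
 * estimator.Update(gyro.GetRotation3d(), GetWheelPositions());
 * if (auto vision = GetVisionPoseEstimate()) {
 *   estimator.AddVisionMeasurement(vision->pose, vision->timestamp);
 * }
 * @endcode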
 *
 * @tparam WheelSpeeds Wheel speeds type.
 * @tparam WheelPositions Wheel positions type.
 */
template <typename WheelSpeeds, typename WheelPositions>
class WPILIB_DLLEXPORT PoseEstimator3d {
 public:
  /**
   * Constructs a PoseEstimator3d.
   *
   * @warning The initial pose estimate will always be the default pose,
   * regardless of the odometry's current pose.
   *
   * @param kinematics A correctly-configured kinematics object for your
   * drivetrain.
   * @param odometry A correctly-configured odometry object for your drivetrain.
   * @param stateStdDevs Standard deviations of the pose estimate (x position in
   * meters, y position in meters, z position in meters, and angle in radians).
   * Increase these numbers to trust your state estimate less.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose
   * measurement (x position in meters, y position in meters, z position in
   * meters, and angle in radians). Increase these numbers to trust the vision
   * pose measurement less.
   */
  PoseEstimator3d(Kinematics<WheelSpeeds, WheelPositions>& kinematics,
                  Odometry3d<WheelSpeeds, WheelPositions>& odometry,
                  const wpi::array<double, 4>& stateStdDevs,
                  const wpi::array<double, 4>& visionMeasurementStdDevs)
      : m_odometry(odometry) {
    for (size_t i = 0; i < 4; ++i) {
      m_q[i] = stateStdDevs[i] * stateStdDevs[i];
    }

    SetVisionMeasurementStdDevs(visionMeasurementStdDevs);
  }

  /**
   * Sets the pose estimator's trust in vision measurements. This might be used
   * to change trust in vision measurements after the autonomous period, or to
   * change trust as distance to a vision target increases.
   *
   * @param visionMeasurementStdDevs Standard deviations of the vision pose
   * measurement (x position in meters, y position in meters, z position in
   * meters, and angle in radians). Increase these numbers to trust the vision
   * pose measurement less.
   */
  void SetVisionMeasurementStdDevs(
      const wpi::array<double, 4>& visionMeasurementStdDevs) {
    wpi::array<double, 4> r{wpi::empty_array};
    for (size_t i = 0; i < 4; ++i) {
      r[i] = visionMeasurementStdDevs[i] * visionMeasurementStdDevs[i];
    }

    // Solve for closed form Kalman gain for continuous Kalman filter with
    // A = 0 and C = I. See wpimath/algorithms.md.
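    //
    // Sketch of that result (assuming the derivation in algorithms.md): with
    // A = 0 and C = I, the continuous algebraic Riccati equation reduces
    // per-axis to p^2 = q * r, so p = sqrt(q * r). Substituting into the
    // measurement-update gain k = p / (p + r) and multiplying numerator and
    // denominator by sqrt(q) / sqrt(r) gives k = q / (q + sqrt(q * r)).
    // For example, with q = 0.01 (state sigma 0.1 m) and r = 0.81 (vision
    // sigma 0.9 m), sqrt(q * r) = 0.09, so k = 0.01 / 0.10 = 0.1: the
    // estimate moves 10% of the way toward each vision measurement on that
    // axis.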
    for (size_t row = 0; row < 4; ++row) {
      if (m_q[row] == 0.0) {
        m_visionK(row, row) = 0.0;
      } else {
        m_visionK(row, row) =
            m_q[row] / (m_q[row] + std::sqrt(m_q[row] * r[row]));
      }
    }
    double angle_gain = m_visionK(3, 3);
    m_visionK(4, 4) = angle_gain;
    m_visionK(5, 5) = angle_gain;
  }

  /**
   * Resets the robot's position on the field.
   *
   * The gyroscope angle does not need to be reset in the user's robot code.
   * The library automatically takes care of offsetting the gyro angle.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The distances traveled by the encoders.
   * @param pose The estimated pose of the robot on the field.
   */
  void ResetPosition(const Rotation3d& gyroAngle,
                     const WheelPositions& wheelPositions, const Pose3d& pose) {
    // Reset state estimate and error covariance
    m_odometry.ResetPosition(gyroAngle, wheelPositions, pose);
    m_odometryPoseBuffer.Clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.GetPose();
  }

  /**
   * Resets the robot's pose.
   *
   * @param pose The pose to reset to.
   */
  void ResetPose(const Pose3d& pose) {
    m_odometry.ResetPose(pose);
    m_odometryPoseBuffer.Clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.GetPose();
  }

  /**
   * Resets the robot's translation.
   *
   * @param translation The translation to reset to.
   */
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif  // defined(__GNUC__) && !defined(__clang__)
  void ResetTranslation(const Translation3d& translation) {
    m_odometry.ResetTranslation(translation);

    const std::optional<std::pair<units::second_t, VisionUpdate>>
        latestVisionUpdate =
            m_visionUpdates.empty() ? std::nullopt
                                    : std::optional{*m_visionUpdates.crbegin()};
    m_odometryPoseBuffer.Clear();
    m_visionUpdates.clear();

    if (latestVisionUpdate) {
      // apply vision compensation to the pose rotation
      const VisionUpdate visionUpdate{
          Pose3d{translation, latestVisionUpdate->second.visionPose.Rotation()},
          Pose3d{translation,
                 latestVisionUpdate->second.odometryPose.Rotation()}};
      m_visionUpdates[latestVisionUpdate->first] = visionUpdate;
      m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
    } else {
      m_poseEstimate = m_odometry.GetPose();
    }
  }
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic pop
#endif  // defined(__GNUC__) && !defined(__clang__)

  /**
   * Resets the robot's rotation.
   *
   * @param rotation The rotation to reset to.
   */
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif  // defined(__GNUC__) && !defined(__clang__)
  void ResetRotation(const Rotation3d& rotation) {
    m_odometry.ResetRotation(rotation);

    const std::optional<std::pair<units::second_t, VisionUpdate>>
        latestVisionUpdate =
            m_visionUpdates.empty() ? std::nullopt
                                    : std::optional{*m_visionUpdates.crbegin()};
    m_odometryPoseBuffer.Clear();
    m_visionUpdates.clear();

    if (latestVisionUpdate) {
      // apply vision compensation to the pose translation
      const VisionUpdate visionUpdate{
          Pose3d{latestVisionUpdate->second.visionPose.Translation(), rotation},
          Pose3d{latestVisionUpdate->second.odometryPose.Translation(),
                 rotation}};
      m_visionUpdates[latestVisionUpdate->first] = visionUpdate;
      m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
    } else {
      m_poseEstimate = m_odometry.GetPose();
    }
  }
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic pop
#endif  // defined(__GNUC__) && !defined(__clang__)

  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  Pose3d GetEstimatedPosition() const { return m_poseEstimate; }

  /**
   * Return the pose at a given timestamp, if the buffer is not empty.
   *
   * @param timestamp The pose's timestamp.
   * @return The pose at the given timestamp (or std::nullopt if the buffer is
   * empty).
   */
  std::optional<Pose3d> SampleAt(units::second_t timestamp) const {
    // Step 0: If there are no odometry updates to sample, skip.
    if (m_odometryPoseBuffer.GetInternalBuffer().empty()) {
      return std::nullopt;
    }

    // Step 1: Make sure timestamp matches the sample from the odometry pose
    // buffer. (When sampling, the buffer will always use a timestamp
    // between the first and last timestamps)
    units::second_t oldestOdometryTimestamp =
        m_odometryPoseBuffer.GetInternalBuffer().front().first;
    units::second_t newestOdometryTimestamp =
        m_odometryPoseBuffer.GetInternalBuffer().back().first;
    timestamp = std::clamp(timestamp, oldestOdometryTimestamp,
                           newestOdometryTimestamp);

    // Step 2: If there are no applicable vision updates, use the odometry-only
    // information.
    if (m_visionUpdates.empty() || timestamp < m_visionUpdates.begin()->first) {
      return m_odometryPoseBuffer.Sample(timestamp);
    }

    // Step 3: Get the latest vision update from before or at the timestamp to
    // sample at.
    // First, find the iterator past the sample timestamp, then go back one.
    // Note that upper_bound() won't return begin() because we check begin()
    // earlier.
    auto floorIter = m_visionUpdates.upper_bound(timestamp);
    --floorIter;
    auto visionUpdate = floorIter->second;

    // Step 4: Get the pose measured by odometry at the time of the sample.
    auto odometryEstimate = m_odometryPoseBuffer.Sample(timestamp);

    // Step 5: Apply the vision compensation to the odometry pose.
    // TODO Replace with std::optional::transform() in C++23
    if (odometryEstimate) {
      return visionUpdate.Compensate(*odometryEstimate);
    }
    return std::nullopt;
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct
   * the odometry pose estimate while still accounting for measurement noise.
   *
   * This method can be called as infrequently as you want, as long as you are
   * calling Update() every loop.
   *
   * To promote stability of the pose estimate and make it robust to bad vision
   * data, we recommend only adding vision measurements that are already within
   * one meter or so of the current pose estimate.
   *
   * @param visionRobotPose The pose of the robot as measured by the vision
   * camera.
   * @param timestamp The timestamp of the vision measurement in seconds. Note
   * that if you don't use your own time source by calling UpdateWithTime(),
   * then you must use a timestamp with an epoch since FPGA startup (i.e.,
   * the epoch of this timestamp is the same epoch as
   * frc::Timer::GetFPGATimestamp()). This means that you should use
   * frc::Timer::GetFPGATimestamp() as your time source in this case.
   */
  void AddVisionMeasurement(const Pose3d& visionRobotPose,
                            units::second_t timestamp) {
    // Step 0: If this measurement is old enough to be outside the pose
    // buffer's timespan, skip.
    if (m_odometryPoseBuffer.GetInternalBuffer().empty() ||
        m_odometryPoseBuffer.GetInternalBuffer().front().first -
                kBufferDuration >
            timestamp) {
      return;
    }

    // Step 1: Clean up any old entries
    CleanUpVisionUpdates();

    // Step 2: Get the pose measured by odometry at the moment the vision
    // measurement was made.
    auto odometrySample = m_odometryPoseBuffer.Sample(timestamp);

    if (!odometrySample) {
      return;
    }

    // Step 3: Get the vision-compensated pose estimate at the moment the
    // vision measurement was made.
    auto visionSample = SampleAt(timestamp);

    if (!visionSample) {
      return;
    }

    // Step 4: Measure the transform between the old pose estimate and the
    // vision pose.
    auto transform = visionRobotPose - visionSample.value();

    // Step 5: We should not trust the transform entirely, so instead we scale
    // this transform by a Kalman gain matrix representing how much we trust
    // vision measurements compared to our current pose.
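    // (m_visionK is diagonal, so this multiplication scales each translation
    // and rotation component of the delta independently by its per-axis gain.)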
    frc::Vectord<6> k_times_transform =
        m_visionK * frc::Vectord<6>{transform.X().value(),
                                    transform.Y().value(),
                                    transform.Z().value(),
                                    transform.Rotation().X().value(),
                                    transform.Rotation().Y().value(),
                                    transform.Rotation().Z().value()};

    // Step 6: Convert back to Transform3d.
    Transform3d scaledTransform{
        units::meter_t{k_times_transform(0)},
        units::meter_t{k_times_transform(1)},
        units::meter_t{k_times_transform(2)},
        Rotation3d{units::radian_t{k_times_transform(3)},
                   units::radian_t{k_times_transform(4)},
                   units::radian_t{k_times_transform(5)}}};

    // Step 7: Calculate and record the vision update.
    VisionUpdate visionUpdate{*visionSample + scaledTransform, *odometrySample};
    m_visionUpdates[timestamp] = visionUpdate;

    // Step 8: Remove later vision measurements. (Matches previous behavior)
    auto firstAfter = m_visionUpdates.upper_bound(timestamp);
    m_visionUpdates.erase(firstAfter, m_visionUpdates.end());

    // Step 9: Update latest pose estimate. Since we cleared all updates after
    // this vision update, it's guaranteed to be the latest vision update.
    m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct
   * the odometry pose estimate while still accounting for measurement noise.
   *
   * This method can be called as infrequently as you want, as long as you are
   * calling Update() every loop.
   *
   * To promote stability of the pose estimate and make it robust to bad vision
   * data, we recommend only adding vision measurements that are already within
   * one meter or so of the current pose estimate.
   *
   * Note that the vision measurement standard deviations passed into this
   * method will continue to apply to future measurements until a subsequent
   * call to SetVisionMeasurementStdDevs() or this method.
   *
   * @param visionRobotPose The pose of the robot as measured by the vision
   * camera.
   * @param timestamp The timestamp of the vision measurement in seconds. Note
   * that if you don't use your own time source by calling UpdateWithTime(),
   * then you must use a timestamp with an epoch since FPGA startup (i.e.,
   * the epoch of this timestamp is the same epoch as
   * frc::Timer::GetFPGATimestamp()). This means that you should use
   * frc::Timer::GetFPGATimestamp() as your time source in this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose
   * measurement (x position in meters, y position in meters, z position in
   * meters, and angle in radians). Increase these numbers to trust the vision
   * pose measurement less.
   */
  void AddVisionMeasurement(
      const Pose3d& visionRobotPose, units::second_t timestamp,
      const wpi::array<double, 4>& visionMeasurementStdDevs) {
    SetVisionMeasurementStdDevs(visionMeasurementStdDevs);
    AddVisionMeasurement(visionRobotPose, timestamp);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This
   * should be called every loop.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The distances traveled by the encoders.
   *
   * @return The estimated pose of the robot in meters.
   */
  Pose3d Update(const Rotation3d& gyroAngle,
                const WheelPositions& wheelPositions) {
    return UpdateWithTime(wpi::math::MathSharedStore::GetTimestamp(), gyroAngle,
                          wheelPositions);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This
   * should be called every loop.
   *
   * @param currentTime The time at which this method was called.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The distances traveled by the encoders.
   *
   * @return The estimated pose of the robot in meters.
   */
  Pose3d UpdateWithTime(units::second_t currentTime,
                        const Rotation3d& gyroAngle,
                        const WheelPositions& wheelPositions) {
    auto odometryEstimate = m_odometry.Update(gyroAngle, wheelPositions);

    m_odometryPoseBuffer.AddSample(currentTime, odometryEstimate);

    if (m_visionUpdates.empty()) {
      m_poseEstimate = odometryEstimate;
    } else {
      auto visionUpdate = m_visionUpdates.rbegin()->second;
      m_poseEstimate = visionUpdate.Compensate(odometryEstimate);
    }

    return GetEstimatedPosition();
  }

 private:
  /**
   * Removes stale vision updates that won't affect sampling.
   */
  void CleanUpVisionUpdates() {
    // Step 0: If there are no odometry samples, skip.
    if (m_odometryPoseBuffer.GetInternalBuffer().empty()) {
      return;
    }

    // Step 1: Find the oldest timestamp that needs a vision update.
    units::second_t oldestOdometryTimestamp =
        m_odometryPoseBuffer.GetInternalBuffer().front().first;

    // Step 2: If there are no vision updates before that timestamp, skip.
    if (m_visionUpdates.empty() ||
        oldestOdometryTimestamp < m_visionUpdates.begin()->first) {
      return;
    }

    // Step 3: Find the newest vision update timestamp before or at the oldest
    // timestamp.
    // First, find the iterator past the oldest odometry timestamp, then go
    // back one. Note that upper_bound() won't return begin() because we check
    // begin() earlier.
    auto newestNeededVisionUpdate =
        m_visionUpdates.upper_bound(oldestOdometryTimestamp);
    --newestNeededVisionUpdate;

    // Step 4: Remove all entries strictly before the newest timestamp we need.
    m_visionUpdates.erase(m_visionUpdates.begin(), newestNeededVisionUpdate);
  }

  struct VisionUpdate {
    // The vision-compensated pose estimate
    Pose3d visionPose;

    // The pose estimated based solely on odometry
    Pose3d odometryPose;

    /**
     * Returns the vision-compensated version of the pose. Specifically,
     * changes the pose from being relative to this record's odometry pose to
     * being relative to this record's vision pose.
     *
     * @param pose The pose to compensate.
     * @return The compensated pose.
     */
    Pose3d Compensate(const Pose3d& pose) const {
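      // Pose3d subtraction yields the Transform3d that maps odometryPose onto
      // pose; applying that same transform to visionPose re-bases the pose
      // onto the vision-corrected estimate.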
      auto delta = pose - odometryPose;
      return visionPose + delta;
    }
  };

  static constexpr units::second_t kBufferDuration = 1.5_s;

  Odometry3d<WheelSpeeds, WheelPositions>& m_odometry;
  wpi::array<double, 4> m_q{wpi::empty_array};
  frc::Matrixd<6, 6> m_visionK = frc::Matrixd<6, 6>::Zero();

  // Maps timestamps to odometry-only pose estimates
  TimeInterpolatableBuffer<Pose3d> m_odometryPoseBuffer{kBufferDuration};
  // Maps timestamps to vision updates
  // Always contains one entry before the oldest entry in m_odometryPoseBuffer,
  // unless there have been no vision measurements after the last reset. May
  // contain one entry while m_odometryPoseBuffer is empty to correct for
  // translation/rotation after a call to ResetRotation/ResetTranslation.
  std::map<units::second_t, VisionUpdate> m_visionUpdates;

  Pose3d m_poseEstimate;
};

}  // namespace frc
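
A minimal end-to-end usage sketch, not part of the header above. It assumes
the DifferentialDrivePoseEstimator3d constructor mirrors its 2D counterpart
(kinematics, gyro angle, encoder distances, initial pose, then the two
4-element standard-deviation arrays), and the gyro, encoder, and camera
values are placeholders for your own sensor code:

#include <frc/Timer.h>
#include <frc/estimator/DifferentialDrivePoseEstimator3d.h>
#include <frc/kinematics/DifferentialDriveKinematics.h>

frc::DifferentialDriveKinematics kinematics{0.69_m};  // track width
frc::DifferentialDrivePoseEstimator3d estimator{
    kinematics, frc::Rotation3d{}, 0_m, 0_m, frc::Pose3d{},
    {0.1, 0.1, 0.1, 0.1},   // state std devs: x, y, z (m), angle (rad)
    {0.9, 0.9, 0.9, 0.9}};  // vision std devs: x, y, z (m), angle (rad)

// Every robot loop:
estimator.Update(gyro.GetRotation3d(), leftDistance, rightDistance);

// Whenever a vision estimate arrives (timestamp on the FPGA epoch):
estimator.AddVisionMeasurement(visionPose, frc::Timer::GetFPGATimestamp());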