PoseEstimator3d.h
// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

#pragma once

#include <algorithm>
#include <cmath>
#include <map>
#include <optional>
#include <utility>
#include <vector>

#include <Eigen/Core>
#include <wpi/SymbolExports.h>
#include <wpi/array.h>

#include "frc/EigenCore.h"
#include "frc/geometry/Pose2d.h"
#include "frc/geometry/Pose3d.h"
#include "frc/interpolation/TimeInterpolatableBuffer.h"
#include "frc/kinematics/Kinematics.h"
#include "frc/kinematics/Odometry3d.h"
#include "units/time.h"
#include "wpimath/MathShared.h"

namespace frc {

/**
 * This class wraps odometry to fuse latency-compensated vision measurements
 * with encoder measurements. Robot code should not use this directly;
 * instead, use the particular type for your drivetrain (e.g.,
 * DifferentialDrivePoseEstimator3d). It corrects for noisy vision
 * measurements and encoder drift. It is intended to be an easy drop-in
 * replacement for Odometry3d, and an easy replacement for PoseEstimator,
 * only requiring the addition of a standard deviation for Z and appropriate
 * conversions between 2D and 3D versions of geometry classes. (See
 * Pose3d(Pose2d), Rotation3d(Rotation2d), Translation3d(Translation2d), and
 * Pose3d.ToPose2d().)
 *
 * Update() should be called every robot loop.
 *
 * AddVisionMeasurement() can be called as infrequently as you want; if you
 * never call it, then this class will behave like regular encoder odometry.
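 *
 * A minimal call-pattern sketch (hedged: the estimator, gyro accessor, and
 * wheel-position accessor below are placeholders for your drivetrain-specific
 * subclass and hardware, not names defined by this header):
 * @code
 * // Every loop iteration: feed the estimator odometry inputs.
 * estimator.Update(GetGyroRotation3d(), GetWheelPositions());
 *
 * // Whenever a vision result arrives: fuse it, stamped with its
 * // FPGA-epoch capture time.
 * estimator.AddVisionMeasurement(visionPose, captureTime);
 *
 * // Read back the fused estimate.
 * frc::Pose3d pose = estimator.GetEstimatedPosition();
 * @endcode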
 *
 * @tparam WheelSpeeds Wheel speeds type.
 * @tparam WheelPositions Wheel positions type.
 */
template <typename WheelSpeeds, typename WheelPositions>
class WPILIB_DLLEXPORT PoseEstimator3d {
 public:
  /**
   * Constructs a PoseEstimator3d.
   *
   * @warning The initial pose estimate will always be the default pose,
   * regardless of the odometry's current pose.
   *
   * @param kinematics A correctly-configured kinematics object for your
   * drivetrain.
   * @param odometry A correctly-configured odometry object for your drivetrain.
   * @param stateStdDevs Standard deviations of the pose estimate (x position in
   * meters, y position in meters, z position in meters, and angle in radians).
   * Increase these numbers to trust your state estimate less.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose
   * measurement (x position in meters, y position in meters, z position in
   * meters, and angle in radians). Increase these numbers to trust the
   * vision pose measurement less.
   */
  PoseEstimator3d(Kinematics<WheelSpeeds, WheelPositions>& kinematics,
                  Odometry3d<WheelSpeeds, WheelPositions>& odometry,
                  const wpi::array<double, 4>& stateStdDevs,
                  const wpi::array<double, 4>& visionMeasurementStdDevs)
      : m_odometry(odometry) {
    for (size_t i = 0; i < 4; ++i) {
      m_q[i] = stateStdDevs[i] * stateStdDevs[i];
    }

    SetVisionMeasurementStdDevs(visionMeasurementStdDevs);
  }

  /**
   * Sets the pose estimator's trust in vision measurements. This might be used
   * to change trust in vision measurements after the autonomous period, or to
   * change trust as distance to a vision target increases.
   *
   * @param visionMeasurementStdDevs Standard deviations of the vision pose
   * measurement (x position in meters, y position in meters, z position in
   * meters, and angle in radians). Increase these numbers to trust the
   * vision pose measurement less.
   */
  void SetVisionMeasurementStdDevs(
      const wpi::array<double, 4>& visionMeasurementStdDevs) {
    wpi::array<double, 4> r{wpi::empty_array};
    for (size_t i = 0; i < 4; ++i) {
      r[i] = visionMeasurementStdDevs[i] * visionMeasurementStdDevs[i];
    }

    // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
    // and C = I. See wpimath/algorithms.md.
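    //
    // With q = stateStdDev^2 and r = visionStdDev^2, the per-axis gain below
    // is k = q / (q + sqrt(q * r)), which approaches 1 as r -> 0 (trust
    // vision fully) and approaches 0 as r -> infinity (ignore vision).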
    for (size_t row = 0; row < 4; ++row) {
      if (m_q[row] == 0.0) {
        m_visionK(row, row) = 0.0;
      } else {
        m_visionK(row, row) =
            m_q[row] / (m_q[row] + std::sqrt(m_q[row] * r[row]));
      }
    }
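    // Only one angular standard deviation is supplied, so the gain computed
    // for the first rotation row is reused for the other two rotation axes.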
    double angle_gain = m_visionK(3, 3);
    m_visionK(4, 4) = angle_gain;
    m_visionK(5, 5) = angle_gain;
  }

  /**
   * Resets the robot's position on the field.
   *
   * The gyroscope angle does not need to be reset in the user's robot code.
   * The library automatically takes care of offsetting the gyro angle.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The distances traveled by the encoders.
   * @param pose The estimated pose of the robot on the field.
   */
  void ResetPosition(const Rotation3d& gyroAngle,
                     const WheelPositions& wheelPositions, const Pose3d& pose) {
    // Reset state estimate and error covariance
    m_odometry.ResetPosition(gyroAngle, wheelPositions, pose);
    m_odometryPoseBuffer.Clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.GetPose();
  }

  /**
   * Resets the robot's pose.
   *
   * @param pose The pose to reset to.
   */
  void ResetPose(const Pose3d& pose) {
    m_odometry.ResetPose(pose);
    m_odometryPoseBuffer.Clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.GetPose();
  }

  /**
   * Resets the robot's translation.
   *
   * @param translation The translation to reset to.
   */
  void ResetTranslation(const Translation3d& translation) {
    m_odometry.ResetTranslation(translation);
    m_odometryPoseBuffer.Clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.GetPose();
  }

  /**
   * Resets the robot's rotation.
   *
   * @param rotation The rotation to reset to.
   */
  void ResetRotation(const Rotation3d& rotation) {
    m_odometry.ResetRotation(rotation);
    m_odometryPoseBuffer.Clear();
    m_visionUpdates.clear();
    m_poseEstimate = m_odometry.GetPose();
  }

  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  Pose3d GetEstimatedPosition() const { return m_poseEstimate; }

  /**
   * Return the pose at a given timestamp, if the buffer is not empty.
   *
   * @param timestamp The pose's timestamp.
   * @return The pose at the given timestamp (or std::nullopt if the buffer is
   * empty).
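   *
   * A hedged usage sketch (the estimator variable and the half-second
   * lookback are illustrative, not prescribed by this class):
   * @code
   * auto pastPose =
   *     estimator.SampleAt(frc::Timer::GetFPGATimestamp() - 0.5_s);
   * if (pastPose) {
   *   // Use *pastPose ...
   * }
   * @endcode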
   */
  std::optional<Pose3d> SampleAt(units::second_t timestamp) const {
    // Step 0: If there are no odometry updates to sample, skip.
    if (m_odometryPoseBuffer.GetInternalBuffer().empty()) {
      return std::nullopt;
    }

    // Step 1: Make sure timestamp matches the sample from the odometry pose
    // buffer. (When sampling, the buffer will always use a timestamp
    // between the first and last timestamps)
    units::second_t oldestOdometryTimestamp =
        m_odometryPoseBuffer.GetInternalBuffer().front().first;
    units::second_t newestOdometryTimestamp =
        m_odometryPoseBuffer.GetInternalBuffer().back().first;
    timestamp =
        std::clamp(timestamp, oldestOdometryTimestamp, newestOdometryTimestamp);

    // Step 2: If there are no applicable vision updates, use the odometry-only
    // information.
    if (m_visionUpdates.empty() || timestamp < m_visionUpdates.begin()->first) {
      return m_odometryPoseBuffer.Sample(timestamp);
    }

    // Step 3: Get the latest vision update from before or at the timestamp to
    // sample at.
    // First, find the iterator past the sample timestamp, then go back one.
    // Note that upper_bound() won't return begin() because we check begin()
    // earlier.
    auto floorIter = m_visionUpdates.upper_bound(timestamp);
    --floorIter;
    auto visionUpdate = floorIter->second;

    // Step 4: Get the pose measured by odometry at the time of the sample.
    auto odometryEstimate = m_odometryPoseBuffer.Sample(timestamp);

    // Step 5: Apply the vision compensation to the odometry pose.
    // TODO Replace with std::optional::transform() in C++23
    if (odometryEstimate) {
      return visionUpdate.Compensate(*odometryEstimate);
    }
    return std::nullopt;
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct
   * the odometry pose estimate while still accounting for measurement noise.
   *
   * This method can be called as infrequently as you want, as long as you are
   * calling Update() every loop.
   *
   * To promote stability of the pose estimate and make it robust to bad vision
   * data, we recommend only adding vision measurements that are already within
   * one meter or so of the current pose estimate.
   *
   * @param visionRobotPose The pose of the robot as measured by the vision
   * camera.
   * @param timestamp The timestamp of the vision measurement in seconds. Note
   * that if you don't use your own time source by calling UpdateWithTime(),
   * then you must use a timestamp with an epoch since FPGA startup (i.e.,
   * the epoch of this timestamp is the same epoch as
   * frc::Timer::GetFPGATimestamp()). This means that you should use
   * frc::Timer::GetFPGATimestamp() as your time source in this case.
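   *
   * For example, a hedged sketch of timestamping a camera frame (the
   * cameraLatency value is a placeholder supplied by your vision pipeline,
   * not by this class):
   * @code
   * units::second_t captureTime =
   *     frc::Timer::GetFPGATimestamp() - cameraLatency;
   * estimator.AddVisionMeasurement(visionPose, captureTime);
   * @endcode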
   */
  void AddVisionMeasurement(const Pose3d& visionRobotPose,
                            units::second_t timestamp) {
    // Step 0: If this measurement is old enough to be outside the pose buffer's
    // timespan, skip.
    if (m_odometryPoseBuffer.GetInternalBuffer().empty() ||
        m_odometryPoseBuffer.GetInternalBuffer().front().first -
                kBufferDuration >
            timestamp) {
      return;
    }

    // Step 1: Clean up any old entries
    CleanUpVisionUpdates();

    // Step 2: Get the pose measured by odometry at the moment the vision
    // measurement was made.
    auto odometrySample = m_odometryPoseBuffer.Sample(timestamp);

    if (!odometrySample) {
      return;
    }

    // Step 3: Get the vision-compensated pose estimate at the moment the vision
    // measurement was made.
    auto visionSample = SampleAt(timestamp);

    if (!visionSample) {
      return;
    }

    // Step 4: Measure the twist between the old pose estimate and the vision
    // pose.
    auto twist = visionSample.value().Log(visionRobotPose);

    // Step 5: We should not trust the twist entirely, so instead we scale this
    // twist by a Kalman gain matrix representing how much we trust vision
    // measurements compared to our current pose.
    frc::Vectord<6> k_times_twist =
        m_visionK * frc::Vectord<6>{twist.dx.value(), twist.dy.value(),
                                    twist.dz.value(), twist.rx.value(),
                                    twist.ry.value(), twist.rz.value()};

    // Step 6: Convert back to Twist3d.
    Twist3d scaledTwist{
        units::meter_t{k_times_twist(0)}, units::meter_t{k_times_twist(1)},
        units::meter_t{k_times_twist(2)}, units::radian_t{k_times_twist(3)},
        units::radian_t{k_times_twist(4)}, units::radian_t{k_times_twist(5)}};

    // Step 7: Calculate and record the vision update.
    VisionUpdate visionUpdate{visionSample->Exp(scaledTwist), *odometrySample};
    m_visionUpdates[timestamp] = visionUpdate;

    // Step 8: Remove later vision measurements. (Matches previous behavior)
    auto firstAfter = m_visionUpdates.upper_bound(timestamp);
    m_visionUpdates.erase(firstAfter, m_visionUpdates.end());

    // Step 9: Update latest pose estimate. Since we cleared all updates after
    // this vision update, it's guaranteed to be the latest vision update.
    m_poseEstimate = visionUpdate.Compensate(m_odometry.GetPose());
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct
   * the odometry pose estimate while still accounting for measurement noise.
   *
   * This method can be called as infrequently as you want, as long as you are
   * calling Update() every loop.
   *
   * To promote stability of the pose estimate and make it robust to bad vision
   * data, we recommend only adding vision measurements that are already within
   * one meter or so of the current pose estimate.
   *
   * Note that the vision measurement standard deviations passed into this
   * method will continue to apply to future measurements until a subsequent
   * call to SetVisionMeasurementStdDevs() or this method.
   *
   * @param visionRobotPose The pose of the robot as measured by the vision
   * camera.
   * @param timestamp The timestamp of the vision measurement in seconds. Note
   * that if you don't use your own time source by calling UpdateWithTime(),
   * then you must use a timestamp with an epoch since FPGA startup (i.e.,
   * the epoch of this timestamp is the same epoch as
   * frc::Timer::GetFPGATimestamp()). This means that you should use
   * frc::Timer::GetFPGATimestamp() as your time source in this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose
   * measurement (x position in meters, y position in meters, z position in
   * meters, and angle in radians). Increase these numbers to trust the
   * vision pose measurement less.
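   *
   * A hedged sketch of scaling trust with target distance (the distance
   * variable and scale factors are illustrative, not prescribed by this
   * class):
   * @code
   * double d = distanceToTargetMeters;  // hypothetical, from your pipeline
   * estimator.AddVisionMeasurement(
   *     visionPose, captureTime,
   *     wpi::array<double, 4>{0.1 * d, 0.1 * d, 0.1 * d, 0.2 * d});
   * @endcode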
   */
  void AddVisionMeasurement(
      const Pose3d& visionRobotPose, units::second_t timestamp,
      const wpi::array<double, 4>& visionMeasurementStdDevs) {
    SetVisionMeasurementStdDevs(visionMeasurementStdDevs);
    AddVisionMeasurement(visionRobotPose, timestamp);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This
   * should be called every loop.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The distances traveled by the encoders.
   *
   * @return The estimated pose of the robot in meters.
   */
  Pose3d Update(const Rotation3d& gyroAngle,
                const WheelPositions& wheelPositions) {
    return UpdateWithTime(wpi::math::MathSharedStore::GetTimestamp(), gyroAngle,
                          wheelPositions);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This
   * should be called every loop.
   *
   * @param currentTime The time at which this method was called.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The distances traveled by the encoders.
   *
   * @return The estimated pose of the robot in meters.
   */
  Pose3d UpdateWithTime(units::second_t currentTime,
                        const Rotation3d& gyroAngle,
                        const WheelPositions& wheelPositions) {
    auto odometryEstimate = m_odometry.Update(gyroAngle, wheelPositions);

    m_odometryPoseBuffer.AddSample(currentTime, odometryEstimate);

    if (m_visionUpdates.empty()) {
      m_poseEstimate = odometryEstimate;
    } else {
      auto visionUpdate = m_visionUpdates.rbegin()->second;
      m_poseEstimate = visionUpdate.Compensate(odometryEstimate);
    }

    return GetEstimatedPosition();
  }

 private:
  /**
   * Removes stale vision updates that won't affect sampling.
   */
  void CleanUpVisionUpdates() {
    // Step 0: If there are no odometry samples, skip.
    if (m_odometryPoseBuffer.GetInternalBuffer().empty()) {
      return;
    }

    // Step 1: Find the oldest timestamp that needs a vision update.
    units::second_t oldestOdometryTimestamp =
        m_odometryPoseBuffer.GetInternalBuffer().front().first;

    // Step 2: If there are no vision updates before that timestamp, skip.
    if (m_visionUpdates.empty() ||
        oldestOdometryTimestamp < m_visionUpdates.begin()->first) {
      return;
    }

    // Step 3: Find the newest vision update timestamp before or at the oldest
    // timestamp.
    // First, find the iterator past the oldest odometry timestamp, then go
    // back one. Note that upper_bound() won't return begin() because we check
    // begin() earlier.
    auto newestNeededVisionUpdate =
        m_visionUpdates.upper_bound(oldestOdometryTimestamp);
    --newestNeededVisionUpdate;

    // Step 4: Remove all entries strictly before the newest timestamp we need.
    m_visionUpdates.erase(m_visionUpdates.begin(), newestNeededVisionUpdate);
  }

  struct VisionUpdate {
    // The vision-compensated pose estimate
    Pose3d visionPose;

    // The pose estimated based solely on odometry
    Pose3d odometryPose;

    /**
     * Returns the vision-compensated version of the pose. Specifically, changes
     * the pose from being relative to this record's odometry pose to being
     * relative to this record's vision pose.
     *
     * @param pose The pose to compensate.
     * @return The compensated pose.
     */
    Pose3d Compensate(const Pose3d& pose) const {
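      // "delta" is the transform odometry has accumulated since this record
      // was captured; replaying it on top of the vision pose yields the
      // vision-compensated estimate.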
      auto delta = pose - odometryPose;
      return visionPose + delta;
    }
  };

  static constexpr units::second_t kBufferDuration = 1.5_s;

  Odometry3d<WheelSpeeds, WheelPositions>& m_odometry;
  wpi::array<double, 4> m_q{wpi::empty_array};
  Matrixd<6, 6> m_visionK = Matrixd<6, 6>::Zero();

  // Maps timestamps to odometry-only pose estimates
  TimeInterpolatableBuffer<Pose3d> m_odometryPoseBuffer{kBufferDuration};
  // Maps timestamps to vision updates
  // Always contains one entry before the oldest entry in m_odometryPoseBuffer,
  // unless there have been no vision measurements after the last reset
  std::map<units::second_t, VisionUpdate> m_visionUpdates;

  Pose3d m_poseEstimate;
};

}  // namespace frc