#include "y2024/localizer/localizer.h"

#include "gflags/gflags.h"

#include "aos/containers/sized_array.h"
#include "frc971/control_loops/drivetrain/localizer_generated.h"
#include "frc971/control_loops/pose.h"
#include "frc971/vision/target_map_utils.h"
#include "y2024/constants.h"

DEFINE_double(max_pose_error, 1e-6,
              "Throw out target poses with a higher pose error than this");
DEFINE_double(
    max_pose_error_ratio, 0.4,
    "Throw out target poses with a higher pose error ratio than this");
DEFINE_double(distortion_noise_scalar, 1.0,
              "Scale the target pose distortion factor by this when computing "
              "the noise.");
DEFINE_double(
    max_implied_yaw_error, 3.0,
    "Reject target poses that imply a robot yaw of more than this many "
    "degrees off from our estimate.");
DEFINE_double(
    max_implied_teleop_yaw_error, 30.0,
    "Reject target poses that imply a robot yaw of more than this many "
    "degrees off from our estimate.");
DEFINE_double(max_distance_to_target, 5.0,
              "Reject target poses that have a 3d distance of more than this "
              "many meters.");
DEFINE_double(max_auto_image_robot_speed, 2.0,
              "Reject target poses when the robot is travelling faster than "
              "this speed in auto.");

namespace y2024::localizer {
namespace {
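// Channels on which AprilTag detections arrive; a channel's position in this
// array defines the camera index used throughout this file.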
constexpr std::array<std::string_view, Localizer::kNumCameras>
    kDetectionChannels{"/orin1/camera0", "/orin1/camera1", "/imu/camera0",
                       "/imu/camera1"};

size_t CameraIndexForName(std::string_view name) {
  for (size_t index = 0; index < kDetectionChannels.size(); ++index) {
    if (name == kDetectionChannels.at(index)) {
      return index;
    }
  }
  LOG(FATAL) << "No camera channel named " << name;
}

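// Builds a map from AprilTag ID to the target's field-relative pose from the
// constants' target map; duplicate target IDs are CHECK-failed.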
std::map<uint64_t, Localizer::Transform> GetTargetLocations(
    const Constants &constants) {
  CHECK(constants.has_common());
  CHECK(constants.common()->has_target_map());
  CHECK(constants.common()->target_map()->has_target_poses());
  std::map<uint64_t, Localizer::Transform> transforms;
  for (const frc971::vision::TargetPoseFbs *target :
       *constants.common()->target_map()->target_poses()) {
    CHECK(target->has_id());
    CHECK(target->has_position());
    CHECK(target->has_orientation());
    CHECK_EQ(0u, transforms.count(target->id()));
    transforms[target->id()] = PoseToTransform(target);
  }
  return transforms;
}
}  // namespace

std::array<Localizer::CameraState, Localizer::kNumCameras>
Localizer::MakeCameras(const Constants &constants, aos::EventLoop *event_loop) {
  CHECK(constants.has_cameras());
  std::array<Localizer::CameraState, Localizer::kNumCameras> cameras;
  for (const CameraConfiguration *camera : *constants.cameras()) {
    CHECK(camera->has_calibration());
    const frc971::vision::calibration::CameraCalibration *calibration =
        camera->calibration();
    CHECK(!calibration->has_turret_extrinsics())
        << "The 2024 robot does not have cameras on a turret.";
    CHECK(calibration->has_node_name());
    const std::string channel_name =
        absl::StrFormat("/%s/camera%d", calibration->node_name()->string_view(),
                        calibration->camera_number());
    const size_t index = CameraIndexForName(channel_name);
    // We default-construct the extrinsics matrix to all-zeros; use that to
    // sanity-check whether we have populated the matrix yet or not.
    CHECK(cameras.at(index).extrinsics.norm() == 0)
        << "Got multiple calibrations for "
        << calibration->node_name()->string_view();
    CHECK(calibration->has_fixed_extrinsics());
    cameras.at(index).extrinsics =
        frc971::control_loops::drivetrain::FlatbufferToTransformationMatrix(
            *calibration->fixed_extrinsics());
    cameras.at(index).debug_sender =
        event_loop->MakeSender<VisualizationStatic>(channel_name);
  }
  for (const CameraState &camera : cameras) {
    CHECK(camera.extrinsics.norm() != 0) << "Missing a camera calibration.";
  }
  return cameras;
}

Localizer::Localizer(aos::EventLoop *event_loop)
    : event_loop_(event_loop),
      constants_fetcher_(event_loop),
      dt_config_(
          frc971::control_loops::drivetrain::DrivetrainConfig<double>::
              FromFlatbuffer(*CHECK_NOTNULL(
                  constants_fetcher_.constants().common()->drivetrain()))),
      cameras_(MakeCameras(constants_fetcher_.constants(), event_loop)),
      target_poses_(GetTargetLocations(constants_fetcher_.constants())),
      down_estimator_(dt_config_),
      ekf_(dt_config_),
      observations_(&ekf_),
      imu_watcher_(event_loop, dt_config_,
                   y2024::constants::Values::DrivetrainEncoderToMeters(1),
                   std::bind(&Localizer::HandleImu, this, std::placeholders::_1,
                             std::placeholders::_2, std::placeholders::_3,
                             std::placeholders::_4, std::placeholders::_5),
                   frc971::controls::ImuWatcher::TimestampSource::kPi),
      utils_(event_loop),
      status_sender_(event_loop->MakeSender<Status>("/localizer")),
      output_sender_(event_loop->MakeSender<frc971::controls::LocalizerOutput>(
          "/localizer")),
      server_statistics_fetcher_(
          event_loop_->MakeFetcher<aos::message_bridge::ServerStatistics>(
              "/aos")),
      client_statistics_fetcher_(
          event_loop_->MakeFetcher<aos::message_bridge::ClientStatistics>(
              "/aos")) {
  if (dt_config_.is_simulated) {
    down_estimator_.assume_perfect_gravity();
  }

  for (size_t camera_index = 0; camera_index < kNumCameras; ++camera_index) {
    const std::string_view channel_name = kDetectionChannels.at(camera_index);
    const aos::Channel *const channel = CHECK_NOTNULL(
        event_loop->GetChannel<frc971::vision::TargetMap>(channel_name));
    event_loop->MakeWatcher(
        channel_name, [this, channel,
                       camera_index](const frc971::vision::TargetMap &targets) {
          CHECK(targets.has_target_poses());
          CHECK(targets.has_monotonic_timestamp_ns());
          const std::optional<aos::monotonic_clock::duration> clock_offset =
              utils_.ClockOffset(channel->source_node()->string_view());
          if (!clock_offset.has_value()) {
            VLOG(1) << "Rejecting image due to disconnected message bridge at "
                    << event_loop_->monotonic_now();
            cameras_.at(camera_index)
                .rejection_counter.IncrementError(
                    RejectionReason::MESSAGE_BRIDGE_DISCONNECTED);
            return;
          }
          const aos::monotonic_clock::time_point orin_capture_time(
              std::chrono::nanoseconds(targets.monotonic_timestamp_ns()) -
              clock_offset.value());
          if (orin_capture_time > event_loop_->context().monotonic_event_time) {
            VLOG(1) << "Rejecting image due to being from the future at "
                    << event_loop_->monotonic_now() << " with timestamp of "
                    << orin_capture_time << " and event time of "
                    << event_loop_->context().monotonic_event_time;
            cameras_.at(camera_index)
                .rejection_counter.IncrementError(
                    RejectionReason::IMAGE_FROM_FUTURE);
            return;
          }
          auto debug_builder =
              cameras_.at(camera_index).debug_sender.MakeStaticBuilder();
          auto target_debug_list = debug_builder->add_targets();
          // The static_length should already be 20.
          CHECK(target_debug_list->reserve(20));
          for (const frc971::vision::TargetPoseFbs *target :
               *targets.target_poses()) {
            VLOG(1) << "Handling target from " << camera_index;
            HandleTarget(camera_index, orin_capture_time, *target,
                         target_debug_list->emplace_back());
          }
          StatisticsForCamera(cameras_.at(camera_index),
                              debug_builder->add_statistics());
          debug_builder.CheckOk(debug_builder.Send());
          SendStatus();
        });
  }

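  // Publish the pose estimate at a fixed 50 Hz (20 ms period) for downstream
  // consumers, independent of camera and IMU message arrival.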
  event_loop_->AddPhasedLoop([this](int) { SendOutput(); },
                             std::chrono::milliseconds(20));

  event_loop_->MakeWatcher(
      "/drivetrain",
      [this](
          const frc971::control_loops::drivetrain::LocalizerControl &control) {
        // This is triggered whenever we need to force the X/Y/(maybe theta)
        // position of the robot to a particular point---e.g., during pre-match
        // setup, or when commanded by a button on the driverstation.

        // For some forms of reset, we choose to keep our current yaw estimate
        // rather than overriding it from the control message.
        const double theta = control.keep_current_theta()
                                 ? ekf_.X_hat(StateIdx::kTheta)
                                 : control.theta();
        // Encoder values need to be reset based on the current values to
        // ensure that we don't get weird corrections on the next encoder
        // update.
        const double left_encoder = ekf_.X_hat(StateIdx::kLeftEncoder);
        const double right_encoder = ekf_.X_hat(StateIdx::kRightEncoder);
        ekf_.ResetInitialState(
            t_,
            (HybridEkf::State() << control.x(), control.y(), theta,
             left_encoder, 0, right_encoder, 0, 0, 0, 0, 0, 0)
                .finished(),
            ekf_.P());
      });

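  // Run the filter on encoder + gyro measurements only; accelerometer
  // readings are not used for state updates.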
  ekf_.set_ignore_accel(true);
  // Priority should be lower than the imu reading process, but non-zero.
  event_loop->SetRuntimeRealtimePriority(10);
  event_loop->OnRun([this, event_loop]() {
    ekf_.ResetInitialState(event_loop->monotonic_now(),
                           HybridEkf::State::Zero(), ekf_.P());
  });
}

void Localizer::HandleImu(aos::monotonic_clock::time_point /*sample_time_pico*/,
                          aos::monotonic_clock::time_point sample_time_orin,
                          std::optional<Eigen::Vector2d> /*encoders*/,
                          Eigen::Vector3d gyro, Eigen::Vector3d accel) {
  std::optional<Eigen::Vector2d> encoders = utils_.Encoders(sample_time_orin);
  last_encoder_readings_ = encoders;
  // Ignore invalid readings; the HybridEkf will handle it reasonably.
  if (!encoders.has_value()) {
    return;
  }
  if (t_ == aos::monotonic_clock::min_time) {
    t_ = sample_time_orin;
  }
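  // If the sample time jumps by more than ~10 nominal IMU periods, assume the
  // clock reset (or that we dropped a large batch of samples) and restart
  // integration from the new sample time rather than integrating across the
  // gap.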
  if (t_ + 10 * frc971::controls::ImuWatcher::kNominalDt < sample_time_orin) {
    t_ = sample_time_orin;
    ++clock_resets_;
  }
  const aos::monotonic_clock::duration dt = sample_time_orin - t_;
  t_ = sample_time_orin;
  // We don't actually use the down estimator currently, but it's really
  // convenient for debugging.
  down_estimator_.Predict(gyro, accel, dt);
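  // Rotate the gyro reading into the robot frame; its Z component is the
  // robot yaw rate.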
  const double yaw_rate = (dt_config_.imu_transform * gyro)(2);
  ekf_.UpdateEncodersAndGyro(encoders.value()(0), encoders.value()(1), yaw_rate,
                             utils_.VoltageOrZero(sample_time_orin), accel, t_);
  SendStatus();
}

void Localizer::RejectImage(int camera_index, RejectionReason reason,
                            TargetEstimateDebugStatic *builder) {
  if (builder != nullptr) {
    builder->set_accepted(false);
    builder->set_rejection_reason(reason);
  }
  cameras_.at(camera_index).rejection_counter.IncrementError(reason);
}

// Only use april tags present in the target map; this method has also been
// used (in the past) for ignoring april tags that tend to produce problematic
// readings.
bool Localizer::UseAprilTag(uint64_t target_id) {
  return target_poses_.count(target_id) != 0;
}

namespace {
// Converts a camera transformation matrix from treating the +Z axis as
// pointing straight out of the lens to having the +X axis pointing straight
// out of the lens, with +Z going "up" (i.e., -Y in the normal convention) and
// +Y going leftwards (i.e., -X in the normal convention).
Localizer::Transform ZToXCamera(const Localizer::Transform &transform) {
  return transform *
         Eigen::Matrix4d{
             {0, -1, 0, 0}, {0, 0, -1, 0}, {1, 0, 0, 0}, {0, 0, 0, 1}};
}
}  // namespace

void Localizer::HandleTarget(
    int camera_index, const aos::monotonic_clock::time_point capture_time,
    const frc971::vision::TargetPoseFbs &target,
    TargetEstimateDebugStatic *debug_builder) {
  ++total_candidate_targets_;
  ++cameras_.at(camera_index).total_candidate_targets;
  const uint64_t target_id = target.id();

  if (debug_builder == nullptr) {
    AOS_LOG(ERROR, "Dropped message from debug vector.");
  } else {
    debug_builder->set_camera(camera_index);
    debug_builder->set_image_age_sec(aos::time::DurationInSeconds(
        event_loop_->monotonic_now() - capture_time));
    debug_builder->set_image_monotonic_timestamp_ns(
        std::chrono::duration_cast<std::chrono::nanoseconds>(
            capture_time.time_since_epoch())
            .count());
    debug_builder->set_april_tag(target_id);
  }
  VLOG(2) << aos::FlatbufferToJson(&target);
  if (!UseAprilTag(target_id)) {
    VLOG(1) << "Rejecting target due to invalid ID " << target_id;
    RejectImage(camera_index, RejectionReason::NO_SUCH_TARGET, debug_builder);
    return;
  }

  const Transform &H_field_target = target_poses_.at(target_id);
  const Transform &H_robot_camera = cameras_.at(camera_index).extrinsics;

  const Transform H_camera_target = PoseToTransform(&target);

  // In order to do the EKF correction, we determine the expected state based
  // on the state at the time the image was captured; however, we insert the
  // correction update itself at the current time. This is technically not
  // quite correct, but saves substantial CPU usage & code complexity by
  // making it so that we don't have to constantly rewind the entire EKF
  // history.
  const std::optional<State> state_at_capture =
      ekf_.LastStateBeforeTime(capture_time);

  if (!state_at_capture.has_value()) {
    VLOG(1) << "Rejecting image due to being too old.";
    return RejectImage(camera_index, RejectionReason::IMAGE_TOO_OLD,
                       debug_builder);
  } else if (target.pose_error() > FLAGS_max_pose_error) {
    VLOG(1) << "Rejecting target due to high pose error "
            << target.pose_error();
    return RejectImage(camera_index, RejectionReason::HIGH_POSE_ERROR,
                       debug_builder);
  } else if (target.pose_error_ratio() > FLAGS_max_pose_error_ratio) {
    VLOG(1) << "Rejecting target due to high pose error ratio "
            << target.pose_error_ratio();
    return RejectImage(camera_index, RejectionReason::HIGH_POSE_ERROR_RATIO,
                       debug_builder);
  }

  Corrector corrector(state_at_capture.value(), H_field_target, H_robot_camera,
                      H_camera_target);
  const double distance_to_target = corrector.observed()(Corrector::kDistance);

  // Heading, distance, skew at 1 meter.
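  // Scale the distance and skew noises quadratically with distance, since a
  // fixed angular error in the image corresponds to a larger physical error
  // the farther away the target is.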
  Eigen::Matrix<double, 3, 1> noises(0.01, 0.05, 0.05);
  const double distance_noise_scalar = std::pow(distance_to_target, 2.0);
  noises(Corrector::kDistance) *= distance_noise_scalar;
  noises(Corrector::kSkew) *= distance_noise_scalar;
  // TODO(james): This is leftover from last year; figure out if we want it.
  // Scale noise by the distortion factor for this detection.
  noises *= (1.0 + FLAGS_distortion_noise_scalar * target.distortion_factor());

  Eigen::Matrix3d R = Eigen::Matrix3d::Zero();
  R.diagonal() = noises.cwiseAbs2();
  if (debug_builder != nullptr) {
    const Eigen::Vector3d camera_position =
        corrector.observed_camera_pose().abs_pos();
    debug_builder->set_camera_x(camera_position.x());
    debug_builder->set_camera_y(camera_position.y());
    debug_builder->set_camera_theta(
        corrector.observed_camera_pose().abs_theta());
    // Calculate the camera-to-robot transformation matrix ignoring the
    // pitch/roll of the camera.
    const Transform H_camera_robot_stripped =
        frc971::control_loops::Pose(ZToXCamera(H_robot_camera))
            .AsTransformationMatrix()
            .inverse();
    const frc971::control_loops::Pose measured_pose(
        corrector.observed_camera_pose().AsTransformationMatrix() *
        H_camera_robot_stripped);
    debug_builder->set_implied_robot_x(measured_pose.rel_pos().x());
    debug_builder->set_implied_robot_y(measured_pose.rel_pos().y());
    debug_builder->set_implied_robot_theta(measured_pose.rel_theta());

    Corrector::PopulateMeasurement(corrector.expected(),
                                   debug_builder->add_expected_observation());
    Corrector::PopulateMeasurement(corrector.observed(),
                                   debug_builder->add_actual_observation());
    Corrector::PopulateMeasurement(noises, debug_builder->add_modeled_noise());
  }

  const double camera_yaw_error =
      aos::math::NormalizeAngle(corrector.expected_camera_pose().abs_theta() -
                                corrector.observed_camera_pose().abs_theta());
  constexpr double kDegToRad = M_PI / 180.0;

  const double robot_speed =
      (state_at_capture.value()(StateIdx::kLeftVelocity) +
       state_at_capture.value()(StateIdx::kRightVelocity)) /
      2.0;
  const double yaw_threshold =
      (utils_.MaybeInAutonomous() ? FLAGS_max_implied_yaw_error
                                  : FLAGS_max_implied_teleop_yaw_error) *
      kDegToRad;

  if (utils_.MaybeInAutonomous() &&
      (std::abs(robot_speed) > FLAGS_max_auto_image_robot_speed)) {
    return RejectImage(camera_index, RejectionReason::ROBOT_TOO_FAST,
                       debug_builder);
  } else if (std::abs(camera_yaw_error) > yaw_threshold) {
    return RejectImage(camera_index, RejectionReason::HIGH_IMPLIED_YAW_ERROR,
                       debug_builder);
  } else if (distance_to_target > FLAGS_max_distance_to_target) {
    return RejectImage(camera_index, RejectionReason::HIGH_DISTANCE_TO_TARGET,
                       debug_builder);
  }

  const Input U = ekf_.MostRecentInput();
  VLOG(1) << "previous state " << ekf_.X_hat().topRows<3>().transpose();
  const State prior_state = ekf_.X_hat();
  // For the correction step, instead of passing in the measurement directly,
  // we pass in (0, 0, 0) as the measurement and then for the expected
  // measurement (Zhat) we calculate the error between the pose implied by
  // the camera measurement and the current estimate of the
  // pose. This doesn't affect any of the math, it just makes the code a bit
  // more convenient to write given the Correct() interface we already have.
  observations_.CorrectKnownH(Eigen::Vector3d::Zero(), &U, corrector, R, t_);
  ++total_accepted_targets_;
  ++cameras_.at(camera_index).total_accepted_targets;
  VLOG(1) << "new state " << ekf_.X_hat().topRows<3>().transpose();
  if (debug_builder != nullptr) {
    debug_builder->set_correction_x(ekf_.X_hat()(StateIdx::kX) -
                                    prior_state(StateIdx::kX));
    debug_builder->set_correction_y(ekf_.X_hat()(StateIdx::kY) -
                                    prior_state(StateIdx::kY));
    debug_builder->set_correction_theta(ekf_.X_hat()(StateIdx::kTheta) -
                                        prior_state(StateIdx::kTheta));
    debug_builder->set_accepted(true);
  }
}

void Localizer::SendOutput() {
  auto builder = output_sender_.MakeBuilder();
  frc971::controls::LocalizerOutput::Builder output_builder =
      builder.MakeBuilder<frc971::controls::LocalizerOutput>();
  output_builder.add_monotonic_timestamp_ns(
      std::chrono::duration_cast<std::chrono::nanoseconds>(
          event_loop_->context().monotonic_event_time.time_since_epoch())
          .count());
  output_builder.add_x(ekf_.X_hat(StateIdx::kX));
  output_builder.add_y(ekf_.X_hat(StateIdx::kY));
  output_builder.add_theta(ekf_.X_hat(StateIdx::kTheta));
  output_builder.add_zeroed(imu_watcher_.zeroer().Zeroed());
  output_builder.add_image_accepted_count(total_accepted_targets_);
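  // Report a full 3-D orientation by composing the EKF's yaw estimate with
  // the roll/pitch estimate from the down estimator.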
  const Eigen::Quaterniond &orientation =
      Eigen::AngleAxis<double>(ekf_.X_hat(StateIdx::kTheta),
                               Eigen::Vector3d::UnitZ()) *
      down_estimator_.X_hat();
  frc971::controls::Quaternion quaternion;
  quaternion.mutate_x(orientation.x());
  quaternion.mutate_y(orientation.y());
  quaternion.mutate_z(orientation.z());
  quaternion.mutate_w(orientation.w());
  output_builder.add_orientation(&quaternion);
  server_statistics_fetcher_.Fetch();
  client_statistics_fetcher_.Fetch();

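  // Report the orins as disconnected if any message bridge connection in
  // either direction is DISCONNECTED.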
  bool orins_connected = true;

  if (server_statistics_fetcher_.get()) {
    for (const auto *orin_server_status :
         *server_statistics_fetcher_->connections()) {
      if (orin_server_status->state() ==
          aos::message_bridge::State::DISCONNECTED) {
        orins_connected = false;
      }
    }
  }

  if (client_statistics_fetcher_.get()) {
    for (const auto *pi_client_status :
         *client_statistics_fetcher_->connections()) {
      if (pi_client_status->state() ==
          aos::message_bridge::State::DISCONNECTED) {
        orins_connected = false;
      }
    }
  }

  // The output message is year-agnostic, and retains "pi" naming for
  // historical reasons.
  output_builder.add_all_pis_connected(orins_connected);
  builder.CheckOk(builder.Send(output_builder.Finish()));
}

flatbuffers::Offset<frc971::control_loops::drivetrain::LocalizerState>
Localizer::PopulateState(const State &X_hat,
                         flatbuffers::FlatBufferBuilder *fbb) {
  frc971::control_loops::drivetrain::LocalizerState::Builder builder(*fbb);
  builder.add_x(X_hat(StateIdx::kX));
  builder.add_y(X_hat(StateIdx::kY));
  builder.add_theta(X_hat(StateIdx::kTheta));
  builder.add_left_velocity(X_hat(StateIdx::kLeftVelocity));
  builder.add_right_velocity(X_hat(StateIdx::kRightVelocity));
  builder.add_left_encoder(X_hat(StateIdx::kLeftEncoder));
  builder.add_right_encoder(X_hat(StateIdx::kRightEncoder));
  builder.add_left_voltage_error(X_hat(StateIdx::kLeftVoltageError));
  builder.add_right_voltage_error(X_hat(StateIdx::kRightVoltageError));
  builder.add_angular_error(X_hat(StateIdx::kAngularError));
  builder.add_longitudinal_velocity_offset(
      X_hat(StateIdx::kLongitudinalVelocityOffset));
  builder.add_lateral_velocity(X_hat(StateIdx::kLateralVelocity));
  return builder.Finish();
}

flatbuffers::Offset<ImuStatus> Localizer::PopulateImu(
    flatbuffers::FlatBufferBuilder *fbb) const {
  const auto zeroer_offset = imu_watcher_.zeroer().PopulateStatus(fbb);
  const auto failures_offset = imu_watcher_.PopulateImuFailures(fbb);
  ImuStatus::Builder builder(*fbb);
  builder.add_zeroed(imu_watcher_.zeroer().Zeroed());
  builder.add_faulted_zero(imu_watcher_.zeroer().Faulted());
  builder.add_zeroing(zeroer_offset);
  if (imu_watcher_.pico_offset().has_value()) {
    builder.add_board_offset_ns(imu_watcher_.pico_offset().value().count());
    builder.add_board_offset_error_ns(imu_watcher_.pico_offset_error().count());
  }
  if (last_encoder_readings_.has_value()) {
    builder.add_left_encoder(last_encoder_readings_.value()(0));
    builder.add_right_encoder(last_encoder_readings_.value()(1));
  }
  builder.add_imu_failures(failures_offset);
  return builder.Finish();
}

flatbuffers::Offset<CumulativeStatistics> Localizer::StatisticsForCamera(
    const CameraState &camera, flatbuffers::FlatBufferBuilder *fbb) {
  const auto counts_offset = camera.rejection_counter.PopulateCounts(fbb);
  CumulativeStatistics::Builder stats_builder(*fbb);
  stats_builder.add_total_accepted(camera.total_accepted_targets);
  stats_builder.add_total_candidates(camera.total_candidate_targets);
  stats_builder.add_rejection_reasons(counts_offset);
  return stats_builder.Finish();
}

void Localizer::StatisticsForCamera(const CameraState &camera,
                                    CumulativeStatisticsStatic *builder) {
  camera.rejection_counter.PopulateCountsStaticFbs(
      builder->add_rejection_reasons());
  builder->set_total_accepted(camera.total_accepted_targets);
  builder->set_total_candidates(camera.total_candidate_targets);
}

void Localizer::SendStatus() {
  auto builder = status_sender_.MakeBuilder();
  std::array<flatbuffers::Offset<CumulativeStatistics>, kNumCameras>
      stats_offsets;
  for (size_t ii = 0; ii < kNumCameras; ++ii) {
    stats_offsets.at(ii) = StatisticsForCamera(cameras_.at(ii), builder.fbb());
  }
  auto stats_offset =
      builder.fbb()->CreateVector(stats_offsets.data(), stats_offsets.size());
  auto down_estimator_offset =
      down_estimator_.PopulateStatus(builder.fbb(), t_);
  auto imu_offset = PopulateImu(builder.fbb());
  auto state_offset = PopulateState(ekf_.X_hat(), builder.fbb());
  Status::Builder status_builder = builder.MakeBuilder<Status>();
  status_builder.add_state(state_offset);
  status_builder.add_down_estimator(down_estimator_offset);
  status_builder.add_imu(imu_offset);
  status_builder.add_statistics(stats_offset);
  builder.CheckOk(builder.Send(status_builder.Finish()));
}

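// Reduces a camera-relative target pose to the (heading, distance, skew)
// observation used for the EKF correction: heading is the bearing to the
// target, distance is the 2-D range, and skew is the target's yaw relative to
// the line of sight.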
Eigen::Vector3d Localizer::Corrector::HeadingDistanceSkew(
    const Pose &relative_pose) {
  const double heading = relative_pose.heading();
  const double distance = relative_pose.xy_norm();
  const double skew =
      ::aos::math::NormalizeAngle(relative_pose.rel_theta() - heading);
  return {heading, distance, skew};
}

Localizer::Corrector Localizer::Corrector::CalculateHeadingDistanceSkewH(
    const State &state_at_capture, const Transform &H_field_target,
    const Transform &H_robot_camera, const Transform &H_camera_target) {
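  // Infer where the camera must be on the field from the known field-relative
  // target pose and the camera-relative detection of that target.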
  const Transform H_field_camera = H_field_target * H_camera_target.inverse();
  const Pose expected_robot_pose(
      {state_at_capture(StateIdx::kX), state_at_capture(StateIdx::kY), 0.0},
      state_at_capture(StateIdx::kTheta));
  // Observed position on the field, reduced to just the 2-D pose.
  const Pose observed_camera(ZToXCamera(H_field_camera));
  const Pose expected_camera(expected_robot_pose.AsTransformationMatrix() *
                             ZToXCamera(H_robot_camera));
  const Pose nominal_target(ZToXCamera(H_field_target));
  const Pose observed_target = nominal_target.Rebase(&observed_camera);
  const Pose expected_target = nominal_target.Rebase(&expected_camera);
  return Localizer::Corrector{
      expected_robot_pose,
      observed_camera,
      expected_camera,
      HeadingDistanceSkew(expected_target),
      HeadingDistanceSkew(observed_target),
      frc971::control_loops::drivetrain::HMatrixForCameraHeadingDistanceSkew(
          nominal_target, observed_camera)};
}

Localizer::Corrector::Corrector(const State &state_at_capture,
                                const Transform &H_field_target,
                                const Transform &H_robot_camera,
                                const Transform &H_camera_target)
    : Corrector(CalculateHeadingDistanceSkewH(
          state_at_capture, H_field_target, H_robot_camera, H_camera_target)) {}

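// Because the "measurement" passed to CorrectKnownH() is always zero, the
// EKF residual works out to observed - expected, i.e. the error between the
// camera-implied pose and the current estimate (see HandleTarget()).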
Localizer::Output Localizer::Corrector::H(const State &, const Input &) {
  return expected_ - observed_;
}

}  // namespace y2024::localizer