#ifndef Y2019_CONTROL_LOOPS_DRIVETRAIN_LOCALIZER_H_
#define Y2019_CONTROL_LOOPS_DRIVETRAIN_LOCALIZER_H_

#include <array>
#include <cmath>
#include <functional>
#include <limits>
#include <memory>
#include <optional>

#include "aos/containers/sized_array.h"
#include "aos/logging/logging.h"
#include "aos/util/math.h"
#include "frc971/control_loops/drivetrain/camera.h"
#include "frc971/control_loops/drivetrain/hybrid_ekf.h"
#include "frc971/control_loops/pose.h"

namespace y2019 {
namespace control_loops {

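// Localizer that builds on the drivetrain HybridEkf to fuse camera
// observations of known field targets into the pose estimate. The template
// parameters mirror the camera model: the number of cameras on the robot, the
// number of targets and obstacles on the field, and the maximum number of
// targets that can appear in a single camera frame.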
template <int num_cameras, int num_targets, int num_obstacles,
          int max_targets_per_frame, typename Scalar = double>
class TypedLocalizer
    : public ::frc971::control_loops::drivetrain::HybridEkf<Scalar> {
 public:
  typedef frc971::control_loops::TypedCamera<num_targets, num_obstacles, Scalar>
      Camera;
  typedef typename Camera::TargetView TargetView;
  typedef typename Camera::Pose Pose;
  typedef typename frc971::control_loops::Target Target;
  typedef ::frc971::control_loops::drivetrain::HybridEkf<Scalar> HybridEkf;
  typedef typename HybridEkf::State State;
  typedef typename HybridEkf::StateSquare StateSquare;
  typedef typename HybridEkf::Input Input;
  typedef typename HybridEkf::Output Output;
  using HybridEkf::kNInputs;
  using HybridEkf::kNOutputs;
  using HybridEkf::kNStates;

  // robot_pose should be the object that is used by the cameras, such that
  // when we update robot_pose, the cameras will change what they report the
  // relative position of the targets as.
  // Note that the parameters for the cameras should be set to allow slightly
  // larger fields of view and slightly longer range than the true cameras so
  // that we can identify potential matches for targets even when we have
  // slight modelling errors.
  TypedLocalizer(
      const ::frc971::control_loops::drivetrain::DrivetrainConfig<Scalar>
          &dt_config,
      Pose *robot_pose)
      : ::frc971::control_loops::drivetrain::HybridEkf<Scalar>(dt_config),
        robot_pose_(robot_pose),
        h_queue_(this),
        make_h_queue_(this) {}

  // Performs a Kalman filter correction with a single camera frame,
  // consisting of up to max_targets_per_frame targets and taken at time t.
  // camera is the Camera used to take the images.
  void UpdateTargets(
      const Camera &camera,
      const ::aos::SizedArray<TargetView, max_targets_per_frame> &targets,
      ::aos::monotonic_clock::time_point t) {
    if (targets.empty()) {
      return;
    }

    if (!SanitizeTargets(targets)) {
      AOS_LOG(ERROR, "Throwing out targets due to insane values.\n");
      return;
    }

    if (t > HybridEkf::latest_t()) {
      AOS_LOG(ERROR,
              "target observations must be older than the most recent "
              "encoder/gyro update.\n");
      return;
    }

    Output z;
    Eigen::Matrix<Scalar, kNOutputs, kNOutputs> R;
    TargetViewToMatrices(targets[0], &z, &R);

    // In order to perform the correction steps for the targets, we will
    // separately perform a Correct step for each following target.
    // This way, we can have the first correction figure out the mappings
    // between targets in the image and targets on the field, and then re-use
    // those mappings for all the remaining corrections.
    // As such, we need to store the EKF functions that the remaining targets
    // will need in arrays:
    ::aos::SizedArray<::std::function<Output(const State &, const Input &)>,
                      max_targets_per_frame>
        h_functions;
    ::aos::SizedArray<::std::function<Eigen::Matrix<Scalar, kNOutputs,
                                                    kNStates>(const State &)>,
                      max_targets_per_frame>
        dhdx_functions;
    make_h_queue_.CorrectKnownHBuilder(
        z, nullptr,
        ExpectedObservationBuilder(this, camera, targets, &h_functions,
                                   &dhdx_functions),
        R, t);
    // Fetch cache:
    for (size_t ii = 1; ii < targets.size(); ++ii) {
      TargetViewToMatrices(targets[ii], &z, &R);
      h_queue_.CorrectKnownH(
          z, nullptr,
          ExpectedObservationFunctor(h_functions[ii], dhdx_functions[ii]), R,
          t);
    }
  }
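
  // Example usage (an illustrative sketch only; the template arguments and
  // surrounding plumbing here are hypothetical and depend on the
  // year-specific constants):
  //
  //   // num_cameras = 1, num_targets = 4, num_obstacles = 2,
  //   // max_targets_per_frame = 3:
  //   TypedLocalizer<1, 4, 2, 3> localizer(dt_config, &robot_pose);
  //   // ... feed encoder/gyro updates to the HybridEkf base class ...
  //   // Then, for each processed camera frame (whose capture time must not
  //   // be newer than the latest encoder/gyro update):
  //   localizer.UpdateTargets(camera, frame_target_views, capture_time);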

 private:
  class ExpectedObservationFunctor
      : public HybridEkf::ExpectedObservationFunctor {
   public:
    ExpectedObservationFunctor(
        ::std::function<Output(const State &, const Input &)> h,
        ::std::function<
            Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
            dhdx)
        : h_(h), dhdx_(dhdx) {}

    Output H(const State &state, const Input &input) final {
      return h_(state, input);
    }

    virtual Eigen::Matrix<Scalar, kNOutputs, kNStates> DHDX(
        const State &state) final {
      return dhdx_(state);
    }

   private:
    ::std::function<Output(const State &, const Input &)> h_;
    ::std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
        dhdx_;
  };
  class ExpectedObservationBuilder
      : public HybridEkf::ExpectedObservationBuilder {
   public:
    ExpectedObservationBuilder(
        TypedLocalizer *localizer, const Camera &camera,
        const ::aos::SizedArray<TargetView, max_targets_per_frame>
            &target_views,
        ::aos::SizedArray<::std::function<Output(const State &, const Input &)>,
                          max_targets_per_frame> *h_functions,
        ::aos::SizedArray<::std::function<Eigen::Matrix<
                              Scalar, kNOutputs, kNStates>(const State &)>,
                          max_targets_per_frame> *dhdx_functions)
        : localizer_(localizer),
          camera_(camera),
          target_views_(target_views),
          h_functions_(h_functions),
          dhdx_functions_(dhdx_functions) {}

    virtual ExpectedObservationFunctor *MakeExpectedObservations(
        const State &state, const StateSquare &P) {
      ::std::function<Output(const State &, const Input &)> h;
      ::std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
          dhdx;
      localizer_->MakeH(camera_, target_views_, h_functions_, dhdx_functions_,
                        state, P, &h, &dhdx);
      functor_.emplace(h, dhdx);
      return &functor_.value();
    }

   private:
    TypedLocalizer *localizer_;
    const Camera &camera_;
    const ::aos::SizedArray<TargetView, max_targets_per_frame> &target_views_;
    ::aos::SizedArray<::std::function<Output(const State &, const Input &)>,
                      max_targets_per_frame> *h_functions_;
    ::aos::SizedArray<::std::function<Eigen::Matrix<Scalar, kNOutputs,
                                                    kNStates>(const State &)>,
                      max_targets_per_frame> *dhdx_functions_;
    std::optional<ExpectedObservationFunctor> functor_;
  };
  // The threshold to use for completely rejecting potentially bad target
  // matches.
  // TODO(james): Tune
  static constexpr Scalar kRejectionScore = 1.0;

  // Checks that the targets coming in make some sense--mostly to prevent NaNs
  // or the like from propagating.
  bool SanitizeTargets(
      const ::aos::SizedArray<TargetView, max_targets_per_frame> &targets) {
    for (const TargetView &view : targets) {
      const typename TargetView::Reading reading = view.reading;
      if (!(::std::isfinite(reading.heading) &&
            ::std::isfinite(reading.distance) &&
            ::std::isfinite(reading.skew) && ::std::isfinite(reading.height))) {
        AOS_LOG(ERROR, "Got non-finite values in target.\n");
        return false;
      }
      if (reading.distance < 0) {
        AOS_LOG(ERROR, "Got negative distance.\n");
        return false;
      }
      if (::std::abs(::aos::math::NormalizeAngle(reading.skew)) > M_PI_2) {
        AOS_LOG(ERROR, "Got skew > pi / 2.\n");
        return false;
      }
    }
    return true;
  }

  // Computes the measurement (z) and noise covariance (R) matrices for a
  // given TargetView.
  void TargetViewToMatrices(const TargetView &view, Output *z,
                            Eigen::Matrix<Scalar, kNOutputs, kNOutputs> *R) {
    *z << view.reading.heading, view.reading.distance,
        ::aos::math::NormalizeAngle(view.reading.skew);
    // TODO(james): R should account as well for our confidence in the target
    // matching. However, handling that properly requires thinking a lot more
    // about the probabilities.
    R->setZero();
    R->diagonal() << ::std::pow(view.noise.heading, 2),
        ::std::pow(view.noise.distance, 2), ::std::pow(view.noise.skew, 2);
  }
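
  // As a concrete illustration (hypothetical numbers): a view reading
  // heading = 0.1 rad, distance = 2.0 m, skew = 0.05 rad, with noise standard
  // deviations of 0.01, 0.05, and 0.1 respectively, yields
  //   z = [0.1, 2.0, 0.05]^T
  //   R = diag(0.0001, 0.0025, 0.01)
  // since R carries the measurement variances (standard deviations squared).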

  // This is the function that will be called once the Ekf has inserted the
  // measurement into the right spot in the measurement queue and needs the
  // output functions to actually perform the corrections.
  // Specifically, this will take the estimate of the state at that time and
  // figure out how the targets seen by the camera best map onto the actual
  // targets on the field.
  // It then fills in the h and dhdx functions that are called by the Ekf.
  void MakeH(
      const Camera &camera,
      const ::aos::SizedArray<TargetView, max_targets_per_frame> &target_views,
      ::aos::SizedArray<::std::function<Output(const State &, const Input &)>,
                        max_targets_per_frame> *h_functions,
      ::aos::SizedArray<::std::function<Eigen::Matrix<Scalar, kNOutputs,
                                                      kNStates>(const State &)>,
                        max_targets_per_frame> *dhdx_functions,
      const State &X_hat, const StateSquare &P,
      ::std::function<Output(const State &, const Input &)> *h,
      ::std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
          *dhdx) {
    // Because we need to match camera targets ("views") to actual field
    // targets, and because we want to take advantage of the correlations
    // between the targets (i.e., if we see two targets in the image, they
    // probably correspond to different on-field targets), the matching problem
    // here is somewhat non-trivial. Some of the methods we use only work
    // because we are dealing with very small N (e.g., handling the correlations
    // between multiple views has combinatoric complexity, but since N = 3,
    // it's not an issue).
    //
    // High-level steps:
    // 1) Set the base robot pose for the cameras to the Pose implied by X_hat.
    // 2) Fetch all the expected target views from the camera.
    // 3) Determine the "magnitude" of the Kalman correction from each
    //    potential view/target pair.
    // 4) Match based on the combination of targets with the smallest
    //    corrections.
    // 5) Calculate h and dhdx for each pair of targets.
    //
    // For the "magnitude" of the correction, we do not directly use the
    // standard Kalman correction formula. Instead, we calculate the correction
    // we would get from each component of the measurement and take the L2 norm
    // of those. This prevents situations where a target matches very poorly
    // but produces an overall correction of near-zero.
    // TODO(james): I do not know if this is strictly the correct method to
    // minimize likely error, but it should be reasonable.
    //
    // For the matching, we do the following (see MatchFrames):
    // 1. Compute the best max_targets_per_frame matches for each view.
    // 2. Exhaust every possible combination of view/target pairs and
    //    choose the best one.
    // When we don't think the camera should be able to see as many targets as
    // we actually got in the frame, then we do permit doubling/tripling/etc.
    // up on potential targets once we've exhausted all the targets we think
    // we can see.

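    // Concretely, for a candidate view/target pair with Kalman gain K and
    // innovation err = z - h(X_hat), the scoring loop below computes, for
    // each measurement component k,
    //   scorer(k) = ||K.col(k) * err(k)||^2
    // and then combines the components, plus a height penalty, as
    //   score = ||scorer||^2 +
    //           ((predicted_height - measured_height) / (20 * height_noise))^2.
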
    // Set the current robot pose so that the cameras know where they are
    // (all the cameras have robot_pose_ as their base):
    *robot_pose_->mutable_pos() << X_hat(0, 0), X_hat(1, 0), 0.0;
    robot_pose_->set_theta(X_hat(2, 0));

    // Compute the things we *think* the camera should be seeing.
    // Note: Because we will not try to match to any targets that are not
    // returned by this function, we generally want the modelled camera to have
    // a slightly larger field of view than the real camera, and be able to see
    // slightly smaller targets.
    const ::aos::SizedArray<TargetView, num_targets> camera_views =
        camera.target_views();

    // Each row contains the scores for one seen target view against each
    // camera target view. Only the first camera.target_views().size() columns
    // of each row are populated, and only the first target_views.size() rows
    // are populated.
    // Higher scores imply a worse match. Zero implies a perfect match.
    Eigen::Matrix<Scalar, max_targets_per_frame, num_targets> scores;
    scores.setConstant(::std::numeric_limits<Scalar>::infinity());
    // Each row contains the indices of the best matches per view, where
    // index 0 is the best, 1 the second best, and 2 the third, etc.
    // -1 indicates an unfilled field.
    Eigen::Matrix<int, max_targets_per_frame, max_targets_per_frame>
        best_matches;
    best_matches.setConstant(-1);
    // The H matrices for each potential matching. This has the same structure
    // as the scores matrix.
    ::std::array<::std::array<Eigen::Matrix<Scalar, kNOutputs, kNStates>,
                              max_targets_per_frame>,
                 num_targets>
        all_H_matrices;

    // Iterate through and fill out the scores for each potential pairing:
    for (size_t ii = 0; ii < target_views.size(); ++ii) {
      const TargetView &target_view = target_views[ii];
      Output z;
      Eigen::Matrix<Scalar, kNOutputs, kNOutputs> R;
      TargetViewToMatrices(target_view, &z, &R);

      for (size_t jj = 0; jj < camera_views.size(); ++jj) {
        // Compute the Kalman update for this step:
        const TargetView &view = camera_views[jj];
        const Eigen::Matrix<Scalar, kNOutputs, kNStates> H =
            HMatrix(*view.target, camera.pose());
        const Eigen::Matrix<Scalar, kNStates, kNOutputs> PH = P * H.transpose();
        const Eigen::Matrix<Scalar, kNOutputs, kNOutputs> S = H * PH + R;
        // Note: The inverse here should be very cheap so long as kNOutputs = 3.
        const Eigen::Matrix<Scalar, kNStates, kNOutputs> K = PH * S.inverse();
        const Output err = z - Output(view.reading.heading,
                                      view.reading.distance, view.reading.skew);
        // In order to compute the actual score, we want to consider each
        // component of the error separately, as well as considering the
        // impacts on each of the states separately. As such, we calculate
        // what the separate updates from each error component would be, and
        // sum the impacts on the states.
        Output scorer;
        for (size_t kk = 0; kk < kNOutputs; ++kk) {
          // TODO(james): squaredNorm or norm or L1-norm? Do we care about the
          // square root? Do we prefer a quadratic or linear response?
          scorer(kk, 0) = (K.col(kk) * err(kk, 0)).squaredNorm();
        }
        // Compute the overall score--note that we add in a term for the
        // height, scaled by a manual fudge-factor. The height is not accounted
        // for in the Kalman update because we are not trying to estimate the
        // height of the robot directly.
        Scalar score =
            scorer.squaredNorm() +
            ::std::pow((view.reading.height - target_view.reading.height) /
                           target_view.noise.height / 20.0,
                       2);
        scores(ii, jj) = score;
        all_H_matrices[ii][jj] = H;

        // Update the best_matches matrix (an insertion sort of the target
        // indices for this view, ordered from best to worst score):
        int insert_target = jj;
        for (size_t kk = 0; kk < max_targets_per_frame; ++kk) {
          int idx = best_matches(ii, kk);
          // Note that -1 indicates an unfilled value.
          if (idx == -1 || scores(ii, idx) > scores(ii, insert_target)) {
            best_matches(ii, kk) = insert_target;
            insert_target = idx;
            if (idx == -1) {
              break;
            }
          }
        }
      }
    }

    if (camera_views.size() == 0) {
      AOS_LOG(DEBUG, "Unable to identify potential target matches.\n");
      // If we can't get a match, provide H = zero, which will make this
      // correction step a nop.
      *h = [](const State &, const Input &) { return Output::Zero(); };
      *dhdx = [](const State &) {
        return Eigen::Matrix<Scalar, kNOutputs, kNStates>::Zero();
      };
      for (size_t ii = 0; ii < target_views.size(); ++ii) {
        h_functions->push_back(*h);
        dhdx_functions->push_back(*dhdx);
      }
    } else {
      // Go through and brute force the issue of what the best combination of
      // target matches are. The worst case for this algorithm will be
      // max_targets_per_frame!, which is awful for any N > ~4, but since
      // max_targets_per_frame = 3, I'm not really worried.
      ::std::array<int, max_targets_per_frame> best_frames =
          MatchFrames(scores, best_matches, target_views.size());
      for (size_t ii = 0; ii < target_views.size(); ++ii) {
        size_t view_idx = best_frames[ii];
        if (view_idx >= camera_views.size()) {
          AOS_LOG(ERROR, "Somehow, the view scorer failed.\n");
          h_functions->push_back(
              [](const State &, const Input &) { return Output::Zero(); });
          dhdx_functions->push_back([](const State &) {
            return Eigen::Matrix<Scalar, kNOutputs, kNStates>::Zero();
          });
          continue;
        }
        const Eigen::Matrix<Scalar, kNOutputs, kNStates> best_H =
            all_H_matrices[ii][view_idx];
        const TargetView best_view = camera_views[view_idx];
        const TargetView target_view = target_views[ii];
        const Scalar match_score = scores(ii, view_idx);
        if (match_score > kRejectionScore) {
          AOS_LOG(DEBUG,
                  "Rejecting target at (%f, %f, %f, %f) due to high score.\n",
                  target_view.reading.heading, target_view.reading.distance,
                  target_view.reading.skew, target_view.reading.height);
          h_functions->push_back(
              [](const State &, const Input &) { return Output::Zero(); });
          dhdx_functions->push_back([](const State &) {
            return Eigen::Matrix<Scalar, kNOutputs, kNStates>::Zero();
          });
        } else {
          h_functions->push_back([this, &camera, best_view, target_view](
                                     const State &X, const Input &) {
            // This function actually handles determining what the Output
            // should be at a given state, now that we have chosen the target
            // that we want to match to.
            *robot_pose_->mutable_pos() << X(0, 0), X(1, 0), 0.0;
            robot_pose_->set_theta(X(2, 0));
            const Pose relative_pose =
                best_view.target->pose().Rebase(&camera.pose());
            const Scalar heading = relative_pose.heading();
            const Scalar distance = relative_pose.xy_norm();
            const Scalar skew = ::aos::math::NormalizeAngle(
                relative_pose.rel_theta() - heading);
            return Output(heading, distance, skew);
          });

          // TODO(james): Experiment to better understand whether we want to
          // recalculate H or not.
          dhdx_functions->push_back(
              [best_H](const Eigen::Matrix<Scalar, kNStates, 1> &) {
                return best_H;
              });
        }
      }
      *h = h_functions->at(0);
      *dhdx = dhdx_functions->at(0);
    }
  }

  Eigen::Matrix<Scalar, kNOutputs, kNStates> HMatrix(const Target &target,
                                                     const Pose &camera_pose) {
    // To calculate dheading/d{x,y,theta}:
    // heading = arctan2(target_pos - camera_pos) - camera_theta
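    // Writing diffx = target_x - camera_x and diffy = target_y - camera_y, so
    // that heading = atan2(diffy, diffx) - camera_theta, and noting that the
    // camera position moves with the robot, the partial derivatives with
    // respect to the robot's x, y, and theta work out to:
    //   dheading/dx     = diffy / (diffx^2 + diffy^2)
    //   dheading/dy     = -diffx / (diffx^2 + diffy^2)
    //   dheading/dtheta = -1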
    Eigen::Matrix<Scalar, 3, 1> target_pos = target.pose().abs_pos();
    Eigen::Matrix<Scalar, 3, 1> camera_pos = camera_pose.abs_pos();
    Scalar diffx = target_pos.x() - camera_pos.x();
    Scalar diffy = target_pos.y() - camera_pos.y();
    Scalar norm2 = diffx * diffx + diffy * diffy;
    Scalar dheadingdx = diffy / norm2;
    Scalar dheadingdy = -diffx / norm2;
    Scalar dheadingdtheta = -1.0;

    // To calculate ddistance/d{x,y}:
    // distance = sqrt(diffx^2 + diffy^2)
    Scalar distance = ::std::sqrt(norm2);
    Scalar ddistdx = -diffx / distance;
    Scalar ddistdy = -diffy / distance;

    // Skew = target.theta - camera.theta - heading
    //      = target.theta - arctan2(target_pos - camera_pos)
    Scalar dskewdx = -dheadingdx;
    Scalar dskewdy = -dheadingdy;
    Eigen::Matrix<Scalar, kNOutputs, kNStates> H;
    H.setZero();
    H(0, 0) = dheadingdx;
    H(0, 1) = dheadingdy;
    H(0, 2) = dheadingdtheta;
    H(1, 0) = ddistdx;
    H(1, 1) = ddistdy;
    H(2, 0) = dskewdx;
    H(2, 1) = dskewdy;
    return H;
  }

  // A helper function for the fuller version of MatchFrames; this just
  // removes some of the arguments that are only needed during the recursion.
  // n_views is the number of targets actually seen in the camera image (i.e.,
  // the number of rows in scores/best_matches that are actually populated).
  ::std::array<int, max_targets_per_frame> MatchFrames(
      const Eigen::Matrix<Scalar, max_targets_per_frame, num_targets> &scores,
      const Eigen::Matrix<int, max_targets_per_frame, max_targets_per_frame>
          &best_matches,
      int n_views) {
    ::std::array<int, max_targets_per_frame> best_set;
    best_set.fill(-1);
    Scalar best_score;
    // We start out without having "used" any views/targets:
    ::aos::SizedArray<bool, max_targets_per_frame> used_views;
    for (int ii = 0; ii < n_views; ++ii) {
      used_views.push_back(false);
    }
    MatchFrames(scores, best_matches, used_views, {{false}}, &best_set,
                &best_score);
    return best_set;
  }

  // Recursively iterates over every plausible combination of targets/views
  // that there is and determines the lowest-scoring combination.
  // used_views and used_targets indicate which rows/columns of the
  // scores/best_matches matrices should be ignored. When used_views is all
  // true, that means that we are done recursing.
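  // A small worked example (hypothetical numbers): with two views, two
  // targets, and
  //   scores = [0.1  0.5]
  //            [0.2  0.9]
  // both views prefer target 0. Letting view 0 claim its best match first
  // yields {view0 -> target0, view1 -> target1} with score 0.1 + 0.9 = 1.0;
  // letting view 1 go first yields {view1 -> target0, view0 -> target1} with
  // score 0.2 + 0.5 = 0.7, so the search returns best_set = {1, 0}.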
  void MatchFrames(
      const Eigen::Matrix<Scalar, max_targets_per_frame, num_targets> &scores,
      const Eigen::Matrix<int, max_targets_per_frame, max_targets_per_frame>
          &best_matches,
      const ::aos::SizedArray<bool, max_targets_per_frame> &used_views,
      const ::std::array<bool, num_targets> &used_targets,
      ::std::array<int, max_targets_per_frame> *best_set, Scalar *best_score) {
    *best_score = ::std::numeric_limits<Scalar>::infinity();
    // Iterate by letting each target in the camera frame (that isn't in
    // used_views) choose its best match that isn't already taken. We then set
    // the appropriate flags in used_views and used_targets and call
    // MatchFrames to let all the other views sort themselves out.
    for (size_t ii = 0; ii < used_views.size(); ++ii) {
      if (used_views[ii]) {
        continue;
      }
      int best_match = -1;
      for (size_t jj = 0; jj < max_targets_per_frame; ++jj) {
        if (best_matches(ii, jj) == -1) {
          // If we run out of potential targets from the camera, then there
          // are more targets in the frame than we think there should be.
          // In this case, we are going to be doubling/tripling/etc. up
          // anyhow. So we just give everyone their top choice:
          // TODO(james): If we ever are dealing with larger numbers of
          // targets per frame, do something to minimize doubling-up.
          best_match = best_matches(ii, 0);
          break;
        }
        best_match = best_matches(ii, jj);
        if (!used_targets[best_match]) {
          break;
        }
      }
      // If we reach here and best_match = -1, that means that no potential
      // targets were generated by the camera, and we should never have gotten
      // here.
      AOS_CHECK(best_match != -1);
      ::aos::SizedArray<bool, max_targets_per_frame> sub_views = used_views;
      sub_views[ii] = true;
      ::std::array<bool, num_targets> sub_targets = used_targets;
      sub_targets[best_match] = true;
      ::std::array<int, max_targets_per_frame> sub_best_set;
      Scalar score;
      MatchFrames(scores, best_matches, sub_views, sub_targets, &sub_best_set,
                  &score);
      score += scores(ii, best_match);
      sub_best_set[ii] = best_match;
      if (score < *best_score) {
        *best_score = score;
        *best_set = sub_best_set;
      }
    }
    // best_score will be infinite if we did not find a result due to there
    // being no targets that weren't set in used_views; this is the
    // base case of the recursion and so we set best_score to zero:
    if (!::std::isfinite(*best_score)) {
      *best_score = 0.0;
    }
  }

  // The pose that is used by the cameras to determine the location of the
  // robot and thus the expected view of the targets.
  Pose *robot_pose_;

  typename HybridEkf::template ExpectedObservationAllocator<
      ExpectedObservationFunctor>
      h_queue_;
  typename HybridEkf::template ExpectedObservationAllocator<
      ExpectedObservationBuilder>
      make_h_queue_;

  friend class ExpectedObservationBuilder;
};  // class TypedLocalizer

}  // namespace control_loops
}  // namespace y2019

#endif  // Y2019_CONTROL_LOOPS_DRIVETRAIN_LOCALIZER_H_