#include "frc971/vision/extrinsics_calibration.h"

#include "aos/time/time.h"
#include "ceres/ceres.h"
#include "frc971/analysis/in_process_plotter.h"
#include "frc971/control_loops/runge_kutta.h"
#include "frc971/vision/calibration_accumulator.h"
#include "frc971/vision/charuco_lib.h"

namespace frc971 {
namespace vision {

namespace chrono = std::chrono;
using aos::distributed_clock;
using aos::monotonic_clock;

constexpr double kGravity = 9.8;

// The basic ideas here are taken from Kalibr
// (https://github.com/ethz-asl/kalibr), but adapted to work with AOS, and to
// be simpler.
//
// Camera readings and IMU readings come in at different times, on different
// time scales. Our first problem is to align them in time so we can actually
// compute an error. This is done in the calibration accumulator code. The
// Kalibr paper uses splines, while this uses Kalman filters to solve the same
// interpolation problem so we can get the expected vs actual pose at the time
// each image arrives.
//
// The cost function is then fed the computed angular and positional error for
// each camera sample before the Kalman filter update. Intuitively, the smaller
// the corrections to the Kalman filter each step, the better the estimate
// should be.
//
// We don't actually implement the angular Kalman filter because the IMU is so
// good. We give the solver an initial position and bias, and let it solve from
// there. This lets us represent drift that is linear in time, which should be
// good enough for a ~1 minute calibration.
//
// TODO(austin): A Kalman smoother a la
// https://stanford.edu/~boyd/papers/pdf/auto_ks.pdf should allow for better
// parallelism, and since we aren't causal, will take that into account a lot
// better.

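// Concretely, the translational filter below tracks a 6-element state
// x = [position; velocity], driven by the bias-corrected, world-frame
// accelerometer reading (minus gravity), and corrected by the camera-derived
// position:
//
//   x_dot = A x + input,  A = [0 I; 0 0]
//   z     = H x,          H = [I 0]
//
// The orientation is integrated separately from the bias-corrected gyro and is
// never corrected by the camera; its error against the camera pose is only
// accumulated for the cost function.
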
// This class takes the initial parameters and biases, and computes the error
// between the measured and expected camera readings. When optimized, this
// gives us a cost function to minimize.
template <typename Scalar>
class CeresPoseFilter : public CalibrationDataObserver {
 public:
  typedef Eigen::Transform<Scalar, 3, Eigen::Affine> Affine3s;

  CeresPoseFilter(Eigen::Quaternion<Scalar> initial_orientation,
                  Eigen::Quaternion<Scalar> imu_to_camera,
                  Eigen::Matrix<Scalar, 3, 1> gyro_bias,
                  Eigen::Matrix<Scalar, 6, 1> initial_state,
                  Eigen::Quaternion<Scalar> board_to_world,
                  Eigen::Matrix<Scalar, 3, 1> imu_to_camera_translation,
                  Scalar gravity_scalar,
                  Eigen::Matrix<Scalar, 3, 1> accelerometer_bias)
      : accel_(Eigen::Matrix<double, 3, 1>::Zero()),
        omega_(Eigen::Matrix<double, 3, 1>::Zero()),
        imu_bias_(gyro_bias),
        orientation_(initial_orientation),
        x_hat_(initial_state),
        p_(Eigen::Matrix<Scalar, 6, 6>::Zero()),
        imu_to_camera_rotation_(imu_to_camera),
        imu_to_camera_translation_(imu_to_camera_translation),
        board_to_world_(board_to_world),
        gravity_scalar_(gravity_scalar),
        accelerometer_bias_(accelerometer_bias) {}

  Scalar gravity_scalar() { return gravity_scalar_; }

  virtual void ObserveCameraUpdate(
      distributed_clock::time_point /*t*/,
      Eigen::Vector3d /*board_to_camera_rotation*/,
      Eigen::Quaternion<Scalar> /*imu_to_world_rotation*/,
      Affine3s /*imu_to_world*/) {}

  // Observes a camera measurement by applying a Kalman filter correction and
  // accumulating up the error associated with the step.
  void UpdateCamera(distributed_clock::time_point t,
                    std::pair<Eigen::Vector3d, Eigen::Vector3d> rt) override {
    Integrate(t);

    const Eigen::Quaternion<Scalar> board_to_camera_rotation(
        frc971::controls::ToQuaternionFromRotationVector(rt.first)
            .cast<Scalar>());
    const Affine3s board_to_camera =
        Eigen::Translation3d(rt.second).cast<Scalar>() *
        board_to_camera_rotation;

    const Affine3s imu_to_camera =
        imu_to_camera_translation_ * imu_to_camera_rotation_;

    // This converts us from (facing the board),
    // x right, y up, z towards us -> x right, y away, z up.
    // Confirmed to be right.

    // Want world -> imu rotation.
    // world <- board <- camera <- imu.
    const Eigen::Quaternion<Scalar> imu_to_world_rotation =
        board_to_world_ * board_to_camera_rotation.inverse() *
        imu_to_camera_rotation_;

    const Affine3s imu_to_world =
        board_to_world_ * board_to_camera.inverse() * imu_to_camera;

    const Eigen::Matrix<Scalar, 3, 1> z =
        imu_to_world * Eigen::Matrix<Scalar, 3, 1>::Zero();

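    // Standard Kalman filter measurement update on the translational state:
    //   y = z - H x,  S = H P H^T + R,  K = P H^T S^-1,
    //   x <- x + K y,  P <- (I - K H) P
    // The innovation y is also recorded as the position error used by the
    // cost function.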
    Eigen::Matrix<Scalar, 3, 6> H = Eigen::Matrix<Scalar, 3, 6>::Zero();
    H(0, 0) = static_cast<Scalar>(1.0);
    H(1, 1) = static_cast<Scalar>(1.0);
    H(2, 2) = static_cast<Scalar>(1.0);
    const Eigen::Matrix<Scalar, 3, 1> y = z - H * x_hat_;

    const Eigen::Matrix<double, 3, 3> R =
        (::Eigen::DiagonalMatrix<double, 3>().diagonal() << ::std::pow(0.01, 2),
         ::std::pow(0.01, 2), ::std::pow(0.01, 2))
            .finished()
            .asDiagonal();

    const Eigen::Matrix<Scalar, 3, 3> S =
        H * p_ * H.transpose() + R.cast<Scalar>();
    const Eigen::Matrix<Scalar, 6, 3> K = p_ * H.transpose() * S.inverse();

    x_hat_ += K * y;
    p_ = (Eigen::Matrix<Scalar, 6, 6>::Identity() - K * H) * p_;

    const Eigen::Quaternion<Scalar> error(imu_to_world_rotation.inverse() *
                                          orientation());

    errors_.emplace_back(
        Eigen::Matrix<Scalar, 3, 1>(error.x(), error.y(), error.z()));
    position_errors_.emplace_back(y);

    ObserveCameraUpdate(t, rt.first, imu_to_world_rotation, imu_to_world);
  }

  virtual void ObserveIMUUpdate(
      distributed_clock::time_point /*t*/,
      std::pair<Eigen::Vector3d, Eigen::Vector3d> /*wa*/) {}

  void UpdateIMU(distributed_clock::time_point t,
                 std::pair<Eigen::Vector3d, Eigen::Vector3d> wa) override {
    Integrate(t);
    omega_ = wa.first;
    accel_ = wa.second;

    ObserveIMUUpdate(t, wa);
  }

  const Eigen::Quaternion<Scalar> &orientation() const { return orientation_; }

  size_t num_errors() const { return errors_.size(); }
  Scalar errorx(size_t i) const { return errors_[i].x(); }
  Scalar errory(size_t i) const { return errors_[i].y(); }
  Scalar errorz(size_t i) const { return errors_[i].z(); }

  size_t num_perrors() const { return position_errors_.size(); }
  Scalar errorpx(size_t i) const { return position_errors_[i].x(); }
  Scalar errorpy(size_t i) const { return position_errors_[i].y(); }
  Scalar errorpz(size_t i) const { return position_errors_[i].z(); }

 private:
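  // The integration state is a single packed 46-vector: 4 quaternion
  // coefficients, then the 6-element x_hat, then the 36 entries of the 6x6
  // covariance matrix.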
  Eigen::Matrix<Scalar, 46, 1> Pack(Eigen::Quaternion<Scalar> q,
                                    Eigen::Matrix<Scalar, 6, 1> x_hat,
                                    Eigen::Matrix<Scalar, 6, 6> p) {
    Eigen::Matrix<Scalar, 46, 1> result = Eigen::Matrix<Scalar, 46, 1>::Zero();
    result.template block<4, 1>(0, 0) = q.coeffs();
    result.template block<6, 1>(4, 0) = x_hat;
    result.template block<36, 1>(10, 0) =
        Eigen::Map<Eigen::Matrix<Scalar, 36, 1>>(p.data(), p.size());

    return result;
  }

  std::tuple<Eigen::Quaternion<Scalar>, Eigen::Matrix<Scalar, 6, 1>,
             Eigen::Matrix<Scalar, 6, 6>>
  UnPack(Eigen::Matrix<Scalar, 46, 1> input) {
    Eigen::Quaternion<Scalar> q(input.template block<4, 1>(0, 0));
    Eigen::Matrix<Scalar, 6, 1> x_hat(input.template block<6, 1>(4, 0));
    Eigen::Matrix<Scalar, 6, 6> p =
        Eigen::Map<Eigen::Matrix<Scalar, 6, 6>>(input.data() + 10, 6, 6);
    return std::make_tuple(q, x_hat, p);
  }

  Eigen::Matrix<Scalar, 46, 1> Derivative(
      const Eigen::Matrix<Scalar, 46, 1> &input) {
    auto [q, x_hat, p] = UnPack(input);

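    // Quaternion kinematics: q_dot = q * (0.5 * omega), with the gyro bias
    // subtracted from the measured angular rate and the 0.5 folded into the
    // pure quaternion omega_q.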
    Eigen::Quaternion<Scalar> omega_q;
    omega_q.w() = Scalar(0.0);
    omega_q.vec() = 0.5 * (omega_.cast<Scalar>() - imu_bias_);
    Eigen::Matrix<Scalar, 4, 1> q_dot = (q * omega_q).coeffs();

    Eigen::Matrix<double, 6, 6> A = Eigen::Matrix<double, 6, 6>::Zero();
    A(0, 3) = 1.0;
    A(1, 4) = 1.0;
    A(2, 5) = 1.0;

    Eigen::Matrix<Scalar, 6, 1> x_hat_dot = A * x_hat;
    x_hat_dot.template block<3, 1>(3, 0) =
        orientation() * (accel_.cast<Scalar>() - accelerometer_bias_) -
        Eigen::Vector3d(0, 0, kGravity).cast<Scalar>() * gravity_scalar_;

    // Initialize the position noise to 0. If the solver is going to back-solve
    // for the most likely starting position, let's just say that the noise is
    // small.
    constexpr double kPositionNoise = 0.0;
    constexpr double kAccelerometerNoise = 2.3e-6 * 9.8;
    constexpr double kIMUdt = 5.0e-4;
    Eigen::Matrix<double, 6, 6> Q_dot(
        (::Eigen::DiagonalMatrix<double, 6>().diagonal()
             << ::std::pow(kPositionNoise, 2) / kIMUdt,
         ::std::pow(kPositionNoise, 2) / kIMUdt,
         ::std::pow(kPositionNoise, 2) / kIMUdt,
         ::std::pow(kAccelerometerNoise, 2) / kIMUdt,
         ::std::pow(kAccelerometerNoise, 2) / kIMUdt,
         ::std::pow(kAccelerometerNoise, 2) / kIMUdt)
            .finished()
            .asDiagonal());
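    // Continuous-time covariance propagation: P_dot = A P + P A^T + Q.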
    Eigen::Matrix<Scalar, 6, 6> p_dot = A.cast<Scalar>() * p +
                                        p * A.transpose().cast<Scalar>() +
                                        Q_dot.cast<Scalar>();

    return Pack(Eigen::Quaternion<Scalar>(q_dot), x_hat_dot, p_dot);
  }

  virtual void ObserveIntegrated(distributed_clock::time_point /*t*/,
                                 Eigen::Matrix<Scalar, 6, 1> /*x_hat*/,
                                 Eigen::Quaternion<Scalar> /*orientation*/,
                                 Eigen::Matrix<Scalar, 6, 6> /*p*/) {}

  void Integrate(distributed_clock::time_point t) {
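    // Propagate the packed (orientation, x_hat, P) state forward from
    // last_time_ to t before the update at time t is applied.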
    if (last_time_ != distributed_clock::min_time) {
      Eigen::Matrix<Scalar, 46, 1> next = control_loops::RungeKutta(
          [this](auto r) { return Derivative(r); },
          Pack(orientation_, x_hat_, p_),
          aos::time::DurationInSeconds(t - last_time_));

      std::tie(orientation_, x_hat_, p_) = UnPack(next);

      // Normalize q so it doesn't drift.
      orientation_.normalize();
    }

    last_time_ = t;
    ObserveIntegrated(t, x_hat_, orientation_, p_);
  }

  Eigen::Matrix<double, 3, 1> accel_;
  Eigen::Matrix<double, 3, 1> omega_;
  Eigen::Matrix<Scalar, 3, 1> imu_bias_;

  // IMU -> world quaternion
  Eigen::Quaternion<Scalar> orientation_;
  Eigen::Matrix<Scalar, 6, 1> x_hat_;
  Eigen::Matrix<Scalar, 6, 6> p_;
  distributed_clock::time_point last_time_ = distributed_clock::min_time;

  Eigen::Quaternion<Scalar> imu_to_camera_rotation_;
  Eigen::Translation<Scalar, 3> imu_to_camera_translation_ =
      Eigen::Translation3d(0, 0, 0).cast<Scalar>();

  Eigen::Quaternion<Scalar> board_to_world_;
  Scalar gravity_scalar_;
  Eigen::Matrix<Scalar, 3, 1> accelerometer_bias_;

  // States:
  //   xyz position
  //   xyz velocity
  //
  // Inputs:
  //   xyz accel
  //
  // Measurement:
  //   xyz position from camera.
  //
  // Since the gyro is so good, we can just solve for the bias and initial
  // position with the solver and see what it learns.

  // The angular errors for each camera sample.
  std::vector<Eigen::Matrix<Scalar, 3, 1>> errors_;
  std::vector<Eigen::Matrix<Scalar, 3, 1>> position_errors_;
};

// Subclass of the filter above which has plotting. This keeps debug code and
// actual code separate.
class PoseFilter : public CeresPoseFilter<double> {
 public:
  PoseFilter(Eigen::Quaternion<double> initial_orientation,
             Eigen::Quaternion<double> imu_to_camera,
             Eigen::Matrix<double, 3, 1> gyro_bias,
             Eigen::Matrix<double, 6, 1> initial_state,
             Eigen::Quaternion<double> board_to_world,
             Eigen::Matrix<double, 3, 1> imu_to_camera_translation,
             double gravity_scalar,
             Eigen::Matrix<double, 3, 1> accelerometer_bias)
      : CeresPoseFilter<double>(initial_orientation, imu_to_camera, gyro_bias,
                                initial_state, board_to_world,
                                imu_to_camera_translation, gravity_scalar,
                                accelerometer_bias) {}

  void Plot() {
    std::vector<double> rx;
    std::vector<double> ry;
    std::vector<double> rz;
    std::vector<double> x;
    std::vector<double> y;
    std::vector<double> z;
    std::vector<double> vx;
    std::vector<double> vy;
    std::vector<double> vz;
    for (const Eigen::Quaternion<double> &q : orientations_) {
      Eigen::Matrix<double, 3, 1> rotation_vector =
          frc971::controls::ToRotationVectorFromQuaternion(q);
      rx.emplace_back(rotation_vector(0, 0));
      ry.emplace_back(rotation_vector(1, 0));
      rz.emplace_back(rotation_vector(2, 0));
    }
    for (const Eigen::Matrix<double, 6, 1> &x_hat : x_hats_) {
      x.emplace_back(x_hat(0));
      y.emplace_back(x_hat(1));
      z.emplace_back(x_hat(2));
      vx.emplace_back(x_hat(3));
      vy.emplace_back(x_hat(4));
      vz.emplace_back(x_hat(5));
    }

    frc971::analysis::Plotter plotter;
    plotter.AddFigure("position");
    plotter.AddLine(times_, rx, "x_hat(0)");
    plotter.AddLine(times_, ry, "x_hat(1)");
    plotter.AddLine(times_, rz, "x_hat(2)");
    plotter.AddLine(ct, cx, "Camera x");
    plotter.AddLine(ct, cy, "Camera y");
    plotter.AddLine(ct, cz, "Camera z");
    plotter.AddLine(ct, cerrx, "Camera error x");
    plotter.AddLine(ct, cerry, "Camera error y");
    plotter.AddLine(ct, cerrz, "Camera error z");
    plotter.Publish();

    plotter.AddFigure("error");
    plotter.AddLine(times_, rx, "x_hat(0)");
    plotter.AddLine(times_, ry, "x_hat(1)");
    plotter.AddLine(times_, rz, "x_hat(2)");
    plotter.AddLine(ct, cerrx, "Camera error x");
    plotter.AddLine(ct, cerry, "Camera error y");
    plotter.AddLine(ct, cerrz, "Camera error z");
    plotter.Publish();

    plotter.AddFigure("imu");
    plotter.AddLine(ct, world_gravity_x, "world_gravity(0)");
    plotter.AddLine(ct, world_gravity_y, "world_gravity(1)");
    plotter.AddLine(ct, world_gravity_z, "world_gravity(2)");
    plotter.AddLine(imut, imu_x, "imu x");
    plotter.AddLine(imut, imu_y, "imu y");
    plotter.AddLine(imut, imu_z, "imu z");
    plotter.AddLine(times_, rx, "rotation x");
    plotter.AddLine(times_, ry, "rotation y");
    plotter.AddLine(times_, rz, "rotation z");
    plotter.Publish();

    plotter.AddFigure("raw");
    plotter.AddLine(imut, imu_x, "imu x");
    plotter.AddLine(imut, imu_y, "imu y");
    plotter.AddLine(imut, imu_z, "imu z");
    plotter.AddLine(imut, imu_ratex, "omega x");
    plotter.AddLine(imut, imu_ratey, "omega y");
    plotter.AddLine(imut, imu_ratez, "omega z");
    plotter.AddLine(ct, raw_cx, "Camera x");
    plotter.AddLine(ct, raw_cy, "Camera y");
    plotter.AddLine(ct, raw_cz, "Camera z");
    plotter.Publish();

    plotter.AddFigure("xyz vel");
    plotter.AddLine(times_, x, "x");
    plotter.AddLine(times_, y, "y");
    plotter.AddLine(times_, z, "z");
    plotter.AddLine(times_, vx, "vx");
    plotter.AddLine(times_, vy, "vy");
    plotter.AddLine(times_, vz, "vz");
    plotter.AddLine(ct, camera_position_x, "Camera x");
    plotter.AddLine(ct, camera_position_y, "Camera y");
    plotter.AddLine(ct, camera_position_z, "Camera z");
    plotter.Publish();

    plotter.Spin();
  }

  void ObserveIntegrated(distributed_clock::time_point t,
                         Eigen::Matrix<double, 6, 1> x_hat,
                         Eigen::Quaternion<double> orientation,
                         Eigen::Matrix<double, 6, 6> p) override {
    VLOG(1) << t << " -> " << p;
    VLOG(1) << t << " xhat -> " << x_hat.transpose();
    times_.emplace_back(chrono::duration<double>(t.time_since_epoch()).count());
    x_hats_.emplace_back(x_hat);
    orientations_.emplace_back(orientation);
  }

  void ObserveIMUUpdate(
      distributed_clock::time_point t,
      std::pair<Eigen::Vector3d, Eigen::Vector3d> wa) override {
    imut.emplace_back(chrono::duration<double>(t.time_since_epoch()).count());
    imu_ratex.emplace_back(wa.first.x());
    imu_ratey.emplace_back(wa.first.y());
    imu_ratez.emplace_back(wa.first.z());
    imu_x.emplace_back(wa.second.x());
    imu_y.emplace_back(wa.second.y());
    imu_z.emplace_back(wa.second.z());

    last_accel_ = wa.second;
  }

  void ObserveCameraUpdate(distributed_clock::time_point t,
                           Eigen::Vector3d board_to_camera_rotation,
                           Eigen::Quaternion<double> imu_to_world_rotation,
                           Eigen::Affine3d imu_to_world) override {
    raw_cx.emplace_back(board_to_camera_rotation(0, 0));
    raw_cy.emplace_back(board_to_camera_rotation(1, 0));
    raw_cz.emplace_back(board_to_camera_rotation(2, 0));

    Eigen::Matrix<double, 3, 1> rotation_vector =
        frc971::controls::ToRotationVectorFromQuaternion(imu_to_world_rotation);
    ct.emplace_back(chrono::duration<double>(t.time_since_epoch()).count());

    Eigen::Matrix<double, 3, 1> cerr =
        frc971::controls::ToRotationVectorFromQuaternion(
            imu_to_world_rotation.inverse() * orientation());

    cx.emplace_back(rotation_vector(0, 0));
    cy.emplace_back(rotation_vector(1, 0));
    cz.emplace_back(rotation_vector(2, 0));

    cerrx.emplace_back(cerr(0, 0));
    cerry.emplace_back(cerr(1, 0));
    cerrz.emplace_back(cerr(2, 0));

    const Eigen::Vector3d world_gravity =
        imu_to_world_rotation * last_accel_ -
        Eigen::Vector3d(0, 0, kGravity) * gravity_scalar();

    const Eigen::Vector3d camera_position =
        imu_to_world * Eigen::Vector3d::Zero();

    world_gravity_x.emplace_back(world_gravity.x());
    world_gravity_y.emplace_back(world_gravity.y());
    world_gravity_z.emplace_back(world_gravity.z());

    camera_position_x.emplace_back(camera_position.x());
    camera_position_y.emplace_back(camera_position.y());
    camera_position_z.emplace_back(camera_position.z());
  }

  std::vector<double> ct;
  std::vector<double> cx;
  std::vector<double> cy;
  std::vector<double> cz;
  std::vector<double> raw_cx;
  std::vector<double> raw_cy;
  std::vector<double> raw_cz;
  std::vector<double> cerrx;
  std::vector<double> cerry;
  std::vector<double> cerrz;

  std::vector<double> world_gravity_x;
  std::vector<double> world_gravity_y;
  std::vector<double> world_gravity_z;
  std::vector<double> imu_x;
  std::vector<double> imu_y;
  std::vector<double> imu_z;
  std::vector<double> camera_position_x;
  std::vector<double> camera_position_y;
  std::vector<double> camera_position_z;

  std::vector<double> imut;
  std::vector<double> imu_ratex;
  std::vector<double> imu_ratey;
  std::vector<double> imu_ratez;

  std::vector<double> times_;
  std::vector<Eigen::Matrix<double, 6, 1>> x_hats_;
  std::vector<Eigen::Quaternion<double>> orientations_;

  Eigen::Matrix<double, 3, 1> last_accel_ = Eigen::Matrix<double, 3, 1>::Zero();
};

// Adapter class from the KF above to a Ceres cost function.
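// Ceres invokes operator() both with S = double (to evaluate the residuals)
// and with S = ceres::Jet (to evaluate the Jacobians via automatic
// differentiation), which is why CeresPoseFilter is templated on its scalar
// type.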
struct CostFunctor {
  CostFunctor(const CalibrationData *d) : data(d) {}

  const CalibrationData *data;

  template <typename S>
  bool operator()(const S *const q1, const S *const q2, const S *const v,
                  const S *const p, const S *const btw, const S *const itc,
                  const S *const gravity_scalar_ptr,
                  const S *const accelerometer_bias_ptr, S *residual) const {
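    // The quaternion parameter blocks are Eigen coeffs() arrays in
    // (x, y, z, w) order, while Eigen::Quaternion's constructor takes
    // (w, x, y, z), hence the index reordering.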
    Eigen::Quaternion<S> initial_orientation(q1[3], q1[0], q1[1], q1[2]);
    Eigen::Quaternion<S> mounting_orientation(q2[3], q2[0], q2[1], q2[2]);
    Eigen::Quaternion<S> board_to_world(btw[3], btw[0], btw[1], btw[2]);
    Eigen::Matrix<S, 3, 1> gyro_bias(v[0], v[1], v[2]);
    Eigen::Matrix<S, 6, 1> initial_state;
    initial_state(0) = p[0];
    initial_state(1) = p[1];
    initial_state(2) = p[2];
    initial_state(3) = p[3];
    initial_state(4) = p[4];
    initial_state(5) = p[5];
    Eigen::Matrix<S, 3, 1> imu_to_camera_translation(itc[0], itc[1], itc[2]);
    Eigen::Matrix<S, 3, 1> accelerometer_bias(accelerometer_bias_ptr[0],
                                              accelerometer_bias_ptr[1],
                                              accelerometer_bias_ptr[2]);

    CeresPoseFilter<S> filter(initial_orientation, mounting_orientation,
                              gyro_bias, initial_state, board_to_world,
                              imu_to_camera_translation, *gravity_scalar_ptr,
                              accelerometer_bias);
    data->ReviewData(&filter);

    for (size_t i = 0; i < filter.num_errors(); ++i) {
      residual[3 * i + 0] = filter.errorx(i);
      residual[3 * i + 1] = filter.errory(i);
      residual[3 * i + 2] = filter.errorz(i);
    }

    for (size_t i = 0; i < filter.num_perrors(); ++i) {
      residual[3 * filter.num_errors() + 3 * i + 0] = filter.errorpx(i);
      residual[3 * filter.num_errors() + 3 * i + 1] = filter.errorpy(i);
      residual[3 * filter.num_errors() + 3 * i + 2] = filter.errorpz(i);
    }

    return true;
  }
};

void Solve(const CalibrationData &data,
           CalibrationParameters *calibration_parameters) {
  ceres::Problem problem;

  ceres::EigenQuaternionParameterization *quaternion_local_parameterization =
      new ceres::EigenQuaternionParameterization();
  // Set up the only cost function (also known as residual). This uses
  // auto-differentiation to obtain the derivative (jacobian).

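  // Each camera sample contributes 3 orientation-error residuals and 3
  // position-error residuals, which is where the camera_samples_size() * 6
  // residual count comes from.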
  ceres::CostFunction *cost_function =
      new ceres::AutoDiffCostFunction<CostFunctor, ceres::DYNAMIC, 4, 4, 3, 6,
                                      4, 3, 1, 3>(
          new CostFunctor(&data), data.camera_samples_size() * 6);
  problem.AddResidualBlock(
      cost_function, new ceres::HuberLoss(1.0),
      calibration_parameters->initial_orientation.coeffs().data(),
      calibration_parameters->imu_to_camera.coeffs().data(),
      calibration_parameters->gyro_bias.data(),
      calibration_parameters->initial_state.data(),
      calibration_parameters->board_to_world.coeffs().data(),
      calibration_parameters->imu_to_camera_translation.data(),
      &calibration_parameters->gravity_scalar,
      calibration_parameters->accelerometer_bias.data());
  problem.SetParameterization(
      calibration_parameters->initial_orientation.coeffs().data(),
      quaternion_local_parameterization);
  problem.SetParameterization(
      calibration_parameters->imu_to_camera.coeffs().data(),
      quaternion_local_parameterization);
  problem.SetParameterization(
      calibration_parameters->board_to_world.coeffs().data(),
      quaternion_local_parameterization);
  for (int i = 0; i < 3; ++i) {
    problem.SetParameterLowerBound(calibration_parameters->gyro_bias.data(), i,
                                   -0.05);
    problem.SetParameterUpperBound(calibration_parameters->gyro_bias.data(), i,
                                   0.05);
    problem.SetParameterLowerBound(
        calibration_parameters->accelerometer_bias.data(), i, -0.05);
    problem.SetParameterUpperBound(
        calibration_parameters->accelerometer_bias.data(), i, 0.05);
  }
  problem.SetParameterLowerBound(&calibration_parameters->gravity_scalar, 0,
                                 0.95);
  problem.SetParameterUpperBound(&calibration_parameters->gravity_scalar, 0,
                                 1.05);

  // Run the solver!
  ceres::Solver::Options options;
  options.minimizer_progress_to_stdout = true;
  options.gradient_tolerance = 1e-12;
  options.function_tolerance = 1e-16;
  options.parameter_tolerance = 1e-12;
  ceres::Solver::Summary summary;
  Solve(options, &problem, &summary);
  LOG(INFO) << summary.FullReport();

  LOG(INFO) << "initial_orientation "
            << calibration_parameters->initial_orientation.coeffs().transpose();
  LOG(INFO) << "imu_to_camera "
            << calibration_parameters->imu_to_camera.coeffs().transpose();
  LOG(INFO) << "imu_to_camera(rotation) "
            << frc971::controls::ToRotationVectorFromQuaternion(
                   calibration_parameters->imu_to_camera)
                   .transpose();
  LOG(INFO) << "gyro_bias " << calibration_parameters->gyro_bias.transpose();
  LOG(INFO) << "board_to_world "
            << calibration_parameters->board_to_world.coeffs().transpose();
  LOG(INFO) << "board_to_world(rotation) "
            << frc971::controls::ToRotationVectorFromQuaternion(
                   calibration_parameters->board_to_world)
                   .transpose();
  LOG(INFO) << "imu_to_camera_translation "
            << calibration_parameters->imu_to_camera_translation.transpose();
  LOG(INFO) << "gravity " << kGravity * calibration_parameters->gravity_scalar;
  LOG(INFO) << "accelerometer bias "
            << calibration_parameters->accelerometer_bias.transpose();
}

void Plot(const CalibrationData &data,
          const CalibrationParameters &calibration_parameters) {
  PoseFilter filter(calibration_parameters.initial_orientation,
                    calibration_parameters.imu_to_camera,
                    calibration_parameters.gyro_bias,
                    calibration_parameters.initial_state,
                    calibration_parameters.board_to_world,
                    calibration_parameters.imu_to_camera_translation,
                    calibration_parameters.gravity_scalar,
                    calibration_parameters.accelerometer_bias);
  data.ReviewData(&filter);
  filter.Plot();
}
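
// A minimal usage sketch (assuming a CalibrationData that has already been
// accumulated, and the CalibrationParameters struct declared in the header
// for this file):
//
//   CalibrationParameters calibration_parameters;
//   Solve(data, &calibration_parameters);
//   Plot(data, calibration_parameters);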

}  // namespace vision
}  // namespace frc971