blob: 47d0cd7313f073ffc942f67517bea90d40dabd58 [file] [log] [blame]
Austin Schuh70cc9552019-01-21 19:46:48 -08001// Copyright (c) 2013 libmv authors.
2//
3// Permission is hereby granted, free of charge, to any person obtaining a copy
4// of this software and associated documentation files (the "Software"), to
5// deal in the Software without restriction, including without limitation the
6// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7// sell copies of the Software, and to permit persons to whom the Software is
8// furnished to do so, subject to the following conditions:
9//
10// The above copyright notice and this permission notice shall be included in
11// all copies or substantial portions of the Software.
12//
13// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19// IN THE SOFTWARE.
20//
21// Author: mierle@gmail.com (Keir Mierle)
22// sergey.vfx@gmail.com (Sergey Sharybin)
23//
24// This is an example application which contains bundle adjustment code used
25// in the Libmv library and Blender. It reads problems from files passed via
26// the command line and runs the bundle adjuster on the problem.
27//
// The problem file is a binary file, for which it is crucial to know in
// which order the bytes of float values are stored. This information is provided
30// by a single character in the beginning of the file. There're two possible
31// values of this byte:
32// - V, which means values in the file are stored with big endian type
33// - v, which means values in the file are stored with little endian type
34//
35// The rest of the file contains data in the following order:
36// - Space in which markers' coordinates are stored in
37// - Camera intrinsics
38// - Number of cameras
39// - Cameras
40// - Number of 3D points
41// - 3D points
42// - Number of markers
43// - Markers
44//
45// Markers' space could either be normalized or image (pixels). This is defined
// by a single character in the file. P means markers in the file are in image
// space, and N means markers are in normalized space.
48//
// Camera intrinsics are described by 8 float values.
// These values go in the following order:
51//
52// - Focal length, principal point X, principal point Y, k1, k2, k3, p1, p2
53//
54// Every camera is described by:
55//
56// - Image for which camera belongs to (single 4 bytes integer value).
57// - Column-major camera rotation matrix, 9 float values.
58// - Camera translation, 3-component vector of float values.
59//
60// Image number shall be greater or equal to zero. Order of cameras does not
61// matter and gaps are possible.
62//
// Every 3D point is described by:
64//
65// - Track number point belongs to (single 4 bytes integer value).
66// - 3D position vector, 3-component vector of float values.
67//
68// Track number shall be greater or equal to zero. Order of tracks does not
69// matter and gaps are possible.
70//
71// Finally every marker is described by:
72//
// - Image marker belongs to (single 4 bytes integer value).
// - Track marker belongs to (single 4 bytes integer value).
75// - 2D marker position vector, (two float values).
76//
// Marker's space is used as a default value for the refine_intrinsics command line
78// flag. This means if there's no refine_intrinsics flag passed via command
79// line, camera intrinsics will be refined if markers in the problem are
80// stored in image space and camera intrinsics will not be refined if markers
81// are in normalized space.
82//
83// Passing refine_intrinsics command line flag defines explicitly whether
84// refinement of intrinsics will happen. Currently, only none and all
85// intrinsics refinement is supported.
86//
87// There're existing problem files dumped from blender stored in folder
88// ../data/libmv-ba-problems.
89
#include <algorithm>
#include <cstdio>
#include <cstring>
#include <fcntl.h>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
95
96#ifdef _MSC_VER
97# include <io.h>
98# define open _open
99# define close _close
100typedef unsigned __int32 uint32_t;
101#else
102#include <stdint.h>
103#include <unistd.h>
104
105// O_BINARY is not defined on unix like platforms, as there is no
106// difference between binary and text files.
107#define O_BINARY 0
108
109#endif
110
111#include "ceres/ceres.h"
112#include "ceres/rotation.h"
113#include "gflags/gflags.h"
114#include "glog/logging.h"
115
// Convenience aliases for the fixed-size Eigen types used throughout this
// example.
typedef Eigen::Matrix<double, 3, 3> Mat3;
typedef Eigen::Matrix<double, 6, 1> Vec6;  // Angle-axis rotation + translation.
typedef Eigen::Vector3d Vec3;
typedef Eigen::Vector4d Vec4;

using std::vector;

// Command line flags; see the file-level comment for their semantics.
DEFINE_string(input, "", "Input File name");
DEFINE_string(refine_intrinsics, "", "Camera intrinsics to be refined. "
              "Options are: none, radial.");
126
127namespace {
128
129// A EuclideanCamera is the location and rotation of the camera
130// viewing an image.
131//
132// image identifies which image this camera represents.
133// R is a 3x3 matrix representing the rotation of the camera.
134// t is a translation vector representing its positions.
135struct EuclideanCamera {
136 EuclideanCamera() : image(-1) {}
137 EuclideanCamera(const EuclideanCamera &c) : image(c.image), R(c.R), t(c.t) {}
138
139 int image;
140 Mat3 R;
141 Vec3 t;
142};
143
144// A Point is the 3D location of a track.
145//
146// track identifies which track this point corresponds to.
147// X represents the 3D position of the track.
148struct EuclideanPoint {
149 EuclideanPoint() : track(-1) {}
150 EuclideanPoint(const EuclideanPoint &p) : track(p.track), X(p.X) {}
151 int track;
152 Vec3 X;
153};
154
// A Marker is the 2D location of a tracked point in an image.
//
// x and y are the position of the marker in pixels from the top left corner
// of the image identified by `image`. All markers corresponding to the same
// target form a track identified by a common track number.
struct Marker {
  int image;    // Image the marker was measured in.
  int track;    // Track (3D point) this marker is an observation of.
  double x, y;  // Position in pixels from the top left corner.
};
165
// Cameras intrinsics to be bundled.
//
// These are bit flags, combined with bitwise OR. BUNDLE_RADIAL actually
// implies bundling of k1 and k2 coefficients only, no bundling of k3 is
// possible at this moment.
enum BundleIntrinsics {
  BUNDLE_NO_INTRINSICS = 0,
  BUNDLE_FOCAL_LENGTH = 1,
  BUNDLE_PRINCIPAL_POINT = 2,
  BUNDLE_RADIAL_K1 = 4,
  BUNDLE_RADIAL_K2 = 8,
  BUNDLE_RADIAL = 12,      // BUNDLE_RADIAL_K1 | BUNDLE_RADIAL_K2.
  BUNDLE_TANGENTIAL_P1 = 16,
  BUNDLE_TANGENTIAL_P2 = 32,
  BUNDLE_TANGENTIAL = 48,  // BUNDLE_TANGENTIAL_P1 | BUNDLE_TANGENTIAL_P2.
};
181
// Denotes which blocks to keep constant during bundling.
// For example it is useful to keep camera translations constant
// when bundling tripod motions.
//
// Bit flags, combined with bitwise OR.
enum BundleConstraints {
  BUNDLE_NO_CONSTRAINTS = 0,
  BUNDLE_NO_TRANSLATION = 1,  // Keep camera translations fixed.
};
189
// The intrinsics need to get combined into a single parameter block; use these
// enums to index instead of numeric constants.
//
// The order matches the 8-value intrinsics layout in the problem file:
// focal length, principal point, radial (k1..k3) and tangential (p1, p2)
// distortion coefficients.
enum {
  OFFSET_FOCAL_LENGTH,
  OFFSET_PRINCIPAL_POINT_X,
  OFFSET_PRINCIPAL_POINT_Y,
  OFFSET_K1,
  OFFSET_K2,
  OFFSET_K3,
  OFFSET_P1,
  OFFSET_P2,
};
202
203// Returns a pointer to the camera corresponding to a image.
204EuclideanCamera *CameraForImage(vector<EuclideanCamera> *all_cameras,
205 const int image) {
206 if (image < 0 || image >= all_cameras->size()) {
207 return NULL;
208 }
209 EuclideanCamera *camera = &(*all_cameras)[image];
210 if (camera->image == -1) {
211 return NULL;
212 }
213 return camera;
214}
215
216const EuclideanCamera *CameraForImage(
217 const vector<EuclideanCamera> &all_cameras,
218 const int image) {
219 if (image < 0 || image >= all_cameras.size()) {
220 return NULL;
221 }
222 const EuclideanCamera *camera = &all_cameras[image];
223 if (camera->image == -1) {
224 return NULL;
225 }
226 return camera;
227}
228
229// Returns maximal image number at which marker exists.
230int MaxImage(const vector<Marker> &all_markers) {
231 if (all_markers.size() == 0) {
232 return -1;
233 }
234
235 int max_image = all_markers[0].image;
236 for (int i = 1; i < all_markers.size(); i++) {
237 max_image = std::max(max_image, all_markers[i].image);
238 }
239 return max_image;
240}
241
242// Returns a pointer to the point corresponding to a track.
243EuclideanPoint *PointForTrack(vector<EuclideanPoint> *all_points,
244 const int track) {
245 if (track < 0 || track >= all_points->size()) {
246 return NULL;
247 }
248 EuclideanPoint *point = &(*all_points)[track];
249 if (point->track == -1) {
250 return NULL;
251 }
252 return point;
253}
254
255// Reader of binary file which makes sure possibly needed endian
256// conversion happens when loading values like floats and integers.
257//
258// File's endian type is reading from a first character of file, which
259// could either be V for big endian or v for little endian. This
260// means you need to design file format assuming first character
261// denotes file endianness in this way.
262class EndianAwareFileReader {
263 public:
264 EndianAwareFileReader(void) : file_descriptor_(-1) {
265 // Get an endian type of the host machine.
266 union {
267 unsigned char bytes[4];
268 uint32_t value;
269 } endian_test = { { 0, 1, 2, 3 } };
270 host_endian_type_ = endian_test.value;
271 file_endian_type_ = host_endian_type_;
272 }
273
274 ~EndianAwareFileReader(void) {
275 if (file_descriptor_ > 0) {
276 close(file_descriptor_);
277 }
278 }
279
280 bool OpenFile(const std::string &file_name) {
281 file_descriptor_ = open(file_name.c_str(), O_RDONLY | O_BINARY);
282 if (file_descriptor_ < 0) {
283 return false;
284 }
285 // Get an endian tpye of data in the file.
286 unsigned char file_endian_type_flag = Read<unsigned char>();
287 if (file_endian_type_flag == 'V') {
288 file_endian_type_ = kBigEndian;
289 } else if (file_endian_type_flag == 'v') {
290 file_endian_type_ = kLittleEndian;
291 } else {
292 LOG(FATAL) << "Problem file is stored in unknown endian type.";
293 }
294 return true;
295 }
296
297 // Read value from the file, will switch endian if needed.
298 template <typename T>
299 T Read(void) const {
300 T value;
301 CHECK_GT(read(file_descriptor_, &value, sizeof(value)), 0);
302 // Switch endian type if file contains data in different type
303 // that current machine.
304 if (file_endian_type_ != host_endian_type_) {
305 value = SwitchEndian<T>(value);
306 }
307 return value;
308 }
309 private:
310 static const long int kLittleEndian = 0x03020100ul;
311 static const long int kBigEndian = 0x00010203ul;
312
313 // Switch endian type between big to little.
314 template <typename T>
315 T SwitchEndian(const T value) const {
316 if (sizeof(T) == 4) {
317 unsigned int temp_value = static_cast<unsigned int>(value);
318 return ((temp_value >> 24)) |
319 ((temp_value << 8) & 0x00ff0000) |
320 ((temp_value >> 8) & 0x0000ff00) |
321 ((temp_value << 24));
322 } else if (sizeof(T) == 1) {
323 return value;
324 } else {
325 LOG(FATAL) << "Entered non-implemented part of endian switching function.";
326 }
327 }
328
329 int host_endian_type_;
330 int file_endian_type_;
331 int file_descriptor_;
332};
333
334// Read 3x3 column-major matrix from the file
335void ReadMatrix3x3(const EndianAwareFileReader &file_reader,
336 Mat3 *matrix) {
337 for (int i = 0; i < 9; i++) {
338 (*matrix)(i % 3, i / 3) = file_reader.Read<float>();
339 }
340}
341
342// Read 3-vector from file
343void ReadVector3(const EndianAwareFileReader &file_reader,
344 Vec3 *vector) {
345 for (int i = 0; i < 3; i++) {
346 (*vector)(i) = file_reader.Read<float>();
347 }
348}
349
// Reads a bundle adjustment problem from the file.
//
// file_name denotes from which file to read the problem.
// camera_intrinsics will contain initial camera intrinsics values.
//
// all_cameras is a vector of all reconstructed cameras to be optimized,
// vector element with number i will contain camera for image i.
//
// all_points is a vector of all reconstructed 3D points to be optimized,
// vector element with number i will contain point for track i.
//
// all_markers is a vector of all tracked markers existing in
// the problem. Only used for reprojection error calculation, stays
// unchanged during optimization.
//
// is_image_space is set to true when markers are stored in image (pixel)
// space, false when they are in normalized space.
//
// Returns false if any kind of error happened during
// reading.
//
// NOTE(review): malformed content aborts via LOG(FATAL) inside the reader
// rather than returning false, and camera/track indices from the file are
// trusted (a negative index would index out of bounds) -- acceptable for an
// example tool reading trusted dumps, but confirm before reusing elsewhere.
bool ReadProblemFromFile(const std::string &file_name,
                         double camera_intrinsics[8],
                         vector<EuclideanCamera> *all_cameras,
                         vector<EuclideanPoint> *all_points,
                         bool *is_image_space,
                         vector<Marker> *all_markers) {
  EndianAwareFileReader file_reader;
  if (!file_reader.OpenFile(file_name)) {
    return false;
  }

  // Read markers' space flag: 'P' means pixel (image) space, 'N' means
  // normalized space.
  unsigned char is_image_space_flag = file_reader.Read<unsigned char>();
  if (is_image_space_flag == 'P') {
    *is_image_space = true;
  } else if (is_image_space_flag == 'N') {
    *is_image_space = false;
  } else {
    LOG(FATAL) << "Problem file contains markers stored in unknown space.";
  }

  // Read camera intrinsics in OFFSET_* order: focal length, principal
  // point x/y, k1, k2, k3, p1, p2. Stored as floats, widened to double.
  for (int i = 0; i < 8; i++) {
    camera_intrinsics[i] = file_reader.Read<float>();
  }

  // Read all cameras. Cameras are keyed by image number; the vector grows
  // on demand, so gaps are allowed (unused slots keep image == -1).
  int number_of_cameras = file_reader.Read<int>();
  for (int i = 0; i < number_of_cameras; i++) {
    EuclideanCamera camera;

    camera.image = file_reader.Read<int>();
    ReadMatrix3x3(file_reader, &camera.R);
    ReadVector3(file_reader, &camera.t);

    if (camera.image >= all_cameras->size()) {
      all_cameras->resize(camera.image + 1);
    }

    (*all_cameras)[camera.image].image = camera.image;
    (*all_cameras)[camera.image].R = camera.R;
    (*all_cameras)[camera.image].t = camera.t;
  }

  LOG(INFO) << "Read " << number_of_cameras << " cameras.";

  // Read all reconstructed 3D points, keyed by track number. As with
  // cameras, the vector grows on demand and gaps are possible.
  int number_of_points = file_reader.Read<int>();
  for (int i = 0; i < number_of_points; i++) {
    EuclideanPoint point;

    point.track = file_reader.Read<int>();
    ReadVector3(file_reader, &point.X);

    if (point.track >= all_points->size()) {
      all_points->resize(point.track + 1);
    }

    (*all_points)[point.track].track = point.track;
    (*all_points)[point.track].X = point.X;
  }

  LOG(INFO) << "Read " << number_of_points << " points.";

  // And finally read all markers: (image, track, x, y) tuples, stored
  // densely in file order.
  int number_of_markers = file_reader.Read<int>();
  for (int i = 0; i < number_of_markers; i++) {
    Marker marker;

    marker.image = file_reader.Read<int>();
    marker.track = file_reader.Read<int>();
    marker.x = file_reader.Read<float>();
    marker.y = file_reader.Read<float>();

    all_markers->push_back(marker);
  }

  LOG(INFO) << "Read " << number_of_markers << " markers.";

  return true;
}
448
// Apply camera intrinsics to the normalized point to get image coordinates.
// This applies the radial lens distortion to a point which is in normalized
// camera coordinates (i.e. the principal point is at (0, 0)) to get image
// coordinates in pixels. Templated for use with autodifferentiation.
template <typename T>
inline void ApplyRadialDistortionCameraIntrinsics(const T &focal_length_x,
                                                  const T &focal_length_y,
                                                  const T &principal_point_x,
                                                  const T &principal_point_y,
                                                  const T &k1,
                                                  const T &k2,
                                                  const T &k3,
                                                  const T &p1,
                                                  const T &p2,
                                                  const T &normalized_x,
                                                  const T &normalized_y,
                                                  T *image_x,
                                                  T *image_y) {
  const T &x = normalized_x;
  const T &y = normalized_y;

  // Radial distortion: a polynomial in even powers of the radius.
  T r2 = x * x + y * y;
  T r4 = r2 * r2;
  T r6 = r4 * r2;
  T radial = 1.0 + k1 * r2 + k2 * r4 + k3 * r6;

  // Distorted coordinates: radial scaling plus the tangential terms.
  T distorted_x = x * radial + 2.0 * p1 * x * y + p2 * (r2 + 2.0 * x * x);
  T distorted_y = y * radial + 2.0 * p2 * x * y + p1 * (r2 + 2.0 * y * y);

  // Map from normalized camera coordinates into pixel coordinates.
  *image_x = focal_length_x * distorted_x + principal_point_x;
  *image_y = focal_length_y * distorted_y + principal_point_y;
}
482
// Cost functor which computes reprojection error of 3D point X
// on camera defined by angle-axis rotation and its translation
// (which are in the same block due to optimization reasons).
//
// This functor uses a radial distortion model.
struct OpenCVReprojectionError {
  // observed_x/observed_y: marker position, in the same space (pixel or
  // normalized) as the rest of the problem's markers.
  OpenCVReprojectionError(const double observed_x, const double observed_y)
      : observed_x(observed_x), observed_y(observed_y) {}

  // Computes residuals[0..1] = predicted - observed marker position.
  //
  // intrinsics: 8 values indexed by the OFFSET_* enum.
  // R_t: 6 values, angle-axis rotation followed by translation.
  // X: 3D point position.
  template <typename T>
  bool operator()(const T* const intrinsics,
                  const T* const R_t, // Rotation denoted by angle axis
                                      // followed with translation
                  const T* const X, // Point coordinates 3x1.
                  T* residuals) const {
    // Unpack the intrinsics.
    const T& focal_length = intrinsics[OFFSET_FOCAL_LENGTH];
    const T& principal_point_x = intrinsics[OFFSET_PRINCIPAL_POINT_X];
    const T& principal_point_y = intrinsics[OFFSET_PRINCIPAL_POINT_Y];
    const T& k1 = intrinsics[OFFSET_K1];
    const T& k2 = intrinsics[OFFSET_K2];
    const T& k3 = intrinsics[OFFSET_K3];
    const T& p1 = intrinsics[OFFSET_P1];
    const T& p2 = intrinsics[OFFSET_P2];

    // Compute projective coordinates: x = RX + t.
    T x[3];

    ceres::AngleAxisRotatePoint(R_t, X, x);
    x[0] += R_t[3];
    x[1] += R_t[4];
    x[2] += R_t[5];

    // Compute normalized coordinates: x /= x[2].
    // NOTE(review): no guard against x[2] == 0 (a point in the camera
    // plane); presumably valid reconstructions never hit this -- confirm.
    T xn = x[0] / x[2];
    T yn = x[1] / x[2];

    T predicted_x, predicted_y;

    // Apply distortion to the normalized points to get (xd, yd).
    // TODO(keir): Do early bailouts for zero distortion; these are expensive
    // jet operations.
    ApplyRadialDistortionCameraIntrinsics(focal_length,
                                          focal_length,
                                          principal_point_x,
                                          principal_point_y,
                                          k1, k2, k3,
                                          p1, p2,
                                          xn, yn,
                                          &predicted_x,
                                          &predicted_y);

    // The error is the difference between the predicted and observed position.
    residuals[0] = predicted_x - observed_x;
    residuals[1] = predicted_y - observed_y;

    return true;
  }

  const double observed_x;  // Observed marker position, x component.
  const double observed_y;  // Observed marker position, y component.
};
545
546// Print a message to the log which camera intrinsics are gonna to be optimized.
547void BundleIntrinsicsLogMessage(const int bundle_intrinsics) {
548 if (bundle_intrinsics == BUNDLE_NO_INTRINSICS) {
549 LOG(INFO) << "Bundling only camera positions.";
550 } else {
551 std::string bundling_message = "";
552
553#define APPEND_BUNDLING_INTRINSICS(name, flag) \
554 if (bundle_intrinsics & flag) { \
555 if (!bundling_message.empty()) { \
556 bundling_message += ", "; \
557 } \
558 bundling_message += name; \
559 } (void)0
560
561 APPEND_BUNDLING_INTRINSICS("f", BUNDLE_FOCAL_LENGTH);
562 APPEND_BUNDLING_INTRINSICS("px, py", BUNDLE_PRINCIPAL_POINT);
563 APPEND_BUNDLING_INTRINSICS("k1", BUNDLE_RADIAL_K1);
564 APPEND_BUNDLING_INTRINSICS("k2", BUNDLE_RADIAL_K2);
565 APPEND_BUNDLING_INTRINSICS("p1", BUNDLE_TANGENTIAL_P1);
566 APPEND_BUNDLING_INTRINSICS("p2", BUNDLE_TANGENTIAL_P2);
567
568 LOG(INFO) << "Bundling " << bundling_message << ".";
569 }
570}
571
572// Print a message to the log containing all the camera intriniscs values.
573void PrintCameraIntrinsics(const char *text, const double *camera_intrinsics) {
574 std::ostringstream intrinsics_output;
575
576 intrinsics_output << "f=" << camera_intrinsics[OFFSET_FOCAL_LENGTH];
577
578 intrinsics_output <<
579 " cx=" << camera_intrinsics[OFFSET_PRINCIPAL_POINT_X] <<
580 " cy=" << camera_intrinsics[OFFSET_PRINCIPAL_POINT_Y];
581
582#define APPEND_DISTORTION_COEFFICIENT(name, offset) \
583 { \
584 if (camera_intrinsics[offset] != 0.0) { \
585 intrinsics_output << " " name "=" << camera_intrinsics[offset]; \
586 } \
587 } (void)0
588
589 APPEND_DISTORTION_COEFFICIENT("k1", OFFSET_K1);
590 APPEND_DISTORTION_COEFFICIENT("k2", OFFSET_K2);
591 APPEND_DISTORTION_COEFFICIENT("k3", OFFSET_K3);
592 APPEND_DISTORTION_COEFFICIENT("p1", OFFSET_P1);
593 APPEND_DISTORTION_COEFFICIENT("p2", OFFSET_P2);
594
595#undef APPEND_DISTORTION_COEFFICIENT
596
597 LOG(INFO) << text << intrinsics_output.str();
598}
599
// Get a vector of camera's rotations denoted by angle axis
// conjuncted with translations into single block
//
// Element with index i matches to a rotation+translation for
// camera at image i.
//
// Slots for images without a reconstructed camera are left
// default-initialized; they are skipped on unpacking as well.
vector<Vec6> PackCamerasRotationAndTranslation(
    const vector<Marker> &all_markers,
    const vector<EuclideanCamera> &all_cameras) {
  vector<Vec6> all_cameras_R_t;
  int max_image = MaxImage(all_markers);

  all_cameras_R_t.resize(max_image + 1);

  for (int i = 0; i <= max_image; i++) {
    const EuclideanCamera *camera = CameraForImage(all_cameras, i);

    if (!camera) {
      continue;
    }

    // First three elements are the angle-axis rotation converted from the
    // camera's rotation matrix, last three are the translation.
    ceres::RotationMatrixToAngleAxis(&camera->R(0, 0),
                                     &all_cameras_R_t[i](0));
    all_cameras_R_t[i].tail<3>() = camera->t;
  }

  return all_cameras_R_t;
}
627
// Convert camera rotations from angle axis back to rotation matrix form,
// writing rotation and translation back into all_cameras.
void UnpackCamerasRotationAndTranslation(
    const vector<Marker> &all_markers,
    const vector<Vec6> &all_cameras_R_t,
    vector<EuclideanCamera> *all_cameras) {
  int max_image = MaxImage(all_markers);

  for (int i = 0; i <= max_image; i++) {
    EuclideanCamera *camera = CameraForImage(all_cameras, i);

    // Images without a reconstructed camera have no packed block to copy.
    if (!camera) {
      continue;
    }

    ceres::AngleAxisToRotationMatrix(&all_cameras_R_t[i](0),
                                     &camera->R(0, 0));
    camera->t = all_cameras_R_t[i].tail<3>();
  }
}
647
// Runs the bundle adjustment: refines camera poses, 3D point positions and
// (optionally) the shared camera intrinsics so the total reprojection error
// over all markers is minimized.
//
// all_markers: observations driving the reprojection error (not modified).
// bundle_intrinsics: bitmask of BundleIntrinsics flags selecting which
//     intrinsics to refine; BUNDLE_NO_INTRINSICS keeps them all constant.
// bundle_constraints: bitmask of BundleConstraints; BUNDLE_NO_TRANSLATION
//     keeps camera translations fixed (tripod motion).
// camera_intrinsics: 8 values indexed by OFFSET_*, refined in place.
// all_cameras, all_points: refined in place.
void EuclideanBundleCommonIntrinsics(const vector<Marker> &all_markers,
                                     const int bundle_intrinsics,
                                     const int bundle_constraints,
                                     double *camera_intrinsics,
                                     vector<EuclideanCamera> *all_cameras,
                                     vector<EuclideanPoint> *all_points) {
  PrintCameraIntrinsics("Original intrinsics: ", camera_intrinsics);

  ceres::Problem::Options problem_options;
  ceres::Problem problem(problem_options);

  // Convert cameras rotations to angle axis and merge with translation
  // into single parameter block for maximal minimization speed
  //
  // Block for minimization has got the following structure:
  //   <3 elements for angle-axis> <3 elements for translation>
  vector<Vec6> all_cameras_R_t =
    PackCamerasRotationAndTranslation(all_markers, *all_cameras);

  // Parameterization used to restrict camera motion for modal solvers.
  ceres::SubsetParameterization *constant_transform_parameterization = NULL;
  if (bundle_constraints & BUNDLE_NO_TRANSLATION) {
    std::vector<int> constant_translation;

    // First three elements are rotation, last three are translation.
    constant_translation.push_back(3);
    constant_translation.push_back(4);
    constant_translation.push_back(5);

    constant_transform_parameterization =
      new ceres::SubsetParameterization(6, constant_translation);
  }

  // One residual block per marker whose camera and point both exist.
  int num_residuals = 0;
  bool have_locked_camera = false;
  for (int i = 0; i < all_markers.size(); ++i) {
    const Marker &marker = all_markers[i];
    EuclideanCamera *camera = CameraForImage(all_cameras, marker.image);
    EuclideanPoint *point = PointForTrack(all_points, marker.track);
    // Markers referring to a missing camera or point contribute nothing.
    if (camera == NULL || point == NULL) {
      continue;
    }

    // Rotation of camera denoted in angle axis followed with
    // camera translation.
    double *current_camera_R_t = &all_cameras_R_t[camera->image](0);

    problem.AddResidualBlock(new ceres::AutoDiffCostFunction<
        OpenCVReprojectionError, 2, 8, 6, 3>(
            new OpenCVReprojectionError(
                marker.x,
                marker.y)),
        NULL,
        camera_intrinsics,
        current_camera_R_t,
        &point->X(0));

    // We lock the first camera to better deal with scene orientation ambiguity.
    if (!have_locked_camera) {
      problem.SetParameterBlockConstant(current_camera_R_t);
      have_locked_camera = true;
    }

    if (bundle_constraints & BUNDLE_NO_TRANSLATION) {
      problem.SetParameterization(current_camera_R_t,
                                  constant_transform_parameterization);
    }

    num_residuals++;
  }
  LOG(INFO) << "Number of residuals: " << num_residuals;

  if (!num_residuals) {
    LOG(INFO) << "Skipping running minimizer with zero residuals";
    return;
  }

  BundleIntrinsicsLogMessage(bundle_intrinsics);

  if (bundle_intrinsics == BUNDLE_NO_INTRINSICS) {
    // No camera intrinsics are being refined,
    // set the whole parameter block as constant for best performance.
    problem.SetParameterBlockConstant(camera_intrinsics);
  } else {
    // Set the camera intrinsics that are not to be bundled as
    // constant using some macro trickery.

    std::vector<int> constant_intrinsics;
#define MAYBE_SET_CONSTANT(bundle_enum, offset) \
    if (!(bundle_intrinsics & bundle_enum)) { \
      constant_intrinsics.push_back(offset); \
    }
    MAYBE_SET_CONSTANT(BUNDLE_FOCAL_LENGTH, OFFSET_FOCAL_LENGTH);
    MAYBE_SET_CONSTANT(BUNDLE_PRINCIPAL_POINT, OFFSET_PRINCIPAL_POINT_X);
    MAYBE_SET_CONSTANT(BUNDLE_PRINCIPAL_POINT, OFFSET_PRINCIPAL_POINT_Y);
    MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K1, OFFSET_K1);
    MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K2, OFFSET_K2);
    MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P1, OFFSET_P1);
    MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P2, OFFSET_P2);
#undef MAYBE_SET_CONSTANT

    // Always set K3 constant, it's not used at the moment.
    constant_intrinsics.push_back(OFFSET_K3);

    ceres::SubsetParameterization *subset_parameterization =
      new ceres::SubsetParameterization(8, constant_intrinsics);

    problem.SetParameterization(camera_intrinsics, subset_parameterization);
  }

  // Configure the solver.
  ceres::Solver::Options options;
  options.use_nonmonotonic_steps = true;
  options.preconditioner_type = ceres::SCHUR_JACOBI;
  options.linear_solver_type = ceres::ITERATIVE_SCHUR;
  options.use_inner_iterations = true;
  options.max_num_iterations = 100;
  options.minimizer_progress_to_stdout = true;

  // Solve!
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);

  std::cout << "Final report:\n" << summary.FullReport();

  // Copy rotations and translations back.
  UnpackCamerasRotationAndTranslation(all_markers,
                                      all_cameras_R_t,
                                      all_cameras);

  PrintCameraIntrinsics("Final intrinsics: ", camera_intrinsics);
}
780} // namespace
781
// Entry point: parses flags, reads the problem file and runs the bundler.
int main(int argc, char **argv) {
  CERES_GFLAGS_NAMESPACE::ParseCommandLineFlags(&argc, &argv, true);
  google::InitGoogleLogging(argv[0]);

  if (FLAGS_input.empty()) {
    LOG(ERROR) << "Usage: libmv_bundle_adjuster --input=blender_problem";
    return EXIT_FAILURE;
  }

  // Problem state; all of it is filled in by ReadProblemFromFile.
  double camera_intrinsics[8];
  vector<EuclideanCamera> all_cameras;
  vector<EuclideanPoint> all_points;
  bool is_image_space;
  vector<Marker> all_markers;

  if (!ReadProblemFromFile(FLAGS_input,
                           camera_intrinsics,
                           &all_cameras,
                           &all_points,
                           &is_image_space,
                           &all_markers)) {
    LOG(ERROR) << "Error reading problem file";
    return EXIT_FAILURE;
  }

  // If there's no refine_intrinsics passed via command line
  // (in this case FLAGS_refine_intrinsics will be an empty string)
  // we use problem's settings to detect whether intrinsics
  // shall be refined or not.
  //
  // Namely, if problem has got markers stored in image (pixel)
  // space, we do full intrinsics refinement. If markers are
  // stored in normalized space, and refine_intrinsics is not
  // set, no refining will happen.
  //
  // Using command line argument refine_intrinsics will explicitly
  // declare which intrinsics need to be refined and in this case
  // refining flags does not depend on problem at all.
  int bundle_intrinsics = BUNDLE_NO_INTRINSICS;
  if (FLAGS_refine_intrinsics.empty()) {
    if (is_image_space) {
      bundle_intrinsics = BUNDLE_FOCAL_LENGTH | BUNDLE_RADIAL;
    }
  } else {
    if (FLAGS_refine_intrinsics == "radial") {
      bundle_intrinsics = BUNDLE_FOCAL_LENGTH | BUNDLE_RADIAL;
    } else if (FLAGS_refine_intrinsics != "none") {
      LOG(ERROR) << "Unsupported value for refine-intrinsics";
      return EXIT_FAILURE;
    }
  }

  // Run the bundler.
  EuclideanBundleCommonIntrinsics(all_markers,
                                  bundle_intrinsics,
                                  BUNDLE_NO_CONSTRAINTS,
                                  camera_intrinsics,
                                  &all_cameras,
                                  &all_points);

  return EXIT_SUCCESS;
}