#include <numeric>

#include "aos/configuration.h"
#include "aos/events/logging/log_reader.h"
#include "aos/events/simulated_event_loop.h"
#include "aos/init.h"
#include "aos/util/mcap_logger.h"
#include "frc971/control_loops/pose.h"
#include "frc971/control_loops/quaternion_utils.h"
#include "frc971/vision/calibration_generated.h"
#include "frc971/vision/charuco_lib.h"
#include "frc971/vision/target_mapper.h"
#include "frc971/vision/visualize_robot.h"
// clang-format off
// OpenCV eigen files must be included after Eigen includes
#include "opencv2/aruco.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/core/eigen.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
// clang-format on
#include "y2023/constants/simulated_constants_sender.h"
#include "y2023/vision/aprilrobotics.h"
#include "y2023/vision/vision_util.h"

DEFINE_string(config, "",
              "If set, override the log's config file with this one.");
DEFINE_string(constants_path, "y2023/constants/constants.json",
              "Path to the constants file");
DEFINE_string(target_type, "charuco_diamond",
              "Type of target being used [aruco, charuco, charuco_diamond]");
DEFINE_int32(team_number, 0,
             "Required: Use the calibration for a node with this team number");
DEFINE_uint64(
    wait_key, 1,
    "Time in ms to wait between images (0 to wait indefinitely until click)");

// Calibrate extrinsic relationship between cameras using two targets
// seen jointly between cameras. Uses two types of information: 1)
// when a single camera sees two targets, we estimate the pose between
// targets, and 2) when two separate cameras each see a target, we can
// use the pose between targets to estimate the pose between cameras.
//
// We then create the extrinsics for the robot by starting with the
// given extrinsic for camera 1 (between imu/robot and camera frames)
// and then map each subsequent camera based on the data collected and
// the extrinsic poses computed here.
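//
// As a sketch of the frame chaining involved (the names here are
// illustrative, not identifiers from the code below): if camera A sees
// target 0 and camera B sees target 1 at nearly the same time, then
//   H_cameraA_cameraB = H_cameraA_target0 * H_target0_target1 *
//                       H_cameraB_target1.inverse()
// and camera B's robot extrinsic follows from camera A's:
//   H_robot_cameraB = H_robot_cameraA * H_cameraA_cameraB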

// TODO<Jim>: Should export a new extrinsic file for each camera
// TODO<Jim>: Not currently using estimate from pi1->pi4 -- should do full
// estimation, and probably also include camera->imu extrinsics from all
// cameras, not just pi1
// TODO<Jim>: Should add ability to do this with apriltags, since they're on
// the field

namespace y2023 {
namespace vision {
using frc971::vision::DataAdapter;
using frc971::vision::ImageCallback;
using frc971::vision::PoseUtils;
using frc971::vision::TargetMap;
using frc971::vision::TargetMapper;
namespace calibration = frc971::vision::calibration;

static constexpr double kImagePeriodMs =
    1.0 / 30.0 * 1000.0;  // Image capture period in ms

// Change reference frame from camera to robot
Eigen::Affine3d CameraToRobotDetection(Eigen::Affine3d H_camera_target,
                                       Eigen::Affine3d extrinsics) {
  const Eigen::Affine3d H_robot_camera = extrinsics;
  const Eigen::Affine3d H_robot_target = H_robot_camera * H_camera_target;
  return H_robot_target;
}

struct TimestampedPiDetection {
  aos::distributed_clock::time_point time;
  // Pose of target relative to the camera
  Eigen::Affine3d H_camera_target;
  // Name of the pi
  std::string pi_name;
  int board_id;
};

TimestampedPiDetection last_observation;
std::vector<std::pair<TimestampedPiDetection, TimestampedPiDetection>>
    detection_list;
std::vector<TimestampedPiDetection> two_board_extrinsics_list;

// TODO<jim>: Implement variance calcs
Eigen::Affine3d ComputeAveragePose(
    std::vector<Eigen::Vector3d> &translation_list,
    std::vector<Eigen::Vector4d> &rotation_list,
    Eigen::Vector3d *translation_variance = nullptr,
    Eigen::Vector3d *rotation_variance = nullptr) {
  Eigen::Vector3d avg_translation =
      std::accumulate(translation_list.begin(), translation_list.end(),
                      Eigen::Vector3d{0, 0, 0}) /
      translation_list.size();

  // TODO<Jim>: Use QuaternionMean from quaternion_utils.cc (but this
  // requires a fixed number of quaternions to be averaged)
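  // Note: this component-wise average is only meaningful when all the
  // quaternions lie in the same hemisphere (q and -q encode the same
  // rotation); observations from continuous tracking of a static target
  // should generally satisfy that assumption.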
  Eigen::Vector4d avg_rotation =
      std::accumulate(rotation_list.begin(), rotation_list.end(),
                      Eigen::Vector4d{0, 0, 0, 0}) /
      rotation_list.size();
  // Normalize, so it's a valid quaternion
  avg_rotation = avg_rotation / avg_rotation.norm();
  Eigen::Quaterniond avg_rotation_q{avg_rotation[0], avg_rotation[1],
                                    avg_rotation[2], avg_rotation[3]};
  Eigen::Translation3d translation(avg_translation);
  Eigen::Affine3d return_pose = translation * avg_rotation_q;
  // Variance computation isn't implemented yet (see TODO above); zero the
  // outputs so callers never read uninitialized values
  if (translation_variance != nullptr) {
    *translation_variance = Eigen::Vector3d::Zero();
  }
  if (rotation_variance != nullptr) {
    *rotation_variance = Eigen::Vector3d::Zero();
  }

  return return_pose;
}

Eigen::Affine3d ComputeAveragePose(
    std::vector<Eigen::Affine3d> &pose_list,
    Eigen::Vector3d *translation_variance = nullptr,
    Eigen::Vector3d *rotation_variance = nullptr) {
  std::vector<Eigen::Vector3d> translation_list;
  std::vector<Eigen::Vector4d> rotation_list;

  for (const Eigen::Affine3d &pose : pose_list) {
    translation_list.push_back(pose.translation());
    Eigen::Quaterniond quat(pose.rotation().matrix());
    rotation_list.push_back(
        Eigen::Vector4d(quat.w(), quat.x(), quat.y(), quat.z()));
  }

  return ComputeAveragePose(translation_list, rotation_list,
                            translation_variance, rotation_variance);
}

Eigen::Affine3d ComputeAveragePose(
    std::vector<TimestampedPiDetection> &pose_list,
    Eigen::Vector3d *translation_variance = nullptr,
    Eigen::Vector3d *rotation_variance = nullptr) {
  std::vector<Eigen::Vector3d> translation_list;
  std::vector<Eigen::Vector4d> rotation_list;

  for (const TimestampedPiDetection &pose : pose_list) {
    translation_list.push_back(pose.H_camera_target.translation());
    Eigen::Quaterniond quat(pose.H_camera_target.rotation().matrix());
    rotation_list.push_back(
        Eigen::Vector4d(quat.w(), quat.x(), quat.y(), quat.z()));
  }
  return ComputeAveragePose(translation_list, rotation_list,
                            translation_variance, rotation_variance);
}
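
// Example usage of the averaging helpers (a sketch; the poses and
// variable names are placeholders, not values from a log):
//   std::vector<Eigen::Affine3d> poses = {pose_a, pose_b, pose_c};
//   Eigen::Vector3d t_var, r_var;
//   Eigen::Affine3d avg = ComputeAveragePose(poses, &t_var, &r_var);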

void ProcessImage(aos::EventLoop *event_loop, cv::Mat rgb_image,
                  const aos::monotonic_clock::time_point eof,
                  aos::distributed_clock::time_point distributed_eof,
                  frc971::vision::CharucoExtractor &charuco_extractor,
                  std::string pi_name) {
  std::vector<cv::Vec4i> charuco_ids;
  std::vector<std::vector<cv::Point2f>> charuco_corners;
  bool valid = false;
  std::vector<Eigen::Vector3d> rvecs_eigen;
  std::vector<Eigen::Vector3d> tvecs_eigen;
  charuco_extractor.ProcessImage(rgb_image, eof, event_loop->monotonic_now(),
                                 charuco_ids, charuco_corners, valid,
                                 rvecs_eigen, tvecs_eigen);

  if (valid) {
    if (tvecs_eigen.size() == 2) {
      VLOG(2) << "Saw two boards in same view from " << pi_name;
      // Handle when we see two boards at once
      std::vector<TimestampedPiDetection> detections;
      for (uint i = 0; i < tvecs_eigen.size(); i++) {
        Eigen::Quaternion<double> rotation(
            frc971::controls::ToQuaternionFromRotationVector(rvecs_eigen[i]));
        Eigen::Translation3d translation(tvecs_eigen[i]);
        Eigen::Affine3d H_camera_board = translation * rotation;
        TimestampedPiDetection new_observation{
            .time = distributed_eof,
            .H_camera_target = H_camera_board,
            .pi_name = pi_name,
            .board_id = charuco_ids[i][0]};
        detections.emplace_back(new_observation);
      }
      Eigen::Affine3d H_boardA_boardB =
          detections[0].H_camera_target.inverse() *
          detections[1].H_camera_target;

      TimestampedPiDetection board_extrinsic{
          .time = distributed_eof,
          .H_camera_target = H_boardA_boardB,
          .pi_name = pi_name,
          .board_id = charuco_ids[0][0]};

      // Make sure we've got them in the right order (sorted by id)
      if (detections[0].board_id < detections[1].board_id) {
        two_board_extrinsics_list.push_back(board_extrinsic);
      } else {
        // Flip them around
        board_extrinsic.H_camera_target =
            board_extrinsic.H_camera_target.inverse();
        board_extrinsic.board_id = charuco_ids[1][0];
        two_board_extrinsics_list.push_back(board_extrinsic);
      }
    } else {
      VLOG(1) << "Saw single board in camera " << pi_name;
      Eigen::Quaternion<double> rotation(
          frc971::controls::ToQuaternionFromRotationVector(rvecs_eigen[0]));
      Eigen::Translation3d translation(tvecs_eigen[0]);
      Eigen::Affine3d H_camera_board = translation * rotation;
      TimestampedPiDetection new_observation{
          .time = distributed_eof,
          .H_camera_target = H_camera_board,
          .pi_name = pi_name,
          .board_id = charuco_ids[0][0]};

      VLOG(2) << "Checking versus last result from "
              << last_observation.pi_name << " at time "
              << last_observation.time << " with delta time = "
              << std::abs((distributed_eof - last_observation.time).count());
      // Only pair up two observations if they're near enough to each other
      // in time; they should be within +/- kImagePeriodMs / 2 of each
      // other. This should make sure we're always pairing the closest
      // images, without missing too many possible pairs between cameras
      // TODO<Jim>: Should also check that (rotational) velocity of the robot
      // is small
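      // As a worked check of the threshold below (at the 30 Hz capture
      // rate above): kImagePeriodMs = 1000 / 30 ~= 33.33 ms, so
      // kImagePeriodMs / 2.0 * 1000000 truncates to 16,666,666 ns, i.e. a
      // +/- ~16.67 ms window on the distributed clock.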
      if (std::abs((distributed_eof - last_observation.time).count()) <
          static_cast<int>(kImagePeriodMs / 2.0 * 1000000)) {
        Eigen::Affine3d H_camera1_board = last_observation.H_camera_target;
        Eigen::Affine3d H_camera1_camera2 =
            H_camera1_board * H_camera_board.inverse();
        VLOG(1) << "Storing observation between " << last_observation.pi_name
                << ", target " << last_observation.board_id << " and "
                << new_observation.pi_name << ", target "
                << new_observation.board_id << " is "
                << H_camera1_camera2.matrix();
        // Sort by pi numbering
        if (last_observation.pi_name < new_observation.pi_name) {
          detection_list.push_back(
              std::pair(last_observation, new_observation));
        } else if (last_observation.pi_name > new_observation.pi_name) {
          detection_list.push_back(
              std::pair(new_observation, last_observation));
        } else {
          LOG(WARNING) << "Got 2 observations in a row from same pi. Probably "
                          "not too much of an issue???";
        }
      } else {
        VLOG(2) << "Storing observation for " << pi_name << " at time "
                << distributed_eof;
        last_observation = new_observation;
      }
    }
  }
  if (FLAGS_visualize) {
    std::string image_name = pi_name + " Image";
    cv::Mat rgb_small;
    cv::resize(rgb_image, rgb_small, cv::Size(), 0.5, 0.5);
    cv::imshow(image_name, rgb_small);
    cv::waitKey(FLAGS_wait_key);
  }
}

void ExtrinsicsMain(int argc, char *argv[]) {
  std::vector<DataAdapter::TimestampedDetection> timestamped_target_detections;

  std::optional<aos::FlatbufferDetachedBuffer<aos::Configuration>> config =
      (FLAGS_config.empty()
           ? std::nullopt
           : std::make_optional(aos::configuration::ReadConfig(FLAGS_config)));

  // Open logfiles
  aos::logger::LogReader reader(
      aos::logger::SortParts(aos::logger::FindLogs(argc, argv)),
      config.has_value() ? &config->message() : nullptr);

  constexpr size_t kNumPis = 4;
  for (size_t i = 1; i <= kNumPis; i++) {
    reader.RemapLoggedChannel(absl::StrFormat("/pi%u/camera", i),
                              "frc971.vision.TargetMap");
    reader.RemapLoggedChannel(absl::StrFormat("/pi%u/camera", i),
                              "foxglove.ImageAnnotations");
    reader.RemapLoggedChannel(absl::StrFormat("/pi%u/constants", i),
                              "y2023.Constants");
  }
  reader.RemapLoggedChannel("/imu/constants", "y2023.Constants");
  reader.Register();

  SendSimulationConstants(reader.event_loop_factory(), FLAGS_team_number,
                          FLAGS_constants_path);

  LOG(INFO) << "Using target type " << FLAGS_target_type;
  std::vector<std::string> node_list;
  node_list.push_back("pi1");
  node_list.push_back("pi2");
  node_list.push_back("pi3");
  node_list.push_back("pi4");

  std::vector<std::unique_ptr<aos::EventLoop>> detection_event_loops;
  std::vector<frc971::vision::CharucoExtractor *> charuco_extractors;
  std::vector<frc971::vision::ImageCallback *> image_callbacks;
  std::vector<Eigen::Affine3d> default_extrinsics;

  for (uint i = 0; i < node_list.size(); i++) {
    std::string node = node_list[i];
    const aos::Node *pi =
        aos::configuration::GetNode(reader.configuration(), node.c_str());

    detection_event_loops.emplace_back(
        reader.event_loop_factory()->MakeEventLoop(
            (node + "_detection").c_str(), pi));

    frc971::constants::ConstantsFetcher<y2023::Constants> constants_fetcher(
        detection_event_loops.back().get());
    aos::FlatbufferDetachedBuffer<calibration::CameraCalibration> intrinsics =
        aos::RecursiveCopyFlatBuffer(y2023::vision::FindCameraCalibration(
            constants_fetcher.constants(), node));

    const calibration::CameraCalibration *calibration =
        FindCameraCalibration(constants_fetcher.constants(), node);

    frc971::vision::TargetType target_type =
        frc971::vision::TargetTypeFromString(FLAGS_target_type);
    frc971::vision::CharucoExtractor *charuco_ext =
        new frc971::vision::CharucoExtractor(calibration, target_type);
    charuco_extractors.emplace_back(charuco_ext);

    cv::Mat extrinsics_cv = CameraExtrinsics(calibration).value();
    Eigen::Matrix4d extrinsics_matrix;
    cv::cv2eigen(extrinsics_cv, extrinsics_matrix);
    const auto ext_H_robot_pi = Eigen::Affine3d(extrinsics_matrix);
    default_extrinsics.emplace_back(ext_H_robot_pi);

    LOG(INFO) << "Got extrinsics for " << node << " as\n"
              << default_extrinsics.back().matrix();

    frc971::vision::ImageCallback *image_callback =
        new frc971::vision::ImageCallback(
            detection_event_loops[i].get(), "/" + node_list[i] + "/camera",
            [&reader, &charuco_extractors, &detection_event_loops, &node_list,
             i](cv::Mat rgb_image, const aos::monotonic_clock::time_point eof) {
              aos::distributed_clock::time_point pi_distributed_time =
                  reader.event_loop_factory()
                      ->GetNodeEventLoopFactory(
                          detection_event_loops[i].get()->node())
                      ->ToDistributedClock(eof);
              ProcessImage(detection_event_loops[i].get(), rgb_image, eof,
                           pi_distributed_time, *charuco_extractors[i],
                           node_list[i]);
            });

    image_callbacks.emplace_back(image_callback);
  }

  const auto ext_H_robot_piA = default_extrinsics[0];
  const auto ext_H_robot_piB = default_extrinsics[1];

  reader.event_loop_factory()->Run();

  for (const auto &node : node_list) {
    std::vector<TimestampedPiDetection> pose_list;
    for (const auto &ext : two_board_extrinsics_list) {
      if (ext.pi_name == node) {
        pose_list.push_back(ext);
      }
    }
    Eigen::Affine3d avg_pose = ComputeAveragePose(pose_list);
    VLOG(1) << "Estimate from " << node << " with " << pose_list.size()
            << " observations is:\n"
            << avg_pose.matrix();
  }
  Eigen::Affine3d avg_pose = ComputeAveragePose(two_board_extrinsics_list);
  LOG(INFO) << "Estimate of two board pose using all nodes with "
            << two_board_extrinsics_list.size() << " observations is:\n"
            << avg_pose.matrix() << "\n";

  LOG(INFO) << "Got " << detection_list.size() << " extrinsic observations";
  CHECK(detection_list.size() > 0)
      << "Need at least one pair of joint observations between cameras";
  Eigen::Affine3d H_target0_target1 = avg_pose;
  int base_target_id = detection_list[0].first.board_id;
  std::vector<Eigen::Affine3d> H_cameraA_cameraB_list;
  std::vector<Eigen::Affine3d> updated_extrinsics;
  // Use the first node's extrinsics as our base, and fix from there
  updated_extrinsics.push_back(default_extrinsics[0]);
  LOG(INFO) << "Default extrinsic for node " << node_list[0] << " is "
            << default_extrinsics[0].matrix();
  for (uint i = 0; i < node_list.size() - 1; i++) {
    H_cameraA_cameraB_list.clear();
    for (auto [poseA, poseB] : detection_list) {
      if ((poseA.pi_name == node_list[i]) &&
          (poseB.pi_name == node_list[i + 1])) {
        Eigen::Affine3d H_cameraA_target0 = poseA.H_camera_target;
        // If this pose isn't referenced to target0, correct that
        if (poseA.board_id != base_target_id) {
          H_cameraA_target0 = H_cameraA_target0 * H_target0_target1.inverse();
        }

        Eigen::Affine3d H_cameraB_target0 = poseB.H_camera_target;
        if (poseB.board_id != base_target_id) {
          H_cameraB_target0 = H_cameraB_target0 * H_target0_target1.inverse();
        }
        Eigen::Affine3d H_cameraA_cameraB =
            H_cameraA_target0 * H_cameraB_target0.inverse();
        H_cameraA_cameraB_list.push_back(H_cameraA_cameraB);
      }
    }
    CHECK(H_cameraA_cameraB_list.size() > 0)
        << "Failed with zero poses for node " << node_list[i];
    Eigen::Affine3d avg_H_cameraA_cameraB =
        ComputeAveragePose(H_cameraA_cameraB_list);
    LOG(INFO) << "From " << node_list[i] << " to " << node_list[i + 1]
              << " found " << H_cameraA_cameraB_list.size()
              << " observations, and the average pose is:\n"
              << avg_H_cameraA_cameraB.matrix();
    Eigen::Affine3d default_H_cameraA_cameraB =
        default_extrinsics[i].inverse() * default_extrinsics[i + 1];
    LOG(INFO) << "Compare this to that from default values:\n"
              << default_H_cameraA_cameraB.matrix();
    // Next extrinsic is just the previous one * avg_delta_pose
    Eigen::Affine3d next_extrinsic =
        updated_extrinsics.back() * avg_H_cameraA_cameraB;
    LOG(INFO)
        << "Difference between averaged and default delta poses has |T| = "
        << (avg_H_cameraA_cameraB * default_H_cameraA_cameraB.inverse())
               .translation()
               .norm()
        << " and |R| = "
        << Eigen::AngleAxisd(
               (avg_H_cameraA_cameraB * default_H_cameraA_cameraB.inverse())
                   .rotation())
               .angle();
    updated_extrinsics.push_back(next_extrinsic);
    LOG(INFO) << "Default Extrinsic for " << node_list[i + 1] << " is \n"
              << default_extrinsics[i + 1].matrix();
    LOG(INFO) << "--> Updated Extrinsic for " << node_list[i + 1] << " is \n"
              << next_extrinsic.matrix();
  }

  // Cleanup
  for (uint i = 0; i < image_callbacks.size(); i++) {
    delete charuco_extractors[i];
    delete image_callbacks[i];
  }
}
}  // namespace vision
}  // namespace y2023

int main(int argc, char **argv) {
  aos::InitGoogle(&argc, &argv);
  y2023::vision::ExtrinsicsMain(argc, argv);
}