blob: 67613a1078d90df9a7f5b23c0f7b6cf26fc966f9 [file] [log] [blame]
Brian Silverman4770c7d2020-02-17 20:34:42 -08001#include <opencv2/calib3d.hpp>
Brian Silverman967e5df2020-02-09 16:43:34 -08002#include <opencv2/features2d.hpp>
3#include <opencv2/imgproc.hpp>
4
Brian Silverman9dd793b2020-01-31 23:52:21 -08005#include "aos/events/shm_event_loop.h"
Brian Silverman4770c7d2020-02-17 20:34:42 -08006#include "aos/flatbuffer_merge.h"
Brian Silverman9dd793b2020-01-31 23:52:21 -08007#include "aos/init.h"
Brian Silverman4770c7d2020-02-17 20:34:42 -08008#include "aos/network/team_number.h"
Brian Silverman9dd793b2020-01-31 23:52:21 -08009
Brian Silverman967e5df2020-02-09 16:43:34 -080010#include "y2020/vision/sift/sift971.h"
11#include "y2020/vision/sift/sift_generated.h"
12#include "y2020/vision/sift/sift_training_generated.h"
Jim Ostrowskiad5d8a72020-02-28 00:15:26 -080013#include "y2020/vision/tools/python_code/sift_training_data.h"
Brian Silverman9dd793b2020-01-31 23:52:21 -080014#include "y2020/vision/v4l2_reader.h"
Brian Silverman967e5df2020-02-09 16:43:34 -080015#include "y2020/vision/vision_generated.h"
Brian Silverman9dd793b2020-01-31 23:52:21 -080016
17namespace frc971 {
18namespace vision {
19namespace {
20
// Handles all camera processing for one camera: reads frames from a
// V4L2Reader, extracts SIFT features, matches them against pre-loaded
// training data, solves for target poses, and publishes images and results
// over the AOS event loop.
class CameraReader {
 public:
  // All pointer arguments are borrowed and must outlive this object.
  CameraReader(aos::EventLoop *event_loop,
               const sift::TrainingData *training_data, V4L2Reader *reader,
               cv::FlannBasedMatcher *matcher)
      : event_loop_(event_loop),
        training_data_(training_data),
        // Runs during member init; LOG(FATAL)s if no calibration matches
        // this node + team (see FindCameraCalibration below).
        camera_calibration_(FindCameraCalibration()),
        reader_(reader),
        matcher_(matcher),
        image_sender_(event_loop->MakeSender<CameraImage>("/camera")),
        result_sender_(
            event_loop->MakeSender<sift::ImageMatchResult>("/camera")),
        detailed_result_sender_(
            event_loop->MakeSender<sift::ImageMatchResult>("/camera/detailed")),
        // The timer re-arms itself after each frame, so image processing runs
        // continuously without starving other event-loop work.
        read_image_timer_(event_loop->AddTimer([this]() {
          ReadImage();
          read_image_timer_->Setup(event_loop_->monotonic_now());
        })) {
    CopyTrainingFeatures();
    // Technically we don't need to do this, but doing it now avoids the first
    // match attempt being slow.
    matcher_->train();

    event_loop->OnRun(
        [this]() { read_image_timer_->Setup(event_loop_->monotonic_now()); });
  }

 private:
  // Returns the calibration entry in training_data_ matching this node's name
  // and the current team number; LOG(FATAL)s when none matches.
  const sift::CameraCalibration *FindCameraCalibration() const;

  // Copies the information from training_data_ into matcher_.
  void CopyTrainingFeatures();
  // Processes an image (including sending the results).
  void ProcessImage(const CameraImage &image);
  // Reads an image, and then performs all of our processing on it.
  void ReadImage();

  // Packs per-training-image match lists into flatbuffer ImageMatch tables.
  flatbuffers::Offset<
      flatbuffers::Vector<flatbuffers::Offset<sift::ImageMatch>>>
  PackImageMatches(flatbuffers::FlatBufferBuilder *fbb,
                   const std::vector<std::vector<cv::DMatch>> &matches);
  // Packs keypoints and their descriptors into flatbuffer Feature tables.
  flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<sift::Feature>>>
  PackFeatures(flatbuffers::FlatBufferBuilder *fbb,
               const std::vector<cv::KeyPoint> &keypoints,
               const cv::Mat &descriptors);

  // Builds and sends an ImageMatchResult on result_sender. Feature and match
  // details are only included when send_details is true (the detailed
  // channel); the compact message carries just the poses.
  void SendImageMatchResult(const CameraImage &image,
                            const std::vector<cv::KeyPoint> &keypoints,
                            const cv::Mat &descriptors,
                            const std::vector<std::vector<cv::DMatch>> &matches,
                            const std::vector<cv::Mat> &camera_target_list,
                            const std::vector<cv::Mat> &field_camera_list,
                            aos::Sender<sift::ImageMatchResult> *result_sender,
                            bool send_details);

  // Returns the 3D location for the specified training feature.
  cv::Point3f Training3dPoint(int training_image_index,
                              int feature_index) const {
    const sift::KeypointFieldLocation *const location =
        training_data_->images()
            ->Get(training_image_index)
            ->features()
            ->Get(feature_index)
            ->field_location();
    return cv::Point3f(location->x(), location->y(), location->z());
  }

  // Returns the field-to-target transformation for the given training image.
  const sift::TransformationMatrix *FieldToTarget(int training_image_index) {
    return training_data_->images()
        ->Get(training_image_index)
        ->field_to_target();
  }

  // Number of training images baked into training_data_.
  int number_training_images() const {
    return training_data_->images()->size();
  }

  // Wraps the calibration intrinsics in a 3x3 cv::Mat. The Mat aliases the
  // flatbuffer data (no copy is made), so it must be treated as read-only and
  // must not outlive training_data_.
  cv::Mat CameraIntrinsics() const {
    const cv::Mat result(3, 3, CV_32F,
                         const_cast<void *>(static_cast<const void *>(
                             camera_calibration_->intrinsics()->data())));
    CHECK_EQ(result.total(), camera_calibration_->intrinsics()->size());
    return result;
  }

  // Wraps the 5 distortion coefficients in a 5x1 cv::Mat. Same aliasing
  // caveat as CameraIntrinsics().
  cv::Mat CameraDistCoeffs() const {
    const cv::Mat result(5, 1, CV_32F,
                         const_cast<void *>(static_cast<const void *>(
                             camera_calibration_->dist_coeffs()->data())));
    CHECK_EQ(result.total(), camera_calibration_->dist_coeffs()->size());
    return result;
  }

  aos::EventLoop *const event_loop_;
  const sift::TrainingData *const training_data_;
  const sift::CameraCalibration *const camera_calibration_;
  V4L2Reader *const reader_;
  cv::FlannBasedMatcher *const matcher_;
  aos::Sender<CameraImage> image_sender_;
  aos::Sender<sift::ImageMatchResult> result_sender_;
  aos::Sender<sift::ImageMatchResult> detailed_result_sender_;
  // We schedule this immediately to read an image. Having it on a timer means
  // other things can run on the event loop in between.
  aos::TimerHandler *const read_image_timer_;

  // SIFT implementation used for feature detection and description.
  const std::unique_ptr<frc971::vision::SIFT971_Impl> sift_{
      new frc971::vision::SIFT971_Impl()};
};
130
Brian Silverman4770c7d2020-02-17 20:34:42 -0800131const sift::CameraCalibration *CameraReader::FindCameraCalibration() const {
132 const std::string_view node_name = event_loop_->node()->name()->string_view();
133 const int team_number = aos::network::GetTeamNumber();
134 for (const sift::CameraCalibration *candidate :
135 *training_data_->camera_calibrations()) {
136 if (candidate->node_name()->string_view() != node_name) {
137 continue;
138 }
139 if (candidate->team_number() != team_number) {
140 continue;
141 }
142 return candidate;
143 }
144 LOG(FATAL) << ": Failed to find camera calibration for " << node_name
145 << " on " << team_number;
146}
147
Brian Silverman967e5df2020-02-09 16:43:34 -0800148void CameraReader::CopyTrainingFeatures() {
149 for (const sift::TrainingImage *training_image : *training_data_->images()) {
150 cv::Mat features(training_image->features()->size(), 128, CV_32F);
Jim Ostrowski38bb70b2020-02-21 20:46:10 -0800151 for (size_t i = 0; i < training_image->features()->size(); ++i) {
Brian Silverman967e5df2020-02-09 16:43:34 -0800152 const sift::Feature *feature_table = training_image->features()->Get(i);
Brian Silverman4770c7d2020-02-17 20:34:42 -0800153
154 // We don't need this information right now, but make sure it's here to
155 // avoid crashes that only occur when specific features are matched.
156 CHECK(feature_table->has_field_location());
157
Brian Silverman967e5df2020-02-09 16:43:34 -0800158 const flatbuffers::Vector<float> *const descriptor =
159 feature_table->descriptor();
160 CHECK_EQ(descriptor->size(), 128u) << ": Unsupported feature size";
161 cv::Mat(1, descriptor->size(), CV_32F,
162 const_cast<void *>(static_cast<const void *>(descriptor->data())))
163 .copyTo(features(cv::Range(i, i + 1), cv::Range(0, 128)));
164 }
165 matcher_->add(features);
166 }
167}
168
// Serializes one ImageMatchResult and sends it on result_sender. Pose data is
// always included; keypoint features and raw match tables are only packed
// when send_details is true, keeping the non-detailed channel small.
void CameraReader::SendImageMatchResult(
    const CameraImage &image, const std::vector<cv::KeyPoint> &keypoints,
    const cv::Mat &descriptors,
    const std::vector<std::vector<cv::DMatch>> &matches,
    const std::vector<cv::Mat> &camera_target_list,
    const std::vector<cv::Mat> &field_camera_list,
    aos::Sender<sift::ImageMatchResult> *result_sender, bool send_details) {
  auto builder = result_sender->MakeBuilder();
  // All offsets must be created on builder.fbb() before the ImageMatchResult
  // table builder below is started (flatbuffers forbids nesting).
  const auto camera_calibration_offset =
      aos::CopyFlatBuffer(camera_calibration_, builder.fbb());

  flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<sift::Feature>>>
      features_offset;
  flatbuffers::Offset<
      flatbuffers::Vector<flatbuffers::Offset<sift::ImageMatch>>>
      image_matches_offset;
  if (send_details) {
    features_offset = PackFeatures(builder.fbb(), keypoints, descriptors);
    image_matches_offset = PackImageMatches(builder.fbb(), matches);
  }

  std::vector<flatbuffers::Offset<sift::CameraPose>> camera_poses;

  // Both lists are built in lock-step by ProcessImage, one entry per solved
  // training image.
  CHECK_EQ(camera_target_list.size(), field_camera_list.size());
  for (size_t i = 0; i < camera_target_list.size(); ++i) {
    // Pack the camera-to-target transform as a flat float vector.
    cv::Mat camera_target = camera_target_list[i];
    CHECK(camera_target.isContinuous());
    const auto data_offset = builder.fbb()->CreateVector<float>(
        reinterpret_cast<float *>(camera_target.data), camera_target.total());
    const flatbuffers::Offset<sift::TransformationMatrix> transform_offset =
        sift::CreateTransformationMatrix(*builder.fbb(), data_offset);

    // Pack the field-to-camera transform the same way.
    cv::Mat field_camera = field_camera_list[i];
    CHECK(field_camera.isContinuous());
    const auto fc_data_offset = builder.fbb()->CreateVector<float>(
        reinterpret_cast<float *>(field_camera.data), field_camera.total());
    const flatbuffers::Offset<sift::TransformationMatrix> fc_transform_offset =
        sift::CreateTransformationMatrix(*builder.fbb(), fc_data_offset);

    // NOTE(review): i indexes the compacted pose lists, but FieldToTarget()
    // expects a training-image index. When ProcessImage skips images with too
    // few matches, these indices diverge and the wrong field_to_target may be
    // attached -- verify against ProcessImage's skip logic.
    const flatbuffers::Offset<sift::TransformationMatrix>
        field_to_target_offset =
            aos::CopyFlatBuffer(FieldToTarget(i), builder.fbb());

    sift::CameraPose::Builder pose_builder(*builder.fbb());
    pose_builder.add_camera_to_target(transform_offset);
    pose_builder.add_field_to_camera(fc_transform_offset);
    pose_builder.add_field_to_target(field_to_target_offset);
    camera_poses.emplace_back(pose_builder.Finish());
  }
  const auto camera_poses_offset = builder.fbb()->CreateVector(camera_poses);

  sift::ImageMatchResult::Builder result_builder(*builder.fbb());
  result_builder.add_camera_poses(camera_poses_offset);
  if (send_details) {
    result_builder.add_image_matches(image_matches_offset);
    result_builder.add_features(features_offset);
  }
  result_builder.add_image_monotonic_timestamp_ns(
      image.monotonic_timestamp_ns());
  result_builder.add_camera_calibration(camera_calibration_offset);

  // TODO<Jim>: Need to add target point computed from matches and
  // mapped by homography
  builder.Send(result_builder.Finish());
}
234
235void CameraReader::ProcessImage(const CameraImage &image) {
Brian Silverman967e5df2020-02-09 16:43:34 -0800236 // First, we need to extract the brightness information. This can't really be
237 // fused into the beginning of the SIFT algorithm because the algorithm needs
238 // to look at the base image directly. It also only takes 2ms on our images.
239 // This is converting from YUYV to a grayscale image.
Jim Ostrowski38bb70b2020-02-21 20:46:10 -0800240 cv::Mat image_mat(image.rows(), image.cols(), CV_8U);
Brian Silverman967e5df2020-02-09 16:43:34 -0800241 CHECK(image_mat.isContinuous());
242 const int number_pixels = image.rows() * image.cols();
243 for (int i = 0; i < number_pixels; ++i) {
244 reinterpret_cast<uint8_t *>(image_mat.data)[i] =
245 image.data()->data()[i * 2];
246 }
247
248 // Next, grab the features from the image.
249 std::vector<cv::KeyPoint> keypoints;
250 cv::Mat descriptors;
251 sift_->detectAndCompute(image_mat, cv::noArray(), keypoints, descriptors);
252
253 // Then, match those features against our training data.
254 std::vector<std::vector<cv::DMatch>> matches;
255 matcher_->knnMatch(/* queryDescriptors */ descriptors, matches, /* k */ 2);
Brian Silverman4770c7d2020-02-17 20:34:42 -0800256
257 struct PerImageMatches {
258 std::vector<const std::vector<cv::DMatch> *> matches;
259 std::vector<cv::Point3f> training_points_3d;
260 std::vector<cv::Point2f> query_points;
261 };
262 std::vector<PerImageMatches> per_image_matches(number_training_images());
263
264 // Pull out the good matches which we want for each image.
265 // Discard the bad matches per Lowe's ratio test.
266 // (Lowe originally proposed 0.7 ratio, but 0.75 was later proposed as a
267 // better option. We'll go with the more conservative (fewer, better matches)
268 // for now).
269 for (const std::vector<cv::DMatch> &match : matches) {
270 CHECK_EQ(2u, match.size());
271 CHECK_LE(match[0].distance, match[1].distance);
272 CHECK_LT(match[0].imgIdx, number_training_images());
273 CHECK_LT(match[1].imgIdx, number_training_images());
274 CHECK_EQ(match[0].queryIdx, match[1].queryIdx);
275 if (!(match[0].distance < 0.7 * match[1].distance)) {
276 continue;
277 }
278
279 const int training_image = match[0].imgIdx;
280 CHECK_LT(training_image, static_cast<int>(per_image_matches.size()));
281 PerImageMatches *const per_image = &per_image_matches[training_image];
282 per_image->matches.push_back(&match);
283 per_image->training_points_3d.push_back(
284 Training3dPoint(training_image, match[0].trainIdx));
285
286 const cv::KeyPoint &keypoint = keypoints[match[0].queryIdx];
287 per_image->query_points.push_back(keypoint.pt);
288 }
289
290 // The minimum number of matches in a training image for us to use it.
291 static constexpr int kMinimumMatchCount = 10;
Jim Ostrowskiad5d8a72020-02-28 00:15:26 -0800292 std::vector<cv::Mat> camera_target_list;
Jim Ostrowskie4264262020-02-29 00:27:24 -0800293 std::vector<cv::Mat> field_camera_list;
Brian Silverman4770c7d2020-02-17 20:34:42 -0800294
Brian Silverman4770c7d2020-02-17 20:34:42 -0800295 for (size_t i = 0; i < per_image_matches.size(); ++i) {
296 const PerImageMatches &per_image = per_image_matches[i];
297 if (per_image.matches.size() < kMinimumMatchCount) {
298 continue;
299 }
300
Jim Ostrowskie4264262020-02-29 00:27:24 -0800301 // Transformations (rotations and translations) for various
302 // coordinate frames. R_XXXX_vec is the Rodrigues (angle-axis)
303 // representation the 3x3 rotation R_XXXX
304 // Tranform from camera to target frame
Jim Ostrowski295f2a12020-02-26 22:22:44 -0800305 cv::Mat R_camera_target_vec, R_camera_target, T_camera_target;
Jim Ostrowskie4264262020-02-29 00:27:24 -0800306 // Tranform from camera to field origin (global) reference frame
307 cv::Mat R_camera_field_vec, R_camera_field, T_camera_field;
308 // Inverse of camera to field-- defines location of camera in
309 // global (field) reference frame
310 cv::Mat R_field_camera_vec, R_field_camera, T_field_camera;
311
Jim Ostrowski295f2a12020-02-26 22:22:44 -0800312 // Compute the pose of the camera (global origin relative to camera)
Brian Silverman4770c7d2020-02-17 20:34:42 -0800313 cv::solvePnPRansac(per_image.training_points_3d, per_image.query_points,
Jim Ostrowskiad5d8a72020-02-28 00:15:26 -0800314 CameraIntrinsics(), CameraDistCoeffs(),
Jim Ostrowskie4264262020-02-29 00:27:24 -0800315 R_camera_field_vec, T_camera_field);
Brian Silverman4770c7d2020-02-17 20:34:42 -0800316
Jim Ostrowskie4264262020-02-29 00:27:24 -0800317 // Get matrix version of R_camera_field
318 cv::Rodrigues(R_camera_field_vec, R_camera_field);
319
320 // Compute H_field_camera = H_camera_field^-1
321 R_field_camera = R_camera_field.t();
322 T_field_camera = -R_field_camera.dot(T_camera_field);
323 T_field_camera = T_field_camera.t();
324
325 // Extract the field_target transformation
326 const cv::Mat H_field_target(4, 4, CV_32F,
327 const_cast<void *>(static_cast<const void *>(
328 FieldToTarget(i)->data())));
329 const cv::Mat R_field_target =
330 H_field_target(cv::Range(0, 3), cv::Range(0, 3));
331 const cv::Mat T_field_target =
332 H_field_target(cv::Range(3, 4), cv::Range(0, 3));
333
334 // Use it to get the relative pose from camera to target
335 R_camera_target = R_camera_field.dot(R_field_target);
336 T_camera_target = R_camera_field.dot(T_field_target) + T_camera_field;
337
338 // Convert Camera from angle-axis (3x1) to homogenous (3x3) representation
339 // and turn T into 3x1 vector
340 cv::Rodrigues(R_camera_target_vec, R_camera_target);
341 T_camera_target = T_camera_target.t();
342
343 // Set H_camera_target
Brian Silverman4770c7d2020-02-17 20:34:42 -0800344 {
Jim Ostrowski38bb70b2020-02-21 20:46:10 -0800345 CHECK_EQ(cv::Size(3, 3), R_camera_target.size());
346 CHECK_EQ(cv::Size(3, 1), T_camera_target.size());
347 cv::Mat camera_target = cv::Mat::zeros(4, 4, CV_32F);
348 R_camera_target.copyTo(camera_target(cv::Range(0, 3), cv::Range(0, 3)));
349 T_camera_target.copyTo(camera_target(cv::Range(3, 4), cv::Range(0, 3)));
350 camera_target.at<float>(3, 3) = 1;
351 CHECK(camera_target.isContinuous());
Jim Ostrowskie4264262020-02-29 00:27:24 -0800352 camera_target_list.push_back(camera_target.clone());
353 }
354
355 // Set H_field_camera
356 {
357 CHECK_EQ(cv::Size(3, 3), R_field_camera.size());
358 CHECK_EQ(cv::Size(3, 1), T_field_camera.size());
359 cv::Mat field_camera = cv::Mat::zeros(4, 4, CV_32F);
360 R_field_camera.copyTo(field_camera(cv::Range(0, 3), cv::Range(0, 3)));
361 T_field_camera.copyTo(field_camera(cv::Range(3, 4), cv::Range(0, 3)));
362 field_camera.at<float>(3, 3) = 1;
363 CHECK(field_camera.isContinuous());
364 field_camera_list.push_back(field_camera.clone());
Brian Silverman4770c7d2020-02-17 20:34:42 -0800365 }
Brian Silverman4770c7d2020-02-17 20:34:42 -0800366 }
Jim Ostrowskiad5d8a72020-02-28 00:15:26 -0800367 // Now, send our two messages-- one large, with details for remote
368 // debugging(features), and one smaller
369 SendImageMatchResult(image, keypoints, descriptors, matches,
Jim Ostrowskie4264262020-02-29 00:27:24 -0800370 camera_target_list, field_camera_list,
371 &detailed_result_sender_, true);
Jim Ostrowskiad5d8a72020-02-28 00:15:26 -0800372 SendImageMatchResult(image, keypoints, descriptors, matches,
Jim Ostrowskie4264262020-02-29 00:27:24 -0800373 camera_target_list, field_camera_list, &result_sender_,
374 false);
Brian Silverman967e5df2020-02-09 16:43:34 -0800375}
376
377void CameraReader::ReadImage() {
378 if (!reader_->ReadLatestImage()) {
379 LOG(INFO) << "No image, sleeping";
380 std::this_thread::sleep_for(std::chrono::milliseconds(10));
381 return;
382 }
383
384 ProcessImage(reader_->LatestImage());
385
386 reader_->SendLatestImage();
387}
388
389flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<sift::ImageMatch>>>
390CameraReader::PackImageMatches(
391 flatbuffers::FlatBufferBuilder *fbb,
392 const std::vector<std::vector<cv::DMatch>> &matches) {
393 // First, we need to pull out all the matches for each image. Might as well
394 // build up the Match tables at the same time.
Brian Silverman4d4a70d2020-02-17 13:03:19 -0800395 std::vector<std::vector<sift::Match>> per_image_matches(
396 number_training_images());
Brian Silverman967e5df2020-02-09 16:43:34 -0800397 for (const std::vector<cv::DMatch> &image_matches : matches) {
398 for (const cv::DMatch &image_match : image_matches) {
Brian Silverman4d4a70d2020-02-17 13:03:19 -0800399 CHECK_LT(image_match.imgIdx, number_training_images());
400 per_image_matches[image_match.imgIdx].emplace_back();
401 sift::Match *const match = &per_image_matches[image_match.imgIdx].back();
402 match->mutate_query_feature(image_match.queryIdx);
403 match->mutate_train_feature(image_match.trainIdx);
404 match->mutate_distance(image_match.distance);
Brian Silverman967e5df2020-02-09 16:43:34 -0800405 }
406 }
407
408 // Then, we need to build up each ImageMatch table.
409 std::vector<flatbuffers::Offset<sift::ImageMatch>> image_match_tables;
410 for (size_t i = 0; i < per_image_matches.size(); ++i) {
Brian Silverman4d4a70d2020-02-17 13:03:19 -0800411 const std::vector<sift::Match> &this_image_matches = per_image_matches[i];
Brian Silverman967e5df2020-02-09 16:43:34 -0800412 if (this_image_matches.empty()) {
413 continue;
414 }
Brian Silverman4d4a70d2020-02-17 13:03:19 -0800415 const auto vector_offset = fbb->CreateVectorOfStructs(this_image_matches);
Brian Silverman967e5df2020-02-09 16:43:34 -0800416 sift::ImageMatch::Builder image_builder(*fbb);
417 image_builder.add_train_image(i);
418 image_builder.add_matches(vector_offset);
419 image_match_tables.emplace_back(image_builder.Finish());
420 }
421
422 return fbb->CreateVector(image_match_tables);
423}
424
425flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<sift::Feature>>>
426CameraReader::PackFeatures(flatbuffers::FlatBufferBuilder *fbb,
427 const std::vector<cv::KeyPoint> &keypoints,
428 const cv::Mat &descriptors) {
429 const int number_features = keypoints.size();
430 CHECK_EQ(descriptors.rows, number_features);
431 std::vector<flatbuffers::Offset<sift::Feature>> features_vector(
432 number_features);
433 for (int i = 0; i < number_features; ++i) {
434 const auto submat = descriptors(cv::Range(i, i + 1), cv::Range(0, 128));
435 CHECK(submat.isContinuous());
436 const auto descriptor_offset =
437 fbb->CreateVector(reinterpret_cast<float *>(submat.data), 128);
438 sift::Feature::Builder feature_builder(*fbb);
439 feature_builder.add_descriptor(descriptor_offset);
440 feature_builder.add_x(keypoints[i].pt.x);
441 feature_builder.add_y(keypoints[i].pt.y);
442 feature_builder.add_size(keypoints[i].size);
443 feature_builder.add_angle(keypoints[i].angle);
444 feature_builder.add_response(keypoints[i].response);
445 feature_builder.add_octave(keypoints[i].octave);
446 CHECK_EQ(-1, keypoints[i].class_id)
447 << ": Not sure what to do with a class id";
448 features_vector[i] = feature_builder.Finish();
449 }
450 return fbb->CreateVector(features_vector);
451}
452
Brian Silverman9dd793b2020-01-31 23:52:21 -0800453void CameraReaderMain() {
454 aos::FlatbufferDetachedBuffer<aos::Configuration> config =
455 aos::configuration::ReadConfig("config.json");
456
Austin Schuh75589cb2020-02-26 22:21:37 -0800457 const auto training_data_bfbs = SiftTrainingData();
Brian Silverman967e5df2020-02-09 16:43:34 -0800458 const sift::TrainingData *const training_data =
459 flatbuffers::GetRoot<sift::TrainingData>(training_data_bfbs.data());
460 {
461 flatbuffers::Verifier verifier(
462 reinterpret_cast<const uint8_t *>(training_data_bfbs.data()),
463 training_data_bfbs.size());
464 CHECK(training_data->Verify(verifier));
465 }
466
467 const auto index_params = cv::makePtr<cv::flann::IndexParams>();
468 index_params->setAlgorithm(cvflann::FLANN_INDEX_KDTREE);
469 index_params->setInt("trees", 5);
470 const auto search_params =
471 cv::makePtr<cv::flann::SearchParams>(/* checks */ 50);
472 cv::FlannBasedMatcher matcher(index_params, search_params);
473
Brian Silverman9dd793b2020-01-31 23:52:21 -0800474 aos::ShmEventLoop event_loop(&config.message());
Brian Silverman62956e72020-02-26 21:04:05 -0800475
476 // First, log the data for future reference.
477 {
478 aos::Sender<sift::TrainingData> training_data_sender =
479 event_loop.MakeSender<sift::TrainingData>("/camera");
480 training_data_sender.Send(
481 aos::FlatbufferString<sift::TrainingData>(training_data_bfbs));
482 }
483
Brian Silverman9dd793b2020-01-31 23:52:21 -0800484 V4L2Reader v4l2_reader(&event_loop, "/dev/video0");
Jim Ostrowski38bb70b2020-02-21 20:46:10 -0800485 CameraReader camera_reader(&event_loop, training_data, &v4l2_reader,
486 &matcher);
Brian Silverman9dd793b2020-01-31 23:52:21 -0800487
Brian Silverman967e5df2020-02-09 16:43:34 -0800488 event_loop.Run();
Brian Silverman9dd793b2020-01-31 23:52:21 -0800489}
490
491} // namespace
492} // namespace vision
493} // namespace frc971
494
// Process entry point: initializes AOS/glog/gflags, then hands control to the
// camera reader, which blocks in its event loop until shutdown.
int main(int argc, char **argv) {
  aos::InitGoogle(&argc, &argv);
  frc971::vision::CameraReaderMain();
}