Split camera_reader into a lib and main
Change-Id: If3ace42d69d4abf2aea501457e7c0757426688c4
Signed-off-by: James Kuszmaul <jabukuszmaul@gmail.com>
diff --git a/y2020/vision/BUILD b/y2020/vision/BUILD
index b43829d..7f57e3a 100644
--- a/y2020/vision/BUILD
+++ b/y2020/vision/BUILD
@@ -39,8 +39,25 @@
cc_binary(
name = "camera_reader",
srcs = [
+ "camera_reader_main.cc",
+ ],
+ target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//y2020:__subpackages__"],
+ deps = [
+ ":camera_reader_lib",
+ "//aos:init",
+ "//aos/events:shm_event_loop",
+ ],
+)
+
+cc_library(
+ name = "camera_reader_lib",
+ srcs = [
"camera_reader.cc",
],
+ hdrs = [
+ "camera_reader.h",
+ ],
data = [
"//y2020:config",
],
@@ -50,8 +67,7 @@
":v4l2_reader",
":vision_fbs",
"//aos:flatbuffer_merge",
- "//aos:init",
- "//aos/events:shm_event_loop",
+ "//aos/events:event_loop",
"//aos/network:team_number",
"//third_party:opencv",
"//y2020/vision/sift:sift971",
diff --git a/y2020/vision/camera_reader.cc b/y2020/vision/camera_reader.cc
index b13c19c..64e7948 100644
--- a/y2020/vision/camera_reader.cc
+++ b/y2020/vision/camera_reader.cc
@@ -1,12 +1,13 @@
+#include "y2020/vision/camera_reader.h"
+
#include <math.h>
#include <opencv2/calib3d.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/imgproc.hpp>
-#include "aos/events/shm_event_loop.h"
+#include "aos/events/event_loop.h"
#include "aos/flatbuffer_merge.h"
-#include "aos/init.h"
#include "aos/network/team_number.h"
#include "y2020/vision/sift/sift971.h"
#include "y2020/vision/sift/sift_generated.h"
@@ -15,10 +16,6 @@
#include "y2020/vision/v4l2_reader.h"
#include "y2020/vision/vision_generated.h"
-// config used to allow running camera_reader independently. E.g.,
-// bazel run //y2020/vision:camera_reader -- --config y2020/config.json
-// --override_hostname pi-7971-1 --ignore_timestamps true
-DEFINE_string(config, "config.json", "Path to the config file to use.");
DEFINE_bool(skip_sift, false,
"If true don't run any feature extraction. Just forward images.");
DEFINE_bool(ransac_pose, false,
@@ -28,156 +25,6 @@
namespace frc971 {
namespace vision {
-namespace {
-
-class CameraReader {
- public:
- CameraReader(aos::EventLoop *event_loop,
- const sift::TrainingData *training_data, V4L2Reader *reader,
- const cv::Ptr<cv::flann::IndexParams> &index_params,
- const cv::Ptr<cv::flann::SearchParams> &search_params)
- : event_loop_(event_loop),
- training_data_(training_data),
- camera_calibration_(FindCameraCalibration()),
- reader_(reader),
- image_sender_(event_loop->MakeSender<CameraImage>("/camera")),
- result_sender_(
- event_loop->MakeSender<sift::ImageMatchResult>("/camera")),
- detailed_result_sender_(
- event_loop->MakeSender<sift::ImageMatchResult>("/camera/detailed")),
- read_image_timer_(event_loop->AddTimer([this]() { ReadImage(); })) {
- for (int ii = 0; ii < number_training_images(); ++ii) {
- matchers_.push_back(cv::FlannBasedMatcher(index_params, search_params));
- prev_camera_field_R_vec_list_.push_back(cv::Mat::zeros(3, 1, CV_32F));
- prev_camera_field_T_list_.push_back(cv::Mat::zeros(3, 1, CV_32F));
- }
- CopyTrainingFeatures();
-
- for (auto &matcher : matchers_) {
- matcher.train();
- }
-
- event_loop->OnRun(
- [this]() { read_image_timer_->Setup(event_loop_->monotonic_now()); });
- }
-
- private:
- const sift::CameraCalibration *FindCameraCalibration() const;
-
- // Copies the information from training_data_ into matcher_.
- void CopyTrainingFeatures();
- // Processes an image (including sending the results).
- void ProcessImage(const CameraImage &image);
- // Reads an image, and then performs all of our processing on it.
- void ReadImage();
-
- flatbuffers::Offset<
- flatbuffers::Vector<flatbuffers::Offset<sift::ImageMatch>>>
- PackImageMatches(flatbuffers::FlatBufferBuilder *fbb,
- const std::vector<std::vector<cv::DMatch>> &matches);
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<sift::Feature>>>
- PackFeatures(flatbuffers::FlatBufferBuilder *fbb,
- const std::vector<cv::KeyPoint> &keypoints,
- const cv::Mat &descriptors);
-
- void SendImageMatchResult(const CameraImage &image,
- const std::vector<cv::KeyPoint> &keypoints,
- const cv::Mat &descriptors,
- const std::vector<std::vector<cv::DMatch>> &matches,
- const std::vector<cv::Mat> &camera_target_list,
- const std::vector<cv::Mat> &field_camera_list,
- const std::vector<cv::Point2f> &target_point_vector,
- const std::vector<float> &target_radius_vector,
- const std::vector<int> &training_image_indices,
- const std::vector<int> &homography_feature_counts,
- aos::Sender<sift::ImageMatchResult> *result_sender,
- bool send_details);
-
- // Returns the 2D (image) location for the specified training feature.
- cv::Point2f Training2dPoint(int training_image_index,
- int feature_index) const {
- const float x = training_data_->images()
- ->Get(training_image_index)
- ->features()
- ->Get(feature_index)
- ->x();
- const float y = training_data_->images()
- ->Get(training_image_index)
- ->features()
- ->Get(feature_index)
- ->y();
- return cv::Point2f(x, y);
- }
-
- // Returns the 3D location for the specified training feature.
- cv::Point3f Training3dPoint(int training_image_index,
- int feature_index) const {
- const sift::KeypointFieldLocation *const location =
- training_data_->images()
- ->Get(training_image_index)
- ->features()
- ->Get(feature_index)
- ->field_location();
- return cv::Point3f(location->x(), location->y(), location->z());
- }
-
- const sift::TransformationMatrix *FieldToTarget(int training_image_index) {
- return training_data_->images()
- ->Get(training_image_index)
- ->field_to_target();
- }
-
- void TargetLocation(int training_image_index, cv::Point2f &target_location,
- float &target_radius) {
- target_location.x =
- training_data_->images()->Get(training_image_index)->target_point_x();
- target_location.y =
- training_data_->images()->Get(training_image_index)->target_point_y();
- target_radius = training_data_->images()
- ->Get(training_image_index)
- ->target_point_radius();
- }
-
- int number_training_images() const {
- return training_data_->images()->size();
- }
-
- cv::Mat CameraIntrinsics() const {
- const cv::Mat result(3, 3, CV_32F,
- const_cast<void *>(static_cast<const void *>(
- camera_calibration_->intrinsics()->data())));
- CHECK_EQ(result.total(), camera_calibration_->intrinsics()->size());
- return result;
- }
-
- cv::Mat CameraDistCoeffs() const {
- const cv::Mat result(5, 1, CV_32F,
- const_cast<void *>(static_cast<const void *>(
- camera_calibration_->dist_coeffs()->data())));
- CHECK_EQ(result.total(), camera_calibration_->dist_coeffs()->size());
- return result;
- }
-
- aos::EventLoop *const event_loop_;
- const sift::TrainingData *const training_data_;
- const sift::CameraCalibration *const camera_calibration_;
- V4L2Reader *const reader_;
- std::vector<cv::FlannBasedMatcher> matchers_;
- aos::Sender<CameraImage> image_sender_;
- aos::Sender<sift::ImageMatchResult> result_sender_;
- aos::SendFailureCounter result_failure_counter_;
- aos::Sender<sift::ImageMatchResult> detailed_result_sender_;
- // We schedule this immediately to read an image. Having it on a timer means
- // other things can run on the event loop in between.
- aos::TimerHandler *const read_image_timer_;
-
- // Storage for when we want to use the previous estimates of pose
- std::vector<cv::Mat> prev_camera_field_R_vec_list_;
- std::vector<cv::Mat> prev_camera_field_T_list_;
-
- const std::unique_ptr<frc971::vision::SIFT971_Impl> sift_{
- new frc971::vision::SIFT971_Impl()};
-};
const sift::CameraCalibration *CameraReader::FindCameraCalibration() const {
const std::string_view node_name = event_loop_->node()->name()->string_view();
@@ -694,43 +541,5 @@
return fbb->CreateVector(features_vector);
}
-void CameraReaderMain() {
- aos::FlatbufferDetachedBuffer<aos::Configuration> config =
- aos::configuration::ReadConfig(FLAGS_config);
-
- const aos::FlatbufferSpan<sift::TrainingData> training_data(
- SiftTrainingData());
- CHECK(training_data.Verify());
-
- const auto index_params = cv::makePtr<cv::flann::IndexParams>();
- index_params->setAlgorithm(cvflann::FLANN_INDEX_KDTREE);
- index_params->setInt("trees", 5);
- const auto search_params =
- cv::makePtr<cv::flann::SearchParams>(/* checks */ 50);
- cv::FlannBasedMatcher matcher(index_params, search_params);
-
- aos::ShmEventLoop event_loop(&config.message());
-
- // First, log the data for future reference.
- {
- aos::Sender<sift::TrainingData> training_data_sender =
- event_loop.MakeSender<sift::TrainingData>("/camera");
- CHECK_EQ(training_data_sender.Send(training_data),
- aos::RawSender::Error::kOk);
- }
-
- V4L2Reader v4l2_reader(&event_loop, "/dev/video0");
- CameraReader camera_reader(&event_loop, &training_data.message(),
- &v4l2_reader, index_params, search_params);
-
- event_loop.Run();
-}
-
-} // namespace
} // namespace vision
} // namespace frc971
-
-int main(int argc, char **argv) {
- aos::InitGoogle(&argc, &argv);
- frc971::vision::CameraReaderMain();
-}
diff --git a/y2020/vision/camera_reader.h b/y2020/vision/camera_reader.h
new file mode 100644
index 0000000..c05ebea
--- /dev/null
+++ b/y2020/vision/camera_reader.h
@@ -0,0 +1,174 @@
+#ifndef Y2020_VISION_CAMERA_READER_H_
+#define Y2020_VISION_CAMERA_READER_H_
+
+#include <math.h>
+
+#include <opencv2/calib3d.hpp>
+#include <opencv2/features2d.hpp>
+#include <opencv2/imgproc.hpp>
+
+#include "aos/events/event_loop.h"
+#include "aos/flatbuffer_merge.h"
+#include "aos/network/team_number.h"
+#include "y2020/vision/sift/sift971.h"
+#include "y2020/vision/sift/sift_generated.h"
+#include "y2020/vision/sift/sift_training_generated.h"
+#include "y2020/vision/tools/python_code/sift_training_data.h"
+#include "y2020/vision/v4l2_reader.h"
+#include "y2020/vision/vision_generated.h"
+
+namespace frc971 {
+namespace vision {
+
+class CameraReader {
+ public:
+ CameraReader(aos::EventLoop *event_loop,
+ const sift::TrainingData *training_data, V4L2Reader *reader,
+ const cv::Ptr<cv::flann::IndexParams> &index_params,
+ const cv::Ptr<cv::flann::SearchParams> &search_params)
+ : event_loop_(event_loop),
+ training_data_(training_data),
+ camera_calibration_(FindCameraCalibration()),
+ reader_(reader),
+ image_sender_(event_loop->MakeSender<CameraImage>("/camera")),
+ result_sender_(
+ event_loop->MakeSender<sift::ImageMatchResult>("/camera")),
+ detailed_result_sender_(
+ event_loop->MakeSender<sift::ImageMatchResult>("/camera/detailed")),
+ read_image_timer_(event_loop->AddTimer([this]() { ReadImage(); })) {
+ for (int ii = 0; ii < number_training_images(); ++ii) {
+ matchers_.push_back(cv::FlannBasedMatcher(index_params, search_params));
+ prev_camera_field_R_vec_list_.push_back(cv::Mat::zeros(3, 1, CV_32F));
+ prev_camera_field_T_list_.push_back(cv::Mat::zeros(3, 1, CV_32F));
+ }
+ CopyTrainingFeatures();
+
+ for (auto &matcher : matchers_) {
+ matcher.train();
+ }
+
+ event_loop->OnRun(
+ [this]() { read_image_timer_->Setup(event_loop_->monotonic_now()); });
+ }
+
+ private:
+ const sift::CameraCalibration *FindCameraCalibration() const;
+
+ // Copies the information from training_data_ into the matchers_.
+ void CopyTrainingFeatures();
+ // Processes an image (including sending the results).
+ void ProcessImage(const CameraImage &image);
+ // Reads an image, and then performs all of our processing on it.
+ void ReadImage();
+
+ flatbuffers::Offset<
+ flatbuffers::Vector<flatbuffers::Offset<sift::ImageMatch>>>
+ PackImageMatches(flatbuffers::FlatBufferBuilder *fbb,
+ const std::vector<std::vector<cv::DMatch>> &matches);
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<sift::Feature>>>
+ PackFeatures(flatbuffers::FlatBufferBuilder *fbb,
+ const std::vector<cv::KeyPoint> &keypoints,
+ const cv::Mat &descriptors);
+
+ void SendImageMatchResult(const CameraImage &image,
+ const std::vector<cv::KeyPoint> &keypoints,
+ const cv::Mat &descriptors,
+ const std::vector<std::vector<cv::DMatch>> &matches,
+ const std::vector<cv::Mat> &camera_target_list,
+ const std::vector<cv::Mat> &field_camera_list,
+ const std::vector<cv::Point2f> &target_point_vector,
+ const std::vector<float> &target_radius_vector,
+ const std::vector<int> &training_image_indices,
+ const std::vector<int> &homography_feature_counts,
+ aos::Sender<sift::ImageMatchResult> *result_sender,
+ bool send_details);
+
+ // Returns the 2D (image) location for the specified training feature.
+ cv::Point2f Training2dPoint(int training_image_index,
+ int feature_index) const {
+ const float x = training_data_->images()
+ ->Get(training_image_index)
+ ->features()
+ ->Get(feature_index)
+ ->x();
+ const float y = training_data_->images()
+ ->Get(training_image_index)
+ ->features()
+ ->Get(feature_index)
+ ->y();
+ return cv::Point2f(x, y);
+ }
+
+ // Returns the 3D location for the specified training feature.
+ cv::Point3f Training3dPoint(int training_image_index,
+ int feature_index) const {
+ const sift::KeypointFieldLocation *const location =
+ training_data_->images()
+ ->Get(training_image_index)
+ ->features()
+ ->Get(feature_index)
+ ->field_location();
+ return cv::Point3f(location->x(), location->y(), location->z());
+ }
+
+ const sift::TransformationMatrix *FieldToTarget(int training_image_index) {
+ return training_data_->images()
+ ->Get(training_image_index)
+ ->field_to_target();
+ }
+
+ void TargetLocation(int training_image_index, cv::Point2f &target_location,
+ float &target_radius) {
+ target_location.x =
+ training_data_->images()->Get(training_image_index)->target_point_x();
+ target_location.y =
+ training_data_->images()->Get(training_image_index)->target_point_y();
+ target_radius = training_data_->images()
+ ->Get(training_image_index)
+ ->target_point_radius();
+ }
+
+ int number_training_images() const {
+ return training_data_->images()->size();
+ }
+
+ cv::Mat CameraIntrinsics() const {
+ const cv::Mat result(3, 3, CV_32F,
+ const_cast<void *>(static_cast<const void *>(
+ camera_calibration_->intrinsics()->data())));
+ CHECK_EQ(result.total(), camera_calibration_->intrinsics()->size());
+ return result;
+ }
+
+ cv::Mat CameraDistCoeffs() const {
+ const cv::Mat result(5, 1, CV_32F,
+ const_cast<void *>(static_cast<const void *>(
+ camera_calibration_->dist_coeffs()->data())));
+ CHECK_EQ(result.total(), camera_calibration_->dist_coeffs()->size());
+ return result;
+ }
+
+ aos::EventLoop *const event_loop_;
+ const sift::TrainingData *const training_data_;
+ const sift::CameraCalibration *const camera_calibration_;
+ V4L2Reader *const reader_;
+ std::vector<cv::FlannBasedMatcher> matchers_;
+ aos::Sender<CameraImage> image_sender_;
+ aos::Sender<sift::ImageMatchResult> result_sender_;
+ aos::SendFailureCounter result_failure_counter_;
+ aos::Sender<sift::ImageMatchResult> detailed_result_sender_;
+ // We schedule this immediately to read an image. Having it on a timer means
+ // other things can run on the event loop in between.
+ aos::TimerHandler *const read_image_timer_;
+
+ // Storage for when we want to use the previous estimates of pose
+ std::vector<cv::Mat> prev_camera_field_R_vec_list_;
+ std::vector<cv::Mat> prev_camera_field_T_list_;
+
+ const std::unique_ptr<frc971::vision::SIFT971_Impl> sift_{
+ new frc971::vision::SIFT971_Impl()};
+};
+
+} // namespace vision
+} // namespace frc971
+#endif // Y2020_VISION_CAMERA_READER_H_
diff --git a/y2020/vision/camera_reader_main.cc b/y2020/vision/camera_reader_main.cc
new file mode 100644
index 0000000..c7fec43
--- /dev/null
+++ b/y2020/vision/camera_reader_main.cc
@@ -0,0 +1,52 @@
+#include "aos/events/shm_event_loop.h"
+#include "aos/init.h"
+#include "y2020/vision/camera_reader.h"
+
+// config used to allow running camera_reader independently. E.g.,
+// bazel run //y2020/vision:camera_reader -- --config y2020/config.json
+// --override_hostname pi-7971-1 --ignore_timestamps true
+DEFINE_string(config, "config.json", "Path to the config file to use.");
+namespace frc971 {
+namespace vision {
+namespace {
+
+void CameraReaderMain() {
+ aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+ aos::configuration::ReadConfig(FLAGS_config);
+
+ const aos::FlatbufferSpan<sift::TrainingData> training_data(
+ SiftTrainingData());
+ CHECK(training_data.Verify());
+
+ const auto index_params = cv::makePtr<cv::flann::IndexParams>();
+ index_params->setAlgorithm(cvflann::FLANN_INDEX_KDTREE);
+ index_params->setInt("trees", 5);
+ const auto search_params =
+ cv::makePtr<cv::flann::SearchParams>(/* checks */ 50);
+ cv::FlannBasedMatcher matcher(index_params, search_params);
+
+ aos::ShmEventLoop event_loop(&config.message());
+
+ // First, log the data for future reference.
+ {
+ aos::Sender<sift::TrainingData> training_data_sender =
+ event_loop.MakeSender<sift::TrainingData>("/camera");
+ CHECK_EQ(training_data_sender.Send(training_data),
+ aos::RawSender::Error::kOk);
+ }
+
+ V4L2Reader v4l2_reader(&event_loop, "/dev/video0");
+ CameraReader camera_reader(&event_loop, &training_data.message(),
+ &v4l2_reader, index_params, search_params);
+
+ event_loop.Run();
+}
+
+} // namespace
+} // namespace vision
+} // namespace frc971
+
+int main(int argc, char **argv) {
+ aos::InitGoogle(&argc, &argv);
+ frc971::vision::CameraReaderMain();
+}