Add a stripped-down camera_reader

This camera_reader will serve as the routing point for images being
processed on the Pis.

Change-Id: I8e7472d44d6cfec1e8453be55f03234476f106a4
Signed-off-by: Jim Ostrowski <yimmy13@gmail.com>
diff --git a/y2022/vision/BUILD b/y2022/vision/BUILD
index c461365..d65814a 100644
--- a/y2022/vision/BUILD
+++ b/y2022/vision/BUILD
@@ -6,9 +6,35 @@
     target_compatible_with = ["@platforms//os:linux"],
     visibility = ["//y2022:__subpackages__"],
     deps = [
+        ":camera_reader_lib",
         "//aos:init",
         "//aos/events:shm_event_loop",
-        "//y2020/vision:camera_reader_lib",
+    ],
+)
+
+cc_library(
+    name = "camera_reader_lib",
+    srcs = [
+        "camera_reader.cc",
+    ],
+    hdrs = [
+        "camera_reader.h",
+    ],
+    data = [
+        "//y2022:config",
+    ],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = ["//y2022:__subpackages__"],
+    deps = [
+        "//aos:flatbuffer_merge",
+        "//aos/events:event_loop",
+        "//aos/network:team_number",
+        "//frc971/vision:v4l2_reader",
+        "//frc971/vision:vision_fbs",
+        "//third_party:opencv",
+        "//y2020/vision/sift:sift_fbs",
+        "//y2020/vision/sift:sift_training_fbs",
+        "//y2020/vision/tools/python_code:sift_training_data",
     ],
 )
 
diff --git a/y2022/vision/camera_reader.cc b/y2022/vision/camera_reader.cc
new file mode 100644
index 0000000..4fa6651
--- /dev/null
+++ b/y2022/vision/camera_reader.cc
@@ -0,0 +1,69 @@
+#include "y2022/vision/camera_reader.h"
+
+#include <math.h>
+
+#include <opencv2/imgproc.hpp>
+
+#include "aos/events/event_loop.h"
+#include "aos/flatbuffer_merge.h"
+#include "aos/network/team_number.h"
+#include "frc971/vision/v4l2_reader.h"
+#include "frc971/vision/vision_generated.h"
+#include "y2020/vision/sift/sift_generated.h"
+#include "y2020/vision/sift/sift_training_generated.h"
+#include "y2020/vision/tools/python_code/sift_training_data.h"
+
+namespace y2022 {
+namespace vision {
+
+using namespace frc971::vision;
+
+const sift::CameraCalibration *CameraReader::FindCameraCalibration() const {
+  const std::string_view node_name = event_loop_->node()->name()->string_view();
+  const int team_number = aos::network::GetTeamNumber();
+  for (const sift::CameraCalibration *candidate :
+       *training_data_->camera_calibrations()) {
+    if (candidate->node_name()->string_view() != node_name) {
+      continue;
+    }
+    if (candidate->team_number() != team_number) {
+      continue;
+    }
+    return candidate;
+  }
+  LOG(FATAL) << ": Failed to find camera calibration for " << node_name
+             << " on " << team_number;
+}
+
+void CameraReader::ProcessImage(const CameraImage &image) {
+  // Remember, we're getting YUYV images, so we start by pulling out just the
+  // Y (luma) channel, which gives us a grayscale image.
+
+  // TODO: Need to code this up for blob detection
+  cv::Mat image_mat(image.rows(), image.cols(), CV_8U);
+  CHECK(image_mat.isContinuous());
+  const int number_pixels = image.rows() * image.cols();
+  for (int i = 0; i < number_pixels; ++i) {
+    reinterpret_cast<uint8_t *>(image_mat.data)[i] =
+        image.data()->data()[i * 2];
+  }
+
+  // Now, send our two messages: one large one with details for remote
+  // debugging (features), and one smaller one.
+  // TODO: Send blob detection and pose results
+}
+
+void CameraReader::ReadImage() {
+  if (!reader_->ReadLatestImage()) {
+    read_image_timer_->Setup(event_loop_->monotonic_now() +
+                             std::chrono::milliseconds(10));
+    return;
+  }
+
+  ProcessImage(reader_->LatestImage());
+
+  reader_->SendLatestImage();
+  read_image_timer_->Setup(event_loop_->monotonic_now());
+}
+
+}  // namespace vision
+}  // namespace y2022
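
The blob-detection TODO in ProcessImage() above is intentionally left unimplemented in this change. Purely as a rough sketch of what it might turn into, a simple threshold-plus-contours pass over the extracted grayscale image could look something like the following; the helper name FindBrightBlobs and the threshold value are placeholders, not part of this patch.

#include <vector>

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Hypothetical helper: threshold the grayscale (Y-channel) image and return
// the external contours of the bright regions as candidate blobs.  The
// threshold value of 200 is an arbitrary placeholder.
std::vector<std::vector<cv::Point>> FindBrightBlobs(const cv::Mat &gray_image) {
  cv::Mat binarized;
  cv::threshold(gray_image, binarized, /*thresh=*/200, /*maxval=*/255,
                cv::THRESH_BINARY);
  std::vector<std::vector<cv::Point>> contours;
  cv::findContours(binarized, contours, cv::RETR_EXTERNAL,
                   cv::CHAIN_APPROX_SIMPLE);
  return contours;
}

Whether thresholding alone is enough, or whether candidate blobs need filtering by size and shape before pose estimation, is left to the follow-up work the TODOs describe.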
diff --git a/y2022/vision/camera_reader.h b/y2022/vision/camera_reader.h
new file mode 100644
index 0000000..c302b15
--- /dev/null
+++ b/y2022/vision/camera_reader.h
@@ -0,0 +1,78 @@
+#ifndef Y2022_VISION_CAMERA_READER_H_
+#define Y2022_VISION_CAMERA_READER_H_
+
+#include <math.h>
+
+#include <opencv2/calib3d.hpp>
+#include <opencv2/features2d.hpp>
+#include <opencv2/imgproc.hpp>
+
+#include "aos/events/event_loop.h"
+#include "aos/flatbuffer_merge.h"
+#include "aos/network/team_number.h"
+#include "frc971/vision/v4l2_reader.h"
+#include "frc971/vision/vision_generated.h"
+#include "y2020/vision/sift/sift_generated.h"
+#include "y2020/vision/sift/sift_training_generated.h"
+#include "y2020/vision/tools/python_code/sift_training_data.h"
+
+namespace y2022 {
+namespace vision {
+
+using namespace frc971::vision;
+
+// TODO<Jim/Milind>: Need to add senders to publish the blob data/stats
+
+class CameraReader {
+ public:
+  CameraReader(aos::EventLoop *event_loop,
+               const sift::TrainingData *training_data, V4L2Reader *reader)
+      : event_loop_(event_loop),
+        training_data_(training_data),
+        camera_calibration_(FindCameraCalibration()),
+        reader_(reader),
+        image_sender_(event_loop->MakeSender<CameraImage>("/camera")),
+        read_image_timer_(event_loop->AddTimer([this]() { ReadImage(); })) {
+    event_loop->OnRun(
+        [this]() { read_image_timer_->Setup(event_loop_->monotonic_now()); });
+  }
+
+ private:
+  const sift::CameraCalibration *FindCameraCalibration() const;
+
+  // Processes an image (including sending the results).
+  void ProcessImage(const CameraImage &image);
+
+  // Reads an image, and then performs all of our processing on it.
+  void ReadImage();
+
+  cv::Mat CameraIntrinsics() const {
+    const cv::Mat result(3, 3, CV_32F,
+                         const_cast<void *>(static_cast<const void *>(
+                             camera_calibration_->intrinsics()->data())));
+    CHECK_EQ(result.total(), camera_calibration_->intrinsics()->size());
+    return result;
+  }
+
+  cv::Mat CameraDistCoeffs() const {
+    const cv::Mat result(5, 1, CV_32F,
+                         const_cast<void *>(static_cast<const void *>(
+                             camera_calibration_->dist_coeffs()->data())));
+    CHECK_EQ(result.total(), camera_calibration_->dist_coeffs()->size());
+    return result;
+  }
+
+  aos::EventLoop *const event_loop_;
+  const sift::TrainingData *const training_data_;
+  const sift::CameraCalibration *const camera_calibration_;
+  V4L2Reader *const reader_;
+  aos::Sender<CameraImage> image_sender_;
+
+  // We schedule this immediately to read an image. Having it on a timer means
+  // other things can run on the event loop in between.
+  aos::TimerHandler *const read_image_timer_;
+};
+
+}  // namespace vision
+}  // namespace y2022
+#endif  // Y2022_VISION_CAMERA_READER_H_
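
CameraIntrinsics() and CameraDistCoeffs() above wrap the calibration flatbuffer's float arrays as 3x3 and 5x1 cv::Mat views without copying. Nothing in this patch calls them yet; purely as an illustration of the kind of use they are set up for (the helper below is hypothetical), undistorting pixel coordinates might look like:

#include <vector>

#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Hypothetical usage: map raw pixel coordinates to normalized, undistorted
// coordinates using the camera matrix and distortion coefficients returned by
// CameraIntrinsics() and CameraDistCoeffs().
std::vector<cv::Point2f> UndistortPixels(const std::vector<cv::Point2f> &pixels,
                                         const cv::Mat &intrinsics,
                                         const cv::Mat &dist_coeffs) {
  std::vector<cv::Point2f> undistorted;
  cv::undistortPoints(pixels, undistorted, intrinsics, dist_coeffs);
  return undistorted;
}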
diff --git a/y2022/vision/camera_reader_main.cc b/y2022/vision/camera_reader_main.cc
index ee65cfe..6f65a6d 100644
--- a/y2022/vision/camera_reader_main.cc
+++ b/y2022/vision/camera_reader_main.cc
@@ -1,6 +1,6 @@
 #include "aos/events/shm_event_loop.h"
 #include "aos/init.h"
-#include "y2020/vision/camera_reader.h"
+#include "y2022/vision/camera_reader.h"
 
 // config used to allow running camera_reader independently.  E.g.,
 // bazel run //y2022/vision:camera_reader -- --config y2022/config.json
@@ -22,13 +22,6 @@
       SiftTrainingData());
   CHECK(training_data.Verify());
 
-  const auto index_params = cv::makePtr<cv::flann::IndexParams>();
-  index_params->setAlgorithm(cvflann::FLANN_INDEX_KDTREE);
-  index_params->setInt("trees", 5);
-  const auto search_params =
-      cv::makePtr<cv::flann::SearchParams>(/* checks */ 50);
-  cv::FlannBasedMatcher matcher(index_params, search_params);
-
   aos::ShmEventLoop event_loop(&config.message());
 
   // First, log the data for future reference.
@@ -40,11 +33,11 @@
   }
 
   V4L2Reader v4l2_reader(&event_loop, "/dev/video0");
-  CameraReader camera_reader(&event_loop, &training_data.message(),
-                             &v4l2_reader, index_params, search_params);
-
   v4l2_reader.SetExposure(FLAGS_exposure);
 
+  CameraReader camera_reader(&event_loop, &training_data.message(),
+                             &v4l2_reader);
+
   event_loop.Run();
 }