Check alignment of target estimate and image timestamps

Plumbs the image capture timestamp through TargetEstimate, then matches
target_timestamp against image_timestamp in the viewer so blob results
are drawn over the camera frame they were computed from.

Change-Id: Ifefcb00202fb13f8157b9b2506a940a165fbad69
Signed-off-by: Henry Speiser <henry@speiser.net>
diff --git a/y2022/vision/camera_reader.cc b/y2022/vision/camera_reader.cc
index b8e666e..5b808a2 100644
--- a/y2022/vision/camera_reader.cc
+++ b/y2022/vision/camera_reader.cc
@@ -80,10 +80,9 @@
 }
 }  // namespace
 
-void CameraReader::ProcessImage(cv::Mat image_mat) {
+void CameraReader::ProcessImage(cv::Mat image_mat,
+                                int64_t image_monotonic_timestamp_ns) {
   BlobDetector::BlobResult blob_result;
-  blob_result.binarized_image =
-      cv::Mat::zeros(cv::Size(image_mat.cols, image_mat.rows), CV_8UC1);
   BlobDetector::ExtractBlobs(image_mat, &blob_result);
   auto builder = target_estimate_sender_.MakeBuilder();
   flatbuffers::Offset<BlobResultFbs> blob_result_offset;
@@ -114,7 +113,8 @@
       &target_estimate_builder);
   target_estimate_builder.add_blob_result(blob_result_offset);
   target_estimate_builder.add_camera_calibration(camera_calibration_offset);
-
+  target_estimate_builder.add_image_monotonic_timestamp_ns(
+      image_monotonic_timestamp_ns);
   builder.CheckOk(builder.Send(target_estimate_builder.Finish()));
 }
 
@@ -129,7 +129,10 @@
       std::this_thread::sleep_for(std::chrono::milliseconds(50));
       LOG(INFO) << "Reading file " << file;
       cv::Mat bgr_image = cv::imread(file.c_str());
-      ProcessImage(bgr_image);
+      // TODO (Henry) convert to YUYV
+      int64_t timestamp =
+          aos::monotonic_clock::now().time_since_epoch().count();
+      ProcessImage(bgr_image, timestamp);
     }
     event_loop_->Exit();
     return;
@@ -142,16 +145,13 @@
   }
 
   const CameraImage &image = reader_->LatestImage();
-  cv::Mat image_mat(image.rows(), image.cols(), CV_8U);
-  CHECK(image_mat.isContinuous());
 
-  const int number_pixels = image.rows() * image.cols();
-  for (int i = 0; i < number_pixels; ++i) {
-    reinterpret_cast<uint8_t *>(image_mat.data)[i] =
-        image.data()->data()[i * 2];
-  }
+  cv::Mat image_color_mat(cv::Size(image.cols(), image.rows()), CV_8UC2,
+                          (void *)image.data()->data());
+  cv::Mat image_mat(cv::Size(image.cols(), image.rows()), CV_8UC3);
+  cv::cvtColor(image_color_mat, image_mat, cv::COLOR_YUV2BGR_YUYV);
 
-  ProcessImage(image_mat);
+  ProcessImage(image_mat, image.monotonic_timestamp_ns());
 
   reader_->SendLatestImage();
   read_image_timer_->Setup(event_loop_->monotonic_now());
diff --git a/y2022/vision/camera_reader.h b/y2022/vision/camera_reader.h
index d3dd202..949247f 100644
--- a/y2022/vision/camera_reader.h
+++ b/y2022/vision/camera_reader.h
@@ -58,7 +58,7 @@
   const calibration::CameraCalibration *FindCameraCalibration() const;
 
   // Processes an image (including sending the results).
-  void ProcessImage(cv::Mat image);
+  void ProcessImage(cv::Mat image, int64_t image_monotonic_timestamp_ns);
 
   // Reads an image, and then performs all of our processing on it.
   void ReadImage();
diff --git a/y2022/vision/target_estimate.fbs b/y2022/vision/target_estimate.fbs
index 2621634..7dbc69f 100644
--- a/y2022/vision/target_estimate.fbs
+++ b/y2022/vision/target_estimate.fbs
@@ -41,8 +41,14 @@
 
   blob_result:BlobResultFbs (id: 2);
 
+  // Capture time of the image this estimate was computed from, expressed
+  // as nanoseconds since the monotonic clock epoch.
+  image_monotonic_timestamp_ns:int64 (id: 3);
+
   // Information about the camera which took this image.
-  camera_calibration:frc971.vision.calibration.CameraCalibration (id: 3);
+  camera_calibration:frc971.vision.calibration.CameraCalibration (id: 4);
+
+  // TODO(milind): add confidence
 }
 
 root_type TargetEstimate;
diff --git a/y2022/vision/viewer.cc b/y2022/vision/viewer.cc
index f655f91..ef503e2 100644
--- a/y2022/vision/viewer.cc
+++ b/y2022/vision/viewer.cc
@@ -23,6 +23,9 @@
 namespace vision {
 namespace {
 
+using namespace frc971::vision;
+
+std::map<int64_t, BlobDetector::BlobResult> target_est_map;
 aos::Fetcher<frc971::vision::CameraImage> image_fetcher;
 aos::Fetcher<y2022::vision::TargetEstimate> target_estimate_fetcher;
 
@@ -53,26 +56,38 @@
 }
 
 bool DisplayLoop() {
+  int64_t target_timestamp = 0;
+  if (target_estimate_fetcher.Fetch()) {
+    const TargetEstimate *target_est = target_estimate_fetcher.get();
+    CHECK(target_est != nullptr)
+        << "Got null when trying to fetch target estimate";
+
+    target_timestamp = target_est->image_monotonic_timestamp_ns();
+    if (target_est->blob_result()->filtered_blobs()->size() > 0) {
+      VLOG(2) << "Got blobs for timestamp " << target_timestamp << "\n";
+    }
+    // Store the TargetEstimate data so we can match timestamp with image
+    target_est_map[target_timestamp] = BlobDetector::BlobResult(
+        {cv::Mat(), FbsToCvBlobs(*target_est->blob_result()->filtered_blobs()),
+         FbsToCvBlobs(*target_est->blob_result()->unfiltered_blobs()),
+         FbsToBlobStats(*target_est->blob_result()->blob_stats()),
+         cv::Point{target_est->blob_result()->centroid()->x(),
+                   target_est->blob_result()->centroid()->y()}});
+    // Only keep last 10 matches
+    while (target_est_map.size() > 10u) {
+      target_est_map.erase(target_est_map.begin());
+    }
+  }
   int64_t image_timestamp = 0;
-  const frc971::vision::CameraImage *image;
-  // Read next image
   if (!image_fetcher.Fetch()) {
-    LOG(INFO) << "Couldn't fetch image";
+    VLOG(2) << "Couldn't fetch image";
     return true;
   }
-
-  image = image_fetcher.get();
+  const CameraImage *image = image_fetcher.get();
   CHECK(image != nullptr) << "Couldn't read image";
   image_timestamp = image->monotonic_timestamp_ns();
   VLOG(2) << "Got image at timestamp: " << image_timestamp;
 
-  // TODO(Milind) Store the target estimates and match them by timestamp to make
-  // sure we're getting the right one.
-  const TargetEstimate *target_est = nullptr;
-  if (target_estimate_fetcher.Fetch()) {
-    target_est = target_estimate_fetcher.get();
-  }
-
   // Create color image:
   cv::Mat image_color_mat(cv::Size(image->cols(), image->rows()), CV_8UC2,
                           (void *)image->data()->data());
@@ -84,19 +99,19 @@
     return false;
   }
 
-  LOG(INFO) << image->monotonic_timestamp_ns() << ": # unfiltered blobs: "
-            << target_est->blob_result()->unfiltered_blobs()->size()
-            << "; # filtered blobs: "
-            << target_est->blob_result()->filtered_blobs()->size();
+  auto target_est_it = target_est_map.find(image_timestamp);
+  if (target_est_it != target_est_map.end()) {
+    LOG(INFO) << image->monotonic_timestamp_ns() << ": # unfiltered blobs: "
+              << target_est_it->second.unfiltered_blobs.size()
+              << "; # filtered blobs: "
+              << target_est_it->second.filtered_blobs.size();
 
-  cv::Mat ret_image(cv::Size(image->cols(), image->rows()), CV_8UC3);
-  if (target_est != nullptr) {
-    BlobDetector::DrawBlobs(
-        ret_image, FbsToCvBlobs(*target_est->blob_result()->filtered_blobs()),
-        FbsToCvBlobs(*target_est->blob_result()->unfiltered_blobs()),
-        FbsToBlobStats(*target_est->blob_result()->blob_stats()),
-        cv::Point{target_est->blob_result()->centroid()->x(),
-                  target_est->blob_result()->centroid()->y()});
+    cv::Mat ret_image =
+        cv::Mat::zeros(cv::Size(image->cols(), image->rows()), CV_8UC3);
+    BlobDetector::DrawBlobs(ret_image, target_est_it->second.filtered_blobs,
+                            target_est_it->second.unfiltered_blobs,
+                            target_est_it->second.blob_stats,
+                            target_est_it->second.centroid);
     cv::imshow("blobs", ret_image);
   }
 
@@ -139,8 +154,6 @@
       ::std::chrono::milliseconds(100));
 
   event_loop.Run();
-
-  image_fetcher = aos::Fetcher<frc971::vision::CameraImage>();
 }
 }  // namespace
 }  // namespace vision