Put blob detection results in a struct
Passing around five separate output parameters was getting clunky.
Also rename the BlobResult flatbuffer table to BlobResultFbs so it
doesn't share a name with the new struct.
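
Roughly, call sites go from threading five outputs through the signature
to filling in one struct (a minimal sketch based on the signatures in
this change; the surrounding variables are illustrative):

    // Before: every output is a separate reference parameter.
    cv::Mat binarized_image;
    std::vector<std::vector<cv::Point>> filtered_blobs, unfiltered_blobs;
    std::vector<BlobDetector::BlobStats> blob_stats;
    cv::Point centroid;
    BlobDetector::ExtractBlobs(image_mat, binarized_image, filtered_blobs,
                               unfiltered_blobs, blob_stats, centroid);

    // After: all of the blob detection outputs live in one BlobResult.
    BlobDetector::BlobResult blob_result;
    BlobDetector::ExtractBlobs(image_mat, &blob_result);
    // e.g. blob_result.centroid, blob_result.filtered_blobs, ...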
Signed-off-by: Milind Upadhyay <milind.upadhyay@gmail.com>
Change-Id: I27b073635377c344aa02b01a094169ef90aff56e
diff --git a/y2022/vision/blob_detector.cc b/y2022/vision/blob_detector.cc
index aba3550..a732762 100644
--- a/y2022/vision/blob_detector.cc
+++ b/y2022/vision/blob_detector.cc
@@ -293,18 +293,16 @@
cv::circle(view_image, centroid, 3, cv::Scalar(255, 255, 0), cv::FILLED);
}
-void BlobDetector::ExtractBlobs(
- cv::Mat rgb_image, cv::Mat &binarized_image,
- std::vector<std::vector<cv::Point>> &filtered_blobs,
- std::vector<std::vector<cv::Point>> &unfiltered_blobs,
- std::vector<BlobStats> &blob_stats, cv::Point &centroid) {
+void BlobDetector::ExtractBlobs(cv::Mat rgb_image,
+ BlobDetector::BlobResult *blob_result) {
auto start = aos::monotonic_clock::now();
- binarized_image = ThresholdImage(rgb_image);
- unfiltered_blobs = FindBlobs(binarized_image);
- blob_stats = ComputeStats(unfiltered_blobs);
- auto filtered_pair = FilterBlobs(unfiltered_blobs, blob_stats);
- filtered_blobs = filtered_pair.first;
- centroid = filtered_pair.second;
+ blob_result->binarized_image = ThresholdImage(rgb_image);
+ blob_result->unfiltered_blobs = FindBlobs(blob_result->binarized_image);
+ blob_result->blob_stats = ComputeStats(blob_result->unfiltered_blobs);
+ auto filtered_pair =
+ FilterBlobs(blob_result->unfiltered_blobs, blob_result->blob_stats);
+ blob_result->filtered_blobs = filtered_pair.first;
+ blob_result->centroid = filtered_pair.second;
auto end = aos::monotonic_clock::now();
LOG(INFO) << "Blob detection elapsed time: "
<< std::chrono::duration<double, std::milli>(end - start).count()
diff --git a/y2022/vision/blob_detector.h b/y2022/vision/blob_detector.h
index f8a4ab4..4077e2c 100644
--- a/y2022/vision/blob_detector.h
+++ b/y2022/vision/blob_detector.h
@@ -16,7 +16,15 @@
size_t num_points;
};
+ struct BlobResult {
+ cv::Mat binarized_image;
+ std::vector<std::vector<cv::Point>> filtered_blobs, unfiltered_blobs;
+ std::vector<BlobStats> blob_stats;
+ cv::Point centroid;
+ };
+
BlobDetector() {}
+
// Given an image, threshold it to find "green" pixels
// Input: Color image
// Output: Grayscale (binarized) image with green pixels set to 255
@@ -44,11 +52,7 @@
const std::vector<std::vector<cv::Point>> &unfiltered_blobs,
const std::vector<BlobStats> &blob_stats, cv::Point centroid);
- static void ExtractBlobs(
- cv::Mat rgb_image, cv::Mat &binarized_image,
- std::vector<std::vector<cv::Point>> &filtered_blobs,
- std::vector<std::vector<cv::Point>> &unfiltered_blobs,
- std::vector<BlobStats> &blob_stats, cv::Point &centroid);
+ static void ExtractBlobs(cv::Mat rgb_image, BlobResult *blob_result);
};
} // namespace vision
} // namespace y2022
diff --git a/y2022/vision/camera_reader.cc b/y2022/vision/camera_reader.cc
index e1de75a..db87cb0 100644
--- a/y2022/vision/camera_reader.cc
+++ b/y2022/vision/camera_reader.cc
@@ -1,17 +1,16 @@
#include "y2022/vision/camera_reader.h"
-#include <cmath>
#include <chrono>
+#include <cmath>
#include <thread>
-#include <opencv2/imgproc.hpp>
-
#include "aos/events/event_loop.h"
#include "aos/events/shm_event_loop.h"
#include "aos/flatbuffer_merge.h"
#include "aos/network/team_number.h"
#include "frc971/vision/v4l2_reader.h"
#include "frc971/vision/vision_generated.h"
+#include "opencv2/imgproc.hpp"
#include "y2022/vision/blob_detector.h"
#include "y2022/vision/calibration_generated.h"
#include "y2022/vision/target_estimator.h"
@@ -82,25 +81,23 @@
} // namespace
void CameraReader::ProcessImage(cv::Mat image_mat) {
- // Remember, we're getting YUYV images, so we start by converting to RGB
-
- std::vector<std::vector<cv::Point>> filtered_blobs, unfiltered_blobs;
- std::vector<BlobDetector::BlobStats> blob_stats;
- cv::Mat binarized_image =
+ BlobDetector::BlobResult blob_result;
+ blob_result.binarized_image =
cv::Mat::zeros(cv::Size(image_mat.cols, image_mat.rows), CV_8UC1);
- cv::Point centroid;
- BlobDetector::ExtractBlobs(image_mat, binarized_image, filtered_blobs,
- unfiltered_blobs, blob_stats, centroid);
+ BlobDetector::ExtractBlobs(image_mat, &blob_result);
auto builder = target_estimate_sender_.MakeBuilder();
- flatbuffers::Offset<BlobResult> blob_result_offset;
+ flatbuffers::Offset<BlobResultFbs> blob_result_offset;
{
- const auto filtered_blobs_offset = CvBlobsToFbs(filtered_blobs, builder);
+ const auto filtered_blobs_offset =
+ CvBlobsToFbs(blob_result.filtered_blobs, builder);
const auto unfiltered_blobs_offset =
- CvBlobsToFbs(unfiltered_blobs, builder);
- const auto blob_stats_offset = BlobStatsToFbs(blob_stats, builder);
- const Point centroid_fbs = Point{centroid.x, centroid.y};
+ CvBlobsToFbs(blob_result.unfiltered_blobs, builder);
+ const auto blob_stats_offset =
+ BlobStatsToFbs(blob_result.blob_stats, builder);
+ const Point centroid_fbs =
+ Point{blob_result.centroid.x, blob_result.centroid.y};
- auto blob_result_builder = builder.MakeBuilder<BlobResult>();
+ auto blob_result_builder = builder.MakeBuilder<BlobResultFbs>();
blob_result_builder.add_filtered_blobs(filtered_blobs_offset);
blob_result_builder.add_unfiltered_blobs(unfiltered_blobs_offset);
blob_result_builder.add_blob_stats(blob_stats_offset);
@@ -109,9 +106,9 @@
}
auto target_estimate_builder = builder.MakeBuilder<TargetEstimate>();
- TargetEstimator::EstimateTargetLocation(centroid, CameraIntrinsics(),
- CameraExtrinsics(),
- &target_estimate_builder);
+ TargetEstimator::EstimateTargetLocation(
+ blob_result.centroid, CameraIntrinsics(), CameraExtrinsics(),
+ &target_estimate_builder);
target_estimate_builder.add_blob_result(blob_result_offset);
builder.CheckOk(builder.Send(target_estimate_builder.Finish()));
diff --git a/y2022/vision/target_estimate.fbs b/y2022/vision/target_estimate.fbs
index 207a37a..707014c 100644
--- a/y2022/vision/target_estimate.fbs
+++ b/y2022/vision/target_estimate.fbs
@@ -18,7 +18,7 @@
}
// Information for debugging blob detection
-table BlobResult {
+table BlobResultFbs {
// Blobs that passed the filtering step
filtered_blobs:[Blob] (id: 0);
// All detected blobs
@@ -37,7 +37,7 @@
// Positive means right of center, negative means left.
angle_to_target:double (id: 1);
- blob_result:BlobResult (id: 2);
+ blob_result:BlobResultFbs (id: 2);
// TODO(milind): add confidence
}