Actually call yolov5 inference
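
Load the Edge TPU YOLOv5 model in the GamePiecesDetector constructor,
convert incoming YUYV camera frames to BGR, run inference on them, and
publish the resulting detections instead of randomly generated boxes.
Update the libedgetpu and libtensorflowlite prebuilts (and their build
scripts) to pinned git revisions, and add the model and the arm shared
libraries as data dependencies. Non-maximum suppression is temporarily
disabled for now because of a linker error.
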
Signed-off-by: Filip Kujawa <filip.j.kujawa@gmail.com>
Change-Id: I693aa253d09e88f6000ee9ea5a8c68862ac47629
diff --git a/WORKSPACE b/WORKSPACE
index f411682..67c23a9 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -580,6 +580,13 @@
urls = ["https://www.frc971.org/Build-Dependencies/test_image_frc971.vision.CameraImage_2023.01.28.tar.gz"],
)
+http_file(
+ name = "game_pieces_edgetpu_model",
+ downloaded_file_path = "edgetpu_model.tflite",
+ sha256 = "af1cf86515d03690389845d015895aff734ab890141ca792813c1b5754900b4d",
+ urls = ["https://www.frc971.org/Build-Dependencies/models/2023/model_edgetpu_2023.04.09.tflite"],
+)
+
# Recompressed from libusb-1.0.21.7z.
http_file(
name = "libusb_1_0_windows",
@@ -1567,17 +1574,17 @@
http_archive(
name = "libedgetpu",
build_file = "//third_party:libedgetpu/libedgetpu.BUILD",
- sha256 = "d082df79a33309f58da697cce258acca96ceb12db40660fdbf7826289e4a037c",
+ sha256 = "c900faf2c9ea9599fda60c3d03ac43d0d7b34119659c9e35638b81cd14354b57",
strip_prefix = "libedgetpu-bazel",
- url = "https://www.frc971.org/Build-Dependencies/libedgetpu-1.0.tar.gz",
+ url = "https://www.frc971.org/Build-Dependencies/libedgetpu-ddfa7bde33c23afd8c2892182faa3e5b4e6ad94e.tar.gz",
)
http_archive(
name = "libtensorflowlite",
build_file = "//third_party:libtensorflowlite/libtensorflowlite.BUILD",
- sha256 = "0e3f8deac9c7cdf9aa5812ad6a87af318ed1cf08cb0c414aa494846b7fc15302",
+ sha256 = "a073dfddb3cb25113ba7eac6edb5569d0ae7988cad881d3f665e8ca0b8b85108",
strip_prefix = "tensorflow-bazel",
- url = "https://www.frc971.org/Build-Dependencies/tensorflow-2.8.0.tar.gz",
+ url = "https://www.frc971.org/Build-Dependencies/tensorflow-a4dfb8d1a71385bd6d122e4f27f86dcebb96712d.tar.gz",
)
http_archive(
diff --git a/third_party/libedgetpu/libedgetpu.BUILD b/third_party/libedgetpu/libedgetpu.BUILD
index 0289452..181c93a 100644
--- a/third_party/libedgetpu/libedgetpu.BUILD
+++ b/third_party/libedgetpu/libedgetpu.BUILD
@@ -1,11 +1,27 @@
cc_library(
- visibility = ["//visibility:public"],
name = "libedgetpu-k8",
- srcs = ["k8/libedgetpu.so.1.0"]
+ srcs = ["k8/libedgetpu.so.1.0"],
+ hdrs = glob(["include/**/*.h"]),
+ strip_include_prefix = "include",
+ visibility = ["//visibility:public"],
)
cc_library(
- visibility = ["//visibility:public"],
name = "libedgetpu-arm",
- srcs = ["arm/libedgetpu.so.1.0"]
-)
\ No newline at end of file
+ srcs = ["arm/libedgetpu.so.1.0"],
+ hdrs = glob(["include/**/*.h"]),
+ strip_include_prefix = "include",
+ visibility = ["//visibility:public"],
+)
+
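+# Provide the arm library under its runtime soname (libedgetpu.so.1) so it can
+# be listed as a data dependency and found by the dynamic loader.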
+genrule(
+ name = "renamed_libedgetpu-arm",
+ srcs = [
+ "arm/libedgetpu.so.1.0",
+ ],
+ outs = [
+ "arm/libedgetpu.so.1",
+ ],
+ cmd = "cp $< $@",
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/libedgetpu/libedgetpu_build_script.sh b/third_party/libedgetpu/libedgetpu_build_script.sh
old mode 100644
new mode 100755
index 0eafccf..76e20db
--- a/third_party/libedgetpu/libedgetpu_build_script.sh
+++ b/third_party/libedgetpu/libedgetpu_build_script.sh
@@ -1,10 +1,18 @@
+#!/bin/bash
+# This script creates a compressed tarball named libedgetpu-${GIT_VERSION}.tar.gz,
+# which contains the header files and shared libraries needed to use the Edge
+# TPU on both arm and x86 architectures.
+# This script assumes you have Docker installed.
+#
# Clone the correct version of libedgetpu
git clone https://github.com/google-coral/libedgetpu.git
cd libedgetpu
+GIT_VERSION=ddfa7bde33c23afd8c2892182faa3e5b4e6ad94e
+git checkout ${GIT_VERSION}
# Build libedgetpu.so.1.0 for both arm and x86
DOCKER_CPUS="k8" DOCKER_IMAGE="ubuntu:18.04" DOCKER_TARGETS=libedgetpu make docker-build
DOCKER_CPUS="aarch64" DOCKER_IMAGE="debian:stretch" DOCKER_TARGETS=libedgetpu make docker-build
-# Create the directory for the tarball and move the resulting files into it
+# Create the directory for the tarball and move the resulting files into it
+rm -rf libedgetpu-bazel
mkdir libedgetpu-bazel
mkdir libedgetpu-bazel/arm
mkdir libedgetpu-bazel/k8
@@ -12,5 +20,7 @@
cp out/direct/k8/libedgetpu.so.1.0 libedgetpu-bazel/k8
# Copy header files to the include directory
-mkdir libedgetpu-bazel/include
-cp -r include/* libedgetpu-bazel/include/
+mkdir -p libedgetpu-bazel/include/tflite/
+rsync -zarv --include="*/" --include='*.h' --exclude='*' tflite/ libedgetpu-bazel/include/tflite/
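+# Package everything up; the libedgetpu http_archive in WORKSPACE downloads
+# this tarball.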
+tar zcvf libedgetpu-${GIT_VERSION}.tar.gz libedgetpu-bazel
+
diff --git a/third_party/libtensorflowlite/libtensorflowlite.BUILD b/third_party/libtensorflowlite/libtensorflowlite.BUILD
index a6b837d..761ca76 100644
--- a/third_party/libtensorflowlite/libtensorflowlite.BUILD
+++ b/third_party/libtensorflowlite/libtensorflowlite.BUILD
@@ -1,16 +1,19 @@
cc_library(
- visibility = ["//visibility:public"],
name = "tensorflow-k8",
+ srcs = ["k8/libtensorflowlite.so"],
hdrs = glob(["include/**/*.h"]),
+ copts = ["-Wno-unused-parameter"],
strip_include_prefix = "include",
- srcs = ["k8/libtensorflowlite.so"]
+ visibility = ["//visibility:public"],
)
cc_library(
- visibility = ["//visibility:public"],
name = "tensorflow-arm",
+ srcs = ["arm/libtensorflowlite.so"],
hdrs = glob(["include/**/*.h"]),
+ copts = ["-Wno-unused-parameter"],
strip_include_prefix = "include",
- srcs = ["arm/libtensorflowlite.so"]
+ visibility = ["//visibility:public"],
)
+exports_files(["arm/libtensorflowlite.so"])
diff --git a/third_party/libtensorflowlite/tensorflow_build_script.sh b/third_party/libtensorflowlite/tensorflow_build_script.sh
old mode 100644
new mode 100755
index 6c44353..8bde976
--- a/third_party/libtensorflowlite/tensorflow_build_script.sh
+++ b/third_party/libtensorflowlite/tensorflow_build_script.sh
@@ -1,11 +1,18 @@
+#!/bin/bash
+# This script creates a compressed tarball named tensorflow-${GIT_VERSION}.tar.gz,
+# which contains the header files and shared libraries needed to use TensorFlow
+# Lite on both arm and x86 architectures.
+# This script assumes you have bazelisk installed and the necessary permissions.
+#
# Clone and checkout the correct version of Tensorflow
git clone https://github.com/tensorflow/tensorflow.git tensorflow_src
cd tensorflow_src
-git checkout v2.8.0
+GIT_VERSION=a4dfb8d1a71385bd6d122e4f27f86dcebb96712d
+git checkout $GIT_VERSION
# Build libtensorflowlite.so for both arm and x86
-bazel build --config=elinux_aarch64 -c opt //tensorflow/lite:libtensorflowlite.so
-bazel build --config=native_arch_linux -c opt //tensorflow/lite:libtensorflowlite.so
-# Create the directory for the tarball and move the resulting files into it
+bazelisk build --config=elinux_aarch64 -c opt //tensorflow/lite:libtensorflowlite.so
+bazelisk build --config=native_arch_linux -c opt //tensorflow/lite:libtensorflowlite.so
+# Create the directory for the tarball and move the resulting files into it
+rm -rf tensorflow-bazel
mkdir tensorflow-bazel
mkdir tensorflow-bazel/arm
mkdir tensorflow-bazel/k8
@@ -13,5 +20,8 @@
cp bazel-out/k8-opt/bin/tensorflow/lite/libtensorflowlite.so tensorflow-bazel/k8
# Copy header files to the include directory
- mkdir -p tensorflow-bazel/tensorflow/core/util
- rsync -zarv --include='*/' --include='*.h' --exclude='*' tensorflow/core/util tensorflow-bazel/tensorflow/core/util
\ No newline at end of file
+mkdir -p tensorflow-bazel/include/tensorflow/
+mkdir -p tensorflow-bazel/include/flatbuffers/
+rsync -zarv --include="*/" --include='*.h' --exclude='*' tensorflow/ tensorflow-bazel/include/tensorflow/
+rsync -zarv --include="*/" --include='*.h' --exclude='*' bazel-out/../../../external/flatbuffers/include/flatbuffers/ tensorflow-bazel/include/flatbuffers/
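+# Package everything up; the libtensorflowlite http_archive in WORKSPACE
+# downloads this tarball.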
+tar zcvf tensorflow-${GIT_VERSION}.tar.gz tensorflow-bazel
diff --git a/y2023/BUILD b/y2023/BUILD
index 08713b7..f85f080 100644
--- a/y2023/BUILD
+++ b/y2023/BUILD
@@ -72,6 +72,9 @@
"//y2023/constants:constants.json",
"//y2023/vision:image_streamer_start",
"//y2023/www:www_files",
+ "@game_pieces_edgetpu_model//file",
+ "@libedgetpu//:arm/libedgetpu.so.1",
+ "@libtensorflowlite//:arm/libtensorflowlite.so",
],
dirs = [
"//y2023/www:www_files",
diff --git a/y2023/vision/BUILD b/y2023/vision/BUILD
index 0221aad..dfcf396 100644
--- a/y2023/vision/BUILD
+++ b/y2023/vision/BUILD
@@ -224,11 +224,13 @@
],
data = [
"//y2023:aos_config",
+ "@game_pieces_edgetpu_model//file",
],
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//y2023:__subpackages__"],
deps = [
":game_pieces_fbs",
+ ":yolov5_lib",
"//aos/events:event_loop",
"//aos/events:shm_event_loop",
"//frc971/vision:vision_fbs",
@@ -273,6 +275,7 @@
name = "yolov5_lib",
srcs = ["yolov5.cc"],
hdrs = ["yolov5.h"],
+ copts = ["-Wno-unused-parameter"],
deps = [
"//third_party:opencv",
"@com_github_gflags_gflags//:gflags",
diff --git a/y2023/vision/game_pieces.cc b/y2023/vision/game_pieces.cc
index 0e2546f..68cc5b2 100644
--- a/y2023/vision/game_pieces.cc
+++ b/y2023/vision/game_pieces.cc
@@ -5,6 +5,8 @@
#include "aos/events/event_loop.h"
#include "aos/events/shm_event_loop.h"
#include "frc971/vision/vision_generated.h"
+#include "y2023/vision/yolov5.h"
+#include <chrono>
// The best_x and best_y are pixel (x, y) cordinates. The 'best'
// game piece is picked on proximity to the specified cordinates.
@@ -24,18 +26,34 @@
namespace vision {
GamePiecesDetector::GamePiecesDetector(aos::EventLoop *event_loop)
: game_pieces_sender_(event_loop->MakeSender<GamePieces>("/camera")) {
+ LOG(INFO) << "Before load model in constr";
+
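+  // edgetpu_model.tflite is provided at runtime by the
+  // @game_pieces_edgetpu_model data dependency declared in WORKSPACE.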
+ model = MakeYOLOV5();
+ model->LoadModel("edgetpu_model.tflite");
+
+ LOG(INFO) << "After load model in constr";
event_loop->MakeWatcher("/camera", [this](const CameraImage &camera_image) {
this->ProcessImage(camera_image);
});
}
-// TODO(FILIP): Actually do inference.
-
void GamePiecesDetector::ProcessImage(const CameraImage &image) {
- // Param is not used for now.
- (void)image;
+ auto start = std::chrono::high_resolution_clock::now();
+ LOG(INFO) << reinterpret_cast<const void*>(image.data()->data());
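+  // The camera image is packed YUYV (2 bytes per pixel); wrap it in a Mat and
+  // convert it to BGR before handing it to the model.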
+ cv::Mat image_color_mat(cv::Size(image.cols(), image.rows()), CV_8UC2,
+ (void *)image.data()->data());
+ std::vector<Detection> detections;
+ cv::Mat image_mat(cv::Size(image.cols(), image.rows()), CV_8UC3);
+ LOG(INFO) << reinterpret_cast<void*>(image_mat.ptr());
+ cv::cvtColor(image_color_mat, image_mat, cv::COLOR_YUV2BGR_YUYV);
+ LOG(INFO) << reinterpret_cast<void*>(image_mat.ptr());
- const int detection_count = 5;
+ detections = model->ProcessImage(image_mat);
+ LOG(INFO) << reinterpret_cast<void*>(image_mat.ptr());
+ LOG(INFO) << reinterpret_cast<void*>(image_color_mat.ptr());
+
+  auto stop = std::chrono::high_resolution_clock::now();
+  auto duration =
+      std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
+  LOG(INFO) << "Inference time: " << duration.count() << " ms";
auto builder = game_pieces_sender_.MakeBuilder();
@@ -45,23 +63,30 @@
int best_distance_index = 0;
srand(time(0));
- for (int i = 0; i < detection_count; i++) {
- int h = rand() % 1000;
- int w = rand() % 1000;
- int x = rand() % 250;
- int y = rand() % 250;
-
+ for (size_t i = 0; i < detections.size(); i++) {
auto box_builder = builder.MakeBuilder<Box>();
- box_builder.add_h(h);
- box_builder.add_w(w);
- box_builder.add_x(x);
- box_builder.add_y(y);
+ box_builder.add_h(detections[i].box.height);
+ box_builder.add_w(detections[i].box.width);
+ box_builder.add_x(detections[i].box.x);
+ box_builder.add_y(detections[i].box.y);
auto box_offset = box_builder.Finish();
auto game_piece_builder = builder.MakeBuilder<GamePiece>();
- game_piece_builder.add_piece_class(y2023::vision::Class::CONE_DOWN);
+ switch (detections[i].class_id) {
+ case 0:
+ game_piece_builder.add_piece_class(Class::CONE_DOWN);
+ break;
+ case 1:
+ game_piece_builder.add_piece_class(Class::CONE_UP);
+ break;
+ case 2:
+ game_piece_builder.add_piece_class(Class::CUBE);
+ break;
+ default:
+ game_piece_builder.add_piece_class(Class::CONE_DOWN);
+ }
game_piece_builder.add_box(box_offset);
- game_piece_builder.add_confidence(0.9);
+ game_piece_builder.add_confidence(detections[i].confidence);
auto game_piece = game_piece_builder.Finish();
game_pieces_offsets.push_back(game_piece);
@@ -69,8 +94,8 @@
// Inference returns the top left corner of the bounding box
// but we want the center of the box for this.
- const int center_x = x + w / 2;
- const int center_y = y + h / 2;
+ const int center_x = detections[i].box.x + detections[i].box.width / 2;
+ const int center_y = detections[i].box.y + detections[i].box.height / 2;
// Find difference between target x, y and the x, y
// of the bounding box using Euclidean distance.
@@ -85,8 +110,7 @@
}
};
- flatbuffers::FlatBufferBuilder fbb;
- auto game_pieces_vector = fbb.CreateVector(game_pieces_offsets);
+ auto game_pieces_vector = builder.fbb()->CreateVector(game_pieces_offsets);
auto game_pieces_builder = builder.MakeBuilder<GamePieces>();
game_pieces_builder.add_game_pieces(game_pieces_vector);
@@ -96,4 +120,4 @@
}
} // namespace vision
-} // namespace y2023
\ No newline at end of file
+} // namespace y2023
diff --git a/y2023/vision/game_pieces.h b/y2023/vision/game_pieces.h
index a41d52a..a99a99b 100644
--- a/y2023/vision/game_pieces.h
+++ b/y2023/vision/game_pieces.h
@@ -5,6 +5,8 @@
#include "frc971/vision/vision_generated.h"
#include "y2023/vision/game_pieces_generated.h"
+#include "y2023/vision/yolov5.h"
+
namespace y2023 {
namespace vision {
@@ -20,6 +22,7 @@
private:
aos::Sender<GamePieces> game_pieces_sender_;
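+  // YOLOv5 model used to detect game pieces in camera frames.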
+ std::unique_ptr<YOLOV5> model;
};
} // namespace vision
} // namespace y2023
diff --git a/y2023/vision/yolov5.cc b/y2023/vision/yolov5.cc
index 7f5aa2a..17d4ad5 100644
--- a/y2023/vision/yolov5.cc
+++ b/y2023/vision/yolov5.cc
@@ -1,11 +1,13 @@
#include "yolov5.h"
+#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
+#include <tflite/public/edgetpu.h>
#include <tflite/public/edgetpu_c.h>
-#include <opencv2/core.hpp>
+#include <opencv2/dnn.hpp>
#include "gflags/gflags.h"
#include "glog/logging.h"
@@ -36,7 +38,7 @@
private:
// Convert an OpenCV Mat object to a tensor input
// that can be fed to the TensorFlow Lite model.
- void ConvertCVMatToTensor(const cv::Mat &src, uint8_t *in);
+ void ConvertCVMatToTensor(cv::Mat src, uint8_t *in);
// Resizes, converts color space, and converts
// image data type before inference.
@@ -75,54 +77,100 @@
static constexpr int kClassIdOffset = 5;
};
-std::unique_ptr<YOLOV5> MakeYOLOV5() {
- YOLOV5Impl *yolo = new YOLOV5Impl();
- return std::unique_ptr<YOLOV5>(yolo);
-}
+std::unique_ptr<YOLOV5> MakeYOLOV5() { return std::make_unique<YOLOV5Impl>(); }
void YOLOV5Impl::LoadModel(const std::string path) {
- model_ = tflite::FlatBufferModel::BuildFromFile(path.c_str());
+ LOG(INFO) << "Load model: start";
+
+ tflite::ops::builtin::BuiltinOpResolver resolver;
+
+ model_ = tflite::FlatBufferModel::VerifyAndBuildFromFile(path.c_str());
+
+ /*
+ auto model_impl = model_->GetModel();
+ model_impl->subgraphs();
+ LOG(INFO) << model_impl;
+ LOG(INFO) << model_impl->subgraphs();
+ auto subgraphs = model_impl->subgraphs();
+ LOG(INFO) << subgraphs->size();
+ LOG(INFO) << subgraphs->Get(0)->inputs()->size();
+ LOG(INFO) << subgraphs->Get(0)->inputs()->Get(0);
+ (void)subgraphs;
+ */
+
+ LOG(INFO) << "Load model: Build Model from file";
+
CHECK(model_);
+ CHECK(model_->initialized());
+ CHECK_EQ(tflite::InterpreterBuilder(*model_, resolver)(&interpreter_),
+ kTfLiteOk);
+ LOG(INFO) << "Load model: Interpreter builder done";
+ /*
+ LOG(INFO) << &interpreter_->primary_subgraph();
+ LOG(INFO) << interpreter_->subgraph(0);
+ LOG(INFO) << interpreter_->subgraphs_size();
+ LOG(INFO) << interpreter_->subgraph(0)->inputs().size();
+ LOG(INFO) << interpreter_->inputs().size();
+ */
+
+ //interpreter_->SetExternalContext(kTfLiteEdgeTpuContext, edgetpu_context.get());
+ // LOG(INFO) << "After set external context";
+
size_t num_devices;
std::unique_ptr<edgetpu_device, decltype(&edgetpu_free_devices)> devices(
edgetpu_list_devices(&num_devices), &edgetpu_free_devices);
- const auto &device = devices.get()[0];
- CHECK_EQ(num_devices, 1ul);
- tflite::ops::builtin::BuiltinOpResolver resolver;
- CHECK_EQ(tflite::InterpreterBuilder(*model_, resolver)(&interpreter_),
- kTfLiteOk);
- auto *delegate =
- edgetpu_create_delegate(device.type, device.path, nullptr, 0);
+ //const auto &available_tpus =
+ // edgetpu::EdgeTpuManager::GetSingleton()->EnumerateEdgeTpu();
+ //LOG(INFO) << "Available tpus: " << available_tpus.size();
+
+ LOG(INFO) << "Load model: Getting devices";
+ CHECK_EQ(num_devices, 1ul);
+ const auto &device = devices.get()[0];
+  LOG(INFO) << "Load model: Got device";
+
+  auto *delegate =
+      edgetpu_create_delegate(device.type, device.path, nullptr, 0);
+
interpreter_->ModifyGraphWithDelegate(delegate);
- TfLiteStatus status = interpreter_->AllocateTensors();
- CHECK(status == kTfLiteOk);
+ TfLiteStatus status = interpreter_->AllocateTensors();
+ CHECK_EQ(status, kTfLiteOk);
+ CHECK(interpreter_);
+
+ LOG(INFO) << "Load model: Allocate tensors success";
input_ = interpreter_->inputs()[0];
+ LOG(INFO) << "After set inputs";
+ LOG(INFO) << input_;
TfLiteIntArray *dims = interpreter_->tensor(input_)->dims;
in_height_ = dims->data[1];
in_width_ = dims->data[2];
in_channels_ = dims->data[3];
in_type_ = interpreter_->tensor(input_)->type;
input_8_ = interpreter_->typed_tensor<uint8_t>(input_);
+
interpreter_->SetNumThreads(FLAGS_nthreads);
+
+ LOG(INFO) << "End of load";
}
-void YOLOV5Impl::Preprocess(cv::Mat image) {
- cv::resize(image, image, cv::Size(in_height_, in_width_), cv::INTER_CUBIC);
- cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
- image.convertTo(image, CV_8U);
-}
-
-void YOLOV5Impl::ConvertCVMatToTensor(const cv::Mat &src, uint8_t *in) {
+void YOLOV5Impl::ConvertCVMatToTensor(cv::Mat src, uint8_t *in) {
CHECK(src.type() == CV_8UC3);
int n = 0, nc = src.channels(), ne = src.elemSize();
- for (int y = 0; y < src.rows; ++y)
- for (int x = 0; x < src.cols; ++x)
- for (int c = 0; c < nc; ++c)
+ LOG(INFO) << "ConvertCVMatToTensor - Rows " << src.rows;
+ LOG(INFO) << "ConvertCVMatToTensor - Cols " << src.cols;
+ for (int y = 0; y < src.rows; ++y) {
+ for (int x = 0; x < src.cols; ++x) {
+ for (int c = 0; c < nc; ++c) {
in[n++] = src.data[y * src.step + x * ne + c];
+ }
+ }
+ }
}
std::vector<std::vector<float>> YOLOV5Impl::TensorToVector2D(
@@ -182,36 +230,55 @@
confidences.push_back(d.confidence);
}
- cv::dnn::NMSBoxes(boxes, confidences, FLAGS_conf_threshold,
- FLAGS_nms_threshold, *indices);
+ (void)indices;
+ // TODO(FILIP): Fix linker error.
+ // cv::dnn::NMSBoxes(boxes, confidences, FLAGS_conf_threshold,
+ // FLAGS_nms_threshold, *indices);
}
std::vector<Detection> YOLOV5Impl::ProcessImage(cv::Mat frame) {
img_height_ = frame.rows;
img_width_ = frame.cols;
- Preprocess(frame);
+  // Preprocess: resize to the model input size, convert BGR to RGB, and
+  // make sure the data is 8-bit.
+ cv::resize(frame, frame, cv::Size(in_height_, in_width_), cv::INTER_CUBIC);
+ cv::cvtColor(frame, frame, cv::COLOR_BGR2RGB);
+ frame.convertTo(frame, CV_8U);
+
+ LOG(INFO) << "After preprocess - Before convert to tensor";
ConvertCVMatToTensor(frame, input_8_);
// Inference
+ LOG(INFO) << "Before Invoke";
TfLiteStatus status = interpreter_->Invoke();
CHECK_EQ(status, kTfLiteOk);
+ LOG(INFO) << "After invoke, status checked";
+
int output_tensor_index = interpreter_->outputs()[0];
TfLiteIntArray *out_dims = interpreter_->tensor(output_tensor_index)->dims;
int num_rows = out_dims->data[1];
int num_columns = out_dims->data[2];
TfLiteTensor *src_tensor = interpreter_->tensor(interpreter_->outputs()[0]);
+
std::vector<std::vector<float>> orig_preds =
TensorToVector2D(src_tensor, num_rows, num_columns);
+ LOG(INFO) << "After tensor to vector 2D";
std::vector<int> indices;
std::vector<Detection> detections;
NonMaximumSupression(orig_preds, num_rows, num_columns, &detections,
&indices);
-
+ LOG(INFO) << "After NMS";
+ for (size_t i = 0; i < interpreter_->outputs().size(); i++) {
+ LOG(INFO) << "Detection #" << i << " | " << interpreter_->outputs()[i];
+ }
+ if (detections.size() > 0) {
+ LOG(INFO) << "Detection ID: " << detections[0].class_id;
+ LOG(INFO) << "Confidence" << detections[0].confidence;
+ }
return detections;
};
diff --git a/y2023/vision/yolov5.h b/y2023/vision/yolov5.h
index ad04350..7e2a521 100644
--- a/y2023/vision/yolov5.h
+++ b/y2023/vision/yolov5.h
@@ -7,7 +7,6 @@
#include <fstream>
#include <iostream>
#include <opencv2/core.hpp>
-#include <opencv2/dnn.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
@@ -24,14 +23,14 @@
class YOLOV5 {
public:
- virtual ~YOLOV5();
+ virtual ~YOLOV5() {}
// Takes a model path as string and loads a pre-trained
// YOLOv5 model from the specified path.
- virtual void LoadModel(const std::string path);
+ virtual void LoadModel(const std::string path) = 0;
// Takes an image and returns a Detection.
- virtual std::vector<Detection> ProcessImage(cv::Mat image);
+ virtual std::vector<Detection> ProcessImage(cv::Mat image) = 0;
};
std::unique_ptr<YOLOV5> MakeYOLOV5();
diff --git a/y2023/y2023_logger.json b/y2023/y2023_logger.json
index df3a55b..829fa12 100644
--- a/y2023/y2023_logger.json
+++ b/y2023/y2023_logger.json
@@ -450,6 +450,7 @@
{
"name": "game_piece_detector",
"executable_name": "game_piece_detector",
+ "user": "pi",
"nodes": [
"logger"
]