Merge "Use the new 64 bit root filesystem for the pi's"
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index d190120..ec13f92 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -134,10 +134,10 @@
visibility = ["//visibility:public"],
deps = [
":buffer_encoder",
- ":crc32",
":logger_fbs",
"//aos:configuration_fbs",
"//aos/containers:resizeable_buffer",
+ "//aos/util:crc32",
"@com_github_google_flatbuffers//:flatbuffers",
"@com_github_google_glog//:glog",
"@com_google_absl//absl/types:span",
@@ -465,12 +465,3 @@
gen_reflections = 1,
target_compatible_with = ["@platforms//os:linux"],
)
-
-cc_library(
- name = "crc32",
- srcs = ["crc32.cc"],
- hdrs = ["crc32.h"],
- deps = [
- "@com_google_absl//absl/types:span",
- ],
-)
diff --git a/aos/events/logging/snappy_encoder.cc b/aos/events/logging/snappy_encoder.cc
index 4621273..8f46cf3 100644
--- a/aos/events/logging/snappy_encoder.cc
+++ b/aos/events/logging/snappy_encoder.cc
@@ -1,6 +1,6 @@
#include "aos/events/logging/snappy_encoder.h"
-#include "aos/events/logging/crc32.h"
+#include "aos/util/crc32.h"
#include "external/snappy/snappy.h"
namespace aos::logger {
diff --git a/aos/network/log_web_proxy_main.cc b/aos/network/log_web_proxy_main.cc
index 5c099a2..945d034 100644
--- a/aos/network/log_web_proxy_main.cc
+++ b/aos/network/log_web_proxy_main.cc
@@ -42,7 +42,8 @@
event_loop->SkipTimingReport();
- aos::web_proxy::WebProxy web_proxy(event_loop.get(), FLAGS_buffer_size);
+ aos::web_proxy::WebProxy web_proxy(
+ event_loop.get(), aos::web_proxy::StoreHistory::kYes, FLAGS_buffer_size);
web_proxy.SetDataPath(FLAGS_data_dir.c_str());
diff --git a/aos/network/web_proxy.cc b/aos/network/web_proxy.cc
index fdd8f1e..6d4f23f 100644
--- a/aos/network/web_proxy.cc
+++ b/aos/network/web_proxy.cc
@@ -28,7 +28,9 @@
namespace aos {
namespace web_proxy {
WebsocketHandler::WebsocketHandler(::seasocks::Server *server,
- aos::EventLoop *event_loop, int buffer_size)
+ aos::EventLoop *event_loop,
+ StoreHistory store_history,
+ int per_channel_buffer_size_bytes)
: server_(server),
config_(aos::CopyFlatBuffer(event_loop->configuration())),
event_loop_(event_loop) {
@@ -47,7 +49,10 @@
if (aos::configuration::ChannelIsReadableOnNode(channel, self)) {
auto fetcher = event_loop_->MakeRawFetcher(channel);
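+      // The byte budget is converted to a per-channel message count below:
+      // e.g. a 1000000 byte budget on a channel whose max_size() is 1000
+      // bytes keeps roughly 1000 messages, and -1 means unlimited.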
subscribers_.emplace_back(std::make_unique<aos::web_proxy::Subscriber>(
- std::move(fetcher), i, buffer_size));
+ std::move(fetcher), i, store_history,
+ per_channel_buffer_size_bytes < 0
+ ? -1
+ : per_channel_buffer_size_bytes / channel->max_size()));
} else {
subscribers_.emplace_back(nullptr);
}
@@ -126,19 +131,24 @@
global_epoll->DeleteFd(fd);
}
-WebProxy::WebProxy(aos::EventLoop *event_loop, int buffer_size)
- : WebProxy(event_loop, &internal_epoll_, buffer_size) {}
+WebProxy::WebProxy(aos::EventLoop *event_loop, StoreHistory store_history,
+ int per_channel_buffer_size_bytes)
+ : WebProxy(event_loop, &internal_epoll_, store_history,
+ per_channel_buffer_size_bytes) {}
-WebProxy::WebProxy(aos::ShmEventLoop *event_loop, int buffer_size)
- : WebProxy(event_loop, event_loop->epoll(), buffer_size) {}
+WebProxy::WebProxy(aos::ShmEventLoop *event_loop, StoreHistory store_history,
+ int per_channel_buffer_size_bytes)
+ : WebProxy(event_loop, event_loop->epoll(), store_history,
+ per_channel_buffer_size_bytes) {}
WebProxy::WebProxy(aos::EventLoop *event_loop, aos::internal::EPoll *epoll,
- int buffer_size)
+ StoreHistory store_history,
+ int per_channel_buffer_size_bytes)
: epoll_(epoll),
server_(std::make_shared<aos::seasocks::SeasocksLogger>(
::seasocks::Logger::Level::Info)),
- websocket_handler_(
- new WebsocketHandler(&server_, event_loop, buffer_size)) {
+ websocket_handler_(new WebsocketHandler(
+ &server_, event_loop, store_history, per_channel_buffer_size_bytes)) {
CHECK(!global_epoll);
global_epoll = epoll;
@@ -192,10 +202,13 @@
}
void Subscriber::RunIteration() {
- if (channels_.empty() && buffer_size_ == 0) {
+ if (channels_.empty() && (buffer_size_ == 0 || !store_history_)) {
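+    // Call Fetch() so that we don't fall behind on future calls to
+    // FetchNext() once a listener connects, and drop any stale buffer.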
+ fetcher_->Fetch();
+ message_buffer_.clear();
return;
}
+
while (fetcher_->FetchNext()) {
// If we aren't building up a buffer, short-circuit the FetchNext().
if (buffer_size_ == 0) {
@@ -271,12 +284,6 @@
ChannelInformation info;
info.transfer_method = transfer_method;
- // If we aren't keeping a buffer and there are no existing listeners, call
- // Fetch() to avoid falling behind on future calls to FetchNext().
- if (channels_.empty() && buffer_size_ == 0) {
- fetcher_->Fetch();
- }
-
channels_.emplace_back(std::make_pair(data_channel, info));
data_channel->set_on_message(
diff --git a/aos/network/web_proxy.fbs b/aos/network/web_proxy.fbs
index 6c85acb..f1d6645 100644
--- a/aos/network/web_proxy.fbs
+++ b/aos/network/web_proxy.fbs
@@ -64,7 +64,7 @@
enum TransferMethod : byte {
SUBSAMPLE,
- EVERYTHING_WITH_HISTORY,
+ LOSSLESS,
}
table ChannelRequest {
diff --git a/aos/network/web_proxy.h b/aos/network/web_proxy.h
index 0815ebf..baca26e 100644
--- a/aos/network/web_proxy.h
+++ b/aos/network/web_proxy.h
@@ -24,13 +24,19 @@
class Subscriber;
class ApplicationConnection;
+enum class StoreHistory {
+ kNo,
+ kYes,
+};
+
// Basic class that handles receiving new websocket connections. Creates a new
// Connection to manage the rest of the negotiation and data passing. When the
// websocket closes, it deletes the Connection.
class WebsocketHandler : public ::seasocks::WebSocket::Handler {
public:
WebsocketHandler(::seasocks::Server *server, aos::EventLoop *event_loop,
- int buffer_size);
+ StoreHistory store_history,
+ int per_channel_buffer_size_bytes);
void onConnect(::seasocks::WebSocket *sock) override;
void onData(::seasocks::WebSocket *sock, const uint8_t *data,
size_t size) override;
@@ -50,15 +56,28 @@
// Wrapper class that manages the seasocks server and WebsocketHandler.
class WebProxy {
public:
- WebProxy(aos::EventLoop *event_loop, int buffer_size);
- WebProxy(aos::ShmEventLoop *event_loop, int buffer_size);
+ // Constructs a WebProxy object for interacting with a webpage. store_history
+ // and per_channel_buffer_size_bytes specify how we manage delivering LOSSLESS
+ // messages to the client:
+ // * store_history specifies whether we should always buffer up data for all
+ // channels--even for messages that are played prior to the client
+ // connecting. This is mostly useful for log replay where the client
+ // will typically connect after the logfile has been fully loaded/replayed.
+ // * per_channel_buffer_size_bytes is the maximum amount of data to buffer
+ // up per channel (-1 will indicate infinite data, which is used during log
+ // replay). This is divided by the max_size per channel to determine
+ // how many messages to queue up.
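+  //
+  // For example, the live web proxy (web_proxy_main.cc) constructs this as
+  //   aos::web_proxy::WebProxy web_proxy(
+  //       &event_loop, aos::web_proxy::StoreHistory::kNo, FLAGS_buffer_size);
+  // while log replay (log_web_proxy_main.cc) passes StoreHistory::kYes.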
+ WebProxy(aos::EventLoop *event_loop, StoreHistory store_history,
+ int per_channel_buffer_size_bytes);
+ WebProxy(aos::ShmEventLoop *event_loop, StoreHistory store_history,
+ int per_channel_buffer_size_bytes);
~WebProxy();
void SetDataPath(const char *path) { server_.setStaticPath(path); }
private:
WebProxy(aos::EventLoop *event_loop, aos::internal::EPoll *epoll,
- int buffer_size);
+ StoreHistory store_history, int per_channel_buffer_size_bytes);
aos::internal::EPoll internal_epoll_;
aos::internal::EPoll *const epoll_;
@@ -96,9 +115,10 @@
class Subscriber {
public:
Subscriber(std::unique_ptr<RawFetcher> fetcher, int channel_index,
- int buffer_size)
+ StoreHistory store_history, int buffer_size)
: fetcher_(std::move(fetcher)),
channel_index_(channel_index),
+ store_history_(store_history == StoreHistory::kYes),
buffer_size_(buffer_size) {}
void RunIteration();
@@ -133,6 +153,10 @@
std::unique_ptr<RawFetcher> fetcher_;
int channel_index_;
+ // If set, will always build up a buffer of the most recent buffer_size_
+  // messages. If store_history_ is *not* set, we will only buffer up messages
+ // while there is an active listener.
+ bool store_history_;
int buffer_size_;
std::deque<Message> message_buffer_;
// The ScopedDataChannel that we use for actually sending data over WebRTC
diff --git a/aos/network/web_proxy_main.cc b/aos/network/web_proxy_main.cc
index 06fe942..f3ad926 100644
--- a/aos/network/web_proxy_main.cc
+++ b/aos/network/web_proxy_main.cc
@@ -6,7 +6,9 @@
DEFINE_string(config, "./config.json", "File path of aos configuration");
DEFINE_string(data_dir, "www", "Directory to serve data files from");
-DEFINE_int32(buffer_size, 0, "-1 if infinite, in # of messages / channel.");
+DEFINE_int32(buffer_size, 1000000,
+ "-1 if infinite, in bytes / channel. If there are no active "
+ "connections, will not store anything.");
int main(int argc, char **argv) {
aos::InitGoogle(&argc, &argv);
@@ -16,7 +18,8 @@
aos::ShmEventLoop event_loop(&config.message());
- aos::web_proxy::WebProxy web_proxy(&event_loop, FLAGS_buffer_size);
+ aos::web_proxy::WebProxy web_proxy(
+ &event_loop, aos::web_proxy::StoreHistory::kNo, FLAGS_buffer_size);
web_proxy.SetDataPath(FLAGS_data_dir.c_str());
event_loop.Run();
diff --git a/aos/network/www/proxy.ts b/aos/network/www/proxy.ts
index 5461899..5415400 100644
--- a/aos/network/www/proxy.ts
+++ b/aos/network/www/proxy.ts
@@ -117,7 +117,7 @@
name: string, type: string,
handler: (data: Uint8Array, sentTime: number) => void): void {
this.addHandlerImpl(
- name, type, TransferMethod.EVERYTHING_WITH_HISTORY, handler);
+ name, type, TransferMethod.LOSSLESS, handler);
}
/**
@@ -137,7 +137,7 @@
if (!this.handlerFuncs.has(channel.key())) {
this.handlerFuncs.set(channel.key(), []);
} else {
- if (method == TransferMethod.EVERYTHING_WITH_HISTORY) {
+ if (method == TransferMethod.LOSSLESS) {
console.warn(
'Behavior of multiple reliable handlers is currently poorly ' +
'defined and may not actually deliver all of the messages.');
diff --git a/aos/util/BUILD b/aos/util/BUILD
index 0314069..8df16e2 100644
--- a/aos/util/BUILD
+++ b/aos/util/BUILD
@@ -257,6 +257,15 @@
],
)
+cc_library(
+ name = "crc32",
+ srcs = ["crc32.cc"],
+ hdrs = ["crc32.h"],
+ deps = [
+ "@com_google_absl//absl/types:span",
+ ],
+)
+
py_library(
name = "python_init",
srcs = ["__init__.py"],
diff --git a/aos/events/logging/crc32.cc b/aos/util/crc32.cc
similarity index 95%
rename from aos/events/logging/crc32.cc
rename to aos/util/crc32.cc
index cd9d9fb..7f13f30 100644
--- a/aos/events/logging/crc32.cc
+++ b/aos/util/crc32.cc
@@ -1,4 +1,4 @@
-#include "aos/events/logging/crc32.h"
+#include "aos/util/crc32.h"
namespace aos {
@@ -53,9 +53,8 @@
return AccumulateCrc32(data, std::nullopt);
}
-uint32_t AccumulateCrc32(
- const absl::Span<uint8_t> data,
- std::optional<uint32_t> current_checksum) {
+uint32_t AccumulateCrc32(const absl::Span<uint8_t> data,
+ std::optional<uint32_t> current_checksum) {
uint32_t crc =
current_checksum.has_value() ? current_checksum.value() : 0xFF'FF'FF'FF;
for (const uint8_t n : data) {
diff --git a/aos/events/logging/crc32.h b/aos/util/crc32.h
similarity index 100%
rename from aos/events/logging/crc32.h
rename to aos/util/crc32.h
diff --git a/frc971/analysis/in_process_plotter.cc b/frc971/analysis/in_process_plotter.cc
index 0eaf719..cd52b1c 100644
--- a/frc971/analysis/in_process_plotter.cc
+++ b/frc971/analysis/in_process_plotter.cc
@@ -15,7 +15,7 @@
event_loop_factory_(&config_.message()),
event_loop_(event_loop_factory_.MakeEventLoop("plotter")),
plot_sender_(event_loop_->MakeSender<Plot>("/analysis")),
- web_proxy_(event_loop_.get(), -1),
+ web_proxy_(event_loop_.get(), aos::web_proxy::StoreHistory::kYes, -1),
builder_(plot_sender_.MakeBuilder()) {
web_proxy_.SetDataPath(kDataPath);
event_loop_->SkipTimingReport();
diff --git a/frc971/analysis/live_web_plotter_demo.sh b/frc971/analysis/live_web_plotter_demo.sh
index 4c4c9c4..45f3b92 100755
--- a/frc971/analysis/live_web_plotter_demo.sh
+++ b/frc971/analysis/live_web_plotter_demo.sh
@@ -1 +1 @@
-./aos/network/web_proxy_main --config=aos/network/www/test_config.json --data_dir=frc971/analysis
+./aos/network/web_proxy_main --config=aos/network/www/test_config.json --data_dir=frc971/analysis "$@"
diff --git a/frc971/control_loops/python/angular_system.py b/frc971/control_loops/python/angular_system.py
index cad3221..df6c0f6 100755
--- a/frc971/control_loops/python/angular_system.py
+++ b/frc971/control_loops/python/angular_system.py
@@ -20,6 +20,7 @@
kalman_q_vel,
kalman_q_voltage,
kalman_r_position,
+                 radius=None,
dt=0.00505):
"""Constructs an AngularSystemParams object.
@@ -38,6 +39,7 @@
self.kalman_q_vel = kalman_q_vel
self.kalman_q_voltage = kalman_q_voltage
self.kalman_r_position = kalman_r_position
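+        # Radius (in meters) of the driven mechanism; when provided it is only
+        # used below to log the equivalent stall force.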
+ self.radius = radius
self.dt = dt
@@ -80,11 +82,17 @@
glog.debug('Controllability of %d',
numpy.linalg.matrix_rank(controllability))
glog.debug('J: %f', self.J)
- glog.debug('Stall torque: %f', self.motor.stall_torque / self.G)
- glog.debug('Stall acceleration: %f',
+ glog.debug('Stall torque: %f (N m)', self.motor.stall_torque / self.G)
+ if self.params.radius is not None:
+ glog.debug('Stall force: %f (N)',
+ self.motor.stall_torque / self.G / self.params.radius)
+ glog.debug('Stall force: %f (lbf)',
+ self.motor.stall_torque / self.G / self.params.radius * 0.224809)
+
+ glog.debug('Stall acceleration: %f (rad/s^2)',
self.motor.stall_torque / self.G / self.J)
- glog.debug('Free speed is %f',
+ glog.debug('Free speed is %f (rad/s)',
-self.B_continuous[1, 0] / self.A_continuous[1, 1] * 12.0)
self.Q = numpy.matrix([[(1.0 / (self.params.q_pos**2.0)), 0.0],
diff --git a/frc971/wpilib/imu.fbs b/frc971/wpilib/imu.fbs
index abd01e2..33bad72 100644
--- a/frc971/wpilib/imu.fbs
+++ b/frc971/wpilib/imu.fbs
@@ -45,6 +45,11 @@
// Operation section for more details on conditions that may cause this bit to
// be set to 1.
data_path_overrun:bool (id: 6);
+
+  // True indicates that the Raspberry Pi Pico received a packet
+  // from the IMU that had a bad checksum but still sent a message
+  // containing a timestamp and encoder values.
+  checksum_mismatch:bool (id: 7);
}
// Values returned from an IMU.
@@ -87,6 +92,24 @@
// converted from fpga_timestamp.
monotonic_timestamp_ns:long (id: 12);
+ // The timestamp when the values were captured by the Raspberry Pi Pico.
+ // This has microsecond precision.
+  pico_timestamp_us:int (id: 20);
+
+  // The number of this reading, produced from a 16-bit counter.
+  data_counter:int (id: 19);
+
+  // The number of messages received by the Raspberry Pi that had bad checksums.
+  failed_checksums:int (id: 21);
+
+  // True if this packet, as sent from the Raspberry Pi Pico to the
+  // Raspberry Pi, failed its checksum.
+  checksum_failed:bool (id: 22);
+
+  // The position of the left drivetrain encoder, in encoder ticks.
+  left_encoder:int (id: 17);
+  // The position of the right drivetrain encoder, in encoder ticks.
+  right_encoder:int (id: 18);
+
// For an ADIS16470, the DIAG_STAT value immediately after reset.
start_diag_stat:ADIS16470DiagStat (id: 13);
// For an ADIS16470, the DIAG_STAT value after the initial sensor self test we
diff --git a/y2020/vision/BUILD b/y2020/vision/BUILD
index 5156b59..75ef7aa 100644
--- a/y2020/vision/BUILD
+++ b/y2020/vision/BUILD
@@ -106,7 +106,10 @@
"//y2020:config",
],
target_compatible_with = ["@platforms//os:linux"],
- visibility = ["//y2020:__subpackages__"],
+ visibility = [
+ "//y2020:__subpackages__",
+ "//y2022:__subpackages__",
+ ],
deps = [
":charuco_lib",
"//aos:init",
diff --git a/y2020/vision/calibration_accumulator.cc b/y2020/vision/calibration_accumulator.cc
index e77c74b..9f550c5 100644
--- a/y2020/vision/calibration_accumulator.cc
+++ b/y2020/vision/calibration_accumulator.cc
@@ -22,10 +22,15 @@
using aos::monotonic_clock;
namespace chrono = std::chrono;
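+// Standard gravity, in m/s^2.  Used below to convert the accelerometer
+// readings (which appear to be reported in g) into m/s^2.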
+constexpr double kG = 9.807;
+
void CalibrationData::AddCameraPose(
distributed_clock::time_point distributed_now, Eigen::Vector3d rvec,
Eigen::Vector3d tvec) {
- rot_trans_points_.emplace_back(distributed_now, std::make_pair(rvec, tvec));
+  // Only record camera poses once we have at least one IMU reading from
+  // before this time, so the accumulated data always starts with an IMU
+  // reading.
+ if (!imu_points_.empty() && imu_points_[0].first < distributed_now) {
+ rot_trans_points_.emplace_back(distributed_now, std::make_pair(rvec, tvec));
+ }
}
void CalibrationData::AddImu(distributed_clock::time_point distributed_now,
@@ -167,7 +172,7 @@
data_->AddImu(imu_factory_->ToDistributedClock(monotonic_clock::time_point(
chrono::nanoseconds(imu->monotonic_timestamp_ns()))),
- gyro, accel);
+ gyro, accel * kG);
}
} // namespace vision
diff --git a/y2020/vision/calibration_accumulator.h b/y2020/vision/calibration_accumulator.h
index 0f1bff7..7bff9f0 100644
--- a/y2020/vision/calibration_accumulator.h
+++ b/y2020/vision/calibration_accumulator.h
@@ -13,11 +13,17 @@
namespace frc971 {
namespace vision {
+// This class provides an interface for an application to be notified of all
+// camera and IMU samples in order with the correct timestamps.
class CalibrationDataObserver {
public:
+ // Observes a camera sample at the corresponding time t, and with the
+ // corresponding rotation and translation vectors rt.
virtual void UpdateCamera(aos::distributed_clock::time_point t,
std::pair<Eigen::Vector3d, Eigen::Vector3d> rt) = 0;
+ // Observes an IMU sample at the corresponding time t, and with the
+ // corresponding angular velocity and linear acceleration vectors wa.
virtual void UpdateIMU(aos::distributed_clock::time_point t,
std::pair<Eigen::Vector3d, Eigen::Vector3d> wa) = 0;
};
diff --git a/y2020/vision/extrinsics_calibration.cc b/y2020/vision/extrinsics_calibration.cc
index c7ef752..3216c23 100644
--- a/y2020/vision/extrinsics_calibration.cc
+++ b/y2020/vision/extrinsics_calibration.cc
@@ -29,6 +29,8 @@
using aos::distributed_clock;
using aos::monotonic_clock;
+constexpr double kGravity = 9.8;
+
// The basic ideas here are taken from Kalibr.
// (https://github.com/ethz-asl/kalibr), but adapted to work with AOS, and to be
// simpler.
@@ -61,48 +63,95 @@
template <typename Scalar>
class CeresPoseFilter : public CalibrationDataObserver {
public:
+ typedef Eigen::Transform<Scalar, 3, Eigen::Affine> Affine3s;
+
CeresPoseFilter(Eigen::Quaternion<Scalar> initial_orientation,
Eigen::Quaternion<Scalar> imu_to_camera,
- Eigen::Matrix<Scalar, 3, 1> imu_bias)
+ Eigen::Matrix<Scalar, 3, 1> gyro_bias,
+ Eigen::Matrix<Scalar, 6, 1> initial_state,
+ Eigen::Quaternion<Scalar> board_to_world,
+ Eigen::Matrix<Scalar, 3, 1> imu_to_camera_translation,
+ Scalar gravity_scalar,
+ Eigen::Matrix<Scalar, 3, 1> accelerometer_bias)
: accel_(Eigen::Matrix<double, 3, 1>::Zero()),
omega_(Eigen::Matrix<double, 3, 1>::Zero()),
- imu_bias_(imu_bias),
+ imu_bias_(gyro_bias),
orientation_(initial_orientation),
- x_hat_(Eigen::Matrix<Scalar, 6, 1>::Zero()),
+ x_hat_(initial_state),
p_(Eigen::Matrix<Scalar, 6, 6>::Zero()),
- imu_to_camera_(imu_to_camera) {}
+ imu_to_camera_rotation_(imu_to_camera),
+ imu_to_camera_translation_(imu_to_camera_translation),
+ board_to_world_(board_to_world),
+ gravity_scalar_(gravity_scalar),
+ accelerometer_bias_(accelerometer_bias) {}
- virtual void ObserveCameraUpdate(distributed_clock::time_point /*t*/,
- Eigen::Vector3d /*board_to_camera_rotation*/,
- Eigen::Quaternion<Scalar> /*imu_to_world*/) {
- }
+ Scalar gravity_scalar() { return gravity_scalar_; }
+ virtual void ObserveCameraUpdate(
+ distributed_clock::time_point /*t*/,
+ Eigen::Vector3d /*board_to_camera_rotation*/,
+ Eigen::Quaternion<Scalar> /*imu_to_world_rotation*/,
+ Affine3s /*imu_to_world*/) {}
+
+ // Observes a camera measurement by applying a kalman filter correction and
+ // accumulating up the error associated with the step.
void UpdateCamera(distributed_clock::time_point t,
std::pair<Eigen::Vector3d, Eigen::Vector3d> rt) override {
Integrate(t);
- Eigen::Quaternion<Scalar> board_to_camera(
+ const Eigen::Quaternion<Scalar> board_to_camera_rotation(
frc971::controls::ToQuaternionFromRotationVector(rt.first)
.cast<Scalar>());
+ const Affine3s board_to_camera =
+ Eigen::Translation3d(rt.second).cast<Scalar>() *
+ board_to_camera_rotation;
+
+ const Affine3s imu_to_camera =
+ imu_to_camera_translation_ * imu_to_camera_rotation_;
// This converts us from (facing the board),
// x right, y up, z towards us -> x right, y away, z up.
// Confirmed to be right.
- Eigen::Quaternion<Scalar> board_to_world(
- Eigen::AngleAxisd(0.5 * M_PI, Eigen::Vector3d::UnitX()).cast<Scalar>());
// Want world -> imu rotation.
// world <- board <- camera <- imu.
- const Eigen::Quaternion<Scalar> imu_to_world =
- board_to_world * board_to_camera.inverse() * imu_to_camera_;
+ const Eigen::Quaternion<Scalar> imu_to_world_rotation =
+ board_to_world_ * board_to_camera_rotation.inverse() *
+ imu_to_camera_rotation_;
- const Eigen::Quaternion<Scalar> error(imu_to_world.inverse() *
+ const Affine3s imu_to_world =
+ board_to_world_ * board_to_camera.inverse() * imu_to_camera;
+
+ const Eigen::Matrix<Scalar, 3, 1> z =
+ imu_to_world * Eigen::Matrix<Scalar, 3, 1>::Zero();
+
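+    // Standard Kalman measurement update on the position states:
+    //   y = z - H x,  S = H P H^T + R,  K = P H^T S^-1,
+    //   x <- x + K y,  P <- (I - K H) P.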
+ Eigen::Matrix<Scalar, 3, 6> H = Eigen::Matrix<Scalar, 3, 6>::Zero();
+ H(0, 0) = static_cast<Scalar>(1.0);
+ H(1, 1) = static_cast<Scalar>(1.0);
+ H(2, 2) = static_cast<Scalar>(1.0);
+ const Eigen::Matrix<Scalar, 3, 1> y = z - H * x_hat_;
+
+ const Eigen::Matrix<double, 3, 3> R =
+ (::Eigen::DiagonalMatrix<double, 3>().diagonal() << ::std::pow(0.01, 2),
+ ::std::pow(0.01, 2), ::std::pow(0.01, 2))
+ .finished()
+ .asDiagonal();
+
+ const Eigen::Matrix<Scalar, 3, 3> S =
+ H * p_ * H.transpose() + R.cast<Scalar>();
+ const Eigen::Matrix<Scalar, 6, 3> K = p_ * H.transpose() * S.inverse();
+
+ x_hat_ += K * y;
+ p_ = (Eigen::Matrix<Scalar, 6, 6>::Identity() - K * H) * p_;
+
+ const Eigen::Quaternion<Scalar> error(imu_to_world_rotation.inverse() *
orientation());
errors_.emplace_back(
Eigen::Matrix<Scalar, 3, 1>(error.x(), error.y(), error.z()));
+ position_errors_.emplace_back(y);
- ObserveCameraUpdate(t, rt.first, imu_to_world);
+ ObserveCameraUpdate(t, rt.first, imu_to_world_rotation, imu_to_world);
}
virtual void ObserveIMUUpdate(
@@ -120,14 +169,16 @@
const Eigen::Quaternion<Scalar> &orientation() const { return orientation_; }
- std::vector<Eigen::Matrix<Scalar, 3, 1>> errors_;
-
- // Returns the angular errors for each camera sample.
size_t num_errors() const { return errors_.size(); }
Scalar errorx(size_t i) const { return errors_[i].x(); }
Scalar errory(size_t i) const { return errors_[i].y(); }
Scalar errorz(size_t i) const { return errors_[i].z(); }
+ size_t num_perrors() const { return position_errors_.size(); }
+ Scalar errorpx(size_t i) const { return position_errors_[i].x(); }
+ Scalar errorpy(size_t i) const { return position_errors_[i].y(); }
+ Scalar errorpz(size_t i) const { return position_errors_[i].z(); }
+
private:
Eigen::Matrix<Scalar, 46, 1> Pack(Eigen::Quaternion<Scalar> q,
Eigen::Matrix<Scalar, 6, 1> x_hat,
@@ -151,7 +202,7 @@
return std::make_tuple(q, x_hat, p);
}
- Eigen::Matrix<Scalar, 46, 1> Derivitive(
+ Eigen::Matrix<Scalar, 46, 1> Derivative(
const Eigen::Matrix<Scalar, 46, 1> &input) {
auto [q, x_hat, p] = UnPack(input);
@@ -160,25 +211,48 @@
omega_q.vec() = 0.5 * (omega_.cast<Scalar>() - imu_bias_);
Eigen::Matrix<Scalar, 4, 1> q_dot = (q * omega_q).coeffs();
- Eigen::Matrix<Scalar, 6, 1> x_hat_dot = Eigen::Matrix<Scalar, 6, 1>::Zero();
- x_hat_dot(0, 0) = x_hat(3, 0);
- x_hat_dot(1, 0) = x_hat(4, 0);
- x_hat_dot(2, 0) = x_hat(5, 0);
- x_hat_dot.template block<3, 1>(3, 0) = accel_.cast<Scalar>();
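+    // Double-integrator model: position integrates velocity, and velocity
+    // integrates the bias-corrected accelerometer reading (rotated into the
+    // world frame) minus gravity.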
+ Eigen::Matrix<double, 6, 6> A = Eigen::Matrix<double, 6, 6>::Zero();
+ A(0, 3) = 1.0;
+ A(1, 4) = 1.0;
+ A(2, 5) = 1.0;
- Eigen::Matrix<Scalar, 6, 6> p_dot = Eigen::Matrix<Scalar, 6, 6>::Zero();
+ Eigen::Matrix<Scalar, 6, 1> x_hat_dot = A * x_hat;
+ x_hat_dot.template block<3, 1>(3, 0) =
+ orientation() * (accel_.cast<Scalar>() - accelerometer_bias_) -
+ Eigen::Vector3d(0, 0, kGravity).cast<Scalar>() * gravity_scalar_;
+
+ // Initialize the position noise to 0. If the solver is going to back-solve
+ // for the most likely starting position, let's just say that the noise is
+ // small.
+ constexpr double kPositionNoise = 0.0;
+ constexpr double kAccelerometerNoise = 2.3e-6 * 9.8;
+ constexpr double kIMUdt = 5.0e-4;
+ Eigen::Matrix<double, 6, 6> Q_dot(
+ (::Eigen::DiagonalMatrix<double, 6>().diagonal()
+ << ::std::pow(kPositionNoise, 2) / kIMUdt,
+ ::std::pow(kPositionNoise, 2) / kIMUdt,
+ ::std::pow(kPositionNoise, 2) / kIMUdt,
+ ::std::pow(kAccelerometerNoise, 2) / kIMUdt,
+ ::std::pow(kAccelerometerNoise, 2) / kIMUdt,
+ ::std::pow(kAccelerometerNoise, 2) / kIMUdt)
+ .finished()
+ .asDiagonal());
+ Eigen::Matrix<Scalar, 6, 6> p_dot = A.cast<Scalar>() * p +
+ p * A.transpose().cast<Scalar>() +
+ Q_dot.cast<Scalar>();
return Pack(Eigen::Quaternion<Scalar>(q_dot), x_hat_dot, p_dot);
}
virtual void ObserveIntegrated(distributed_clock::time_point /*t*/,
Eigen::Matrix<Scalar, 6, 1> /*x_hat*/,
- Eigen::Quaternion<Scalar> /*orientation*/) {}
+ Eigen::Quaternion<Scalar> /*orientation*/,
+ Eigen::Matrix<Scalar, 6, 6> /*p*/) {}
void Integrate(distributed_clock::time_point t) {
if (last_time_ != distributed_clock::min_time) {
Eigen::Matrix<Scalar, 46, 1> next = control_loops::RungeKutta(
- [this](auto r) { return Derivitive(r); },
+ [this](auto r) { return Derivative(r); },
Pack(orientation_, x_hat_, p_),
aos::time::DurationInSeconds(t - last_time_));
@@ -189,34 +263,42 @@
}
last_time_ = t;
- ObserveIntegrated(t, x_hat_, orientation_);
+ ObserveIntegrated(t, x_hat_, orientation_, p_);
}
Eigen::Matrix<double, 3, 1> accel_;
Eigen::Matrix<double, 3, 1> omega_;
Eigen::Matrix<Scalar, 3, 1> imu_bias_;
+ // IMU -> world quaternion
Eigen::Quaternion<Scalar> orientation_;
Eigen::Matrix<Scalar, 6, 1> x_hat_;
Eigen::Matrix<Scalar, 6, 6> p_;
distributed_clock::time_point last_time_ = distributed_clock::min_time;
- Eigen::Quaternion<Scalar> imu_to_camera_;
+ Eigen::Quaternion<Scalar> imu_to_camera_rotation_;
+ Eigen::Translation<Scalar, 3> imu_to_camera_translation_ =
+ Eigen::Translation3d(0, 0, 0).cast<Scalar>();
- // States outside the KF:
- // orientation quaternion
- //
+ Eigen::Quaternion<Scalar> board_to_world_;
+ Scalar gravity_scalar_;
+ Eigen::Matrix<Scalar, 3, 1> accelerometer_bias_;
// States:
// xyz position
// xyz velocity
//
// Inputs
// xyz accel
- // angular rates
//
// Measurement:
- // xyz position
- // orientation rotation vector
+ // xyz position from camera.
+ //
+ // Since the gyro is so good, we can just solve for the bias and initial
+ // position with the solver and see what it learns.
+
+  // The angular error for each camera sample.
+ std::vector<Eigen::Matrix<Scalar, 3, 1>> errors_;
+ std::vector<Eigen::Matrix<Scalar, 3, 1>> position_errors_;
};
// Subclass of the filter above which has plotting. This keeps debug code and
@@ -225,25 +307,48 @@
public:
PoseFilter(Eigen::Quaternion<double> initial_orientation,
Eigen::Quaternion<double> imu_to_camera,
- Eigen::Matrix<double, 3, 1> imu_bias)
- : CeresPoseFilter<double>(initial_orientation, imu_to_camera, imu_bias) {}
+ Eigen::Matrix<double, 3, 1> gyro_bias,
+ Eigen::Matrix<double, 6, 1> initial_state,
+ Eigen::Quaternion<double> board_to_world,
+ Eigen::Matrix<double, 3, 1> imu_to_camera_translation,
+ double gravity_scalar,
+ Eigen::Matrix<double, 3, 1> accelerometer_bias)
+ : CeresPoseFilter<double>(initial_orientation, imu_to_camera, gyro_bias,
+ initial_state, board_to_world,
+ imu_to_camera_translation, gravity_scalar,
+ accelerometer_bias) {}
void Plot() {
+ std::vector<double> rx;
+ std::vector<double> ry;
+ std::vector<double> rz;
std::vector<double> x;
std::vector<double> y;
std::vector<double> z;
+ std::vector<double> vx;
+ std::vector<double> vy;
+ std::vector<double> vz;
for (const Eigen::Quaternion<double> &q : orientations_) {
Eigen::Matrix<double, 3, 1> rotation_vector =
frc971::controls::ToRotationVectorFromQuaternion(q);
- x.emplace_back(rotation_vector(0, 0));
- y.emplace_back(rotation_vector(1, 0));
- z.emplace_back(rotation_vector(2, 0));
+ rx.emplace_back(rotation_vector(0, 0));
+ ry.emplace_back(rotation_vector(1, 0));
+ rz.emplace_back(rotation_vector(2, 0));
}
+ for (const Eigen::Matrix<double, 6, 1> &x_hat : x_hats_) {
+ x.emplace_back(x_hat(0));
+ y.emplace_back(x_hat(1));
+ z.emplace_back(x_hat(2));
+ vx.emplace_back(x_hat(3));
+ vy.emplace_back(x_hat(4));
+ vz.emplace_back(x_hat(5));
+ }
+
frc971::analysis::Plotter plotter;
plotter.AddFigure("position");
- plotter.AddLine(times_, x, "x_hat(0)");
- plotter.AddLine(times_, y, "x_hat(1)");
- plotter.AddLine(times_, z, "x_hat(2)");
+ plotter.AddLine(times_, rx, "x_hat(0)");
+ plotter.AddLine(times_, ry, "x_hat(1)");
+ plotter.AddLine(times_, rz, "x_hat(2)");
plotter.AddLine(ct, cx, "Camera x");
plotter.AddLine(ct, cy, "Camera y");
plotter.AddLine(ct, cz, "Camera z");
@@ -253,9 +358,9 @@
plotter.Publish();
plotter.AddFigure("error");
- plotter.AddLine(times_, x, "x_hat(0)");
- plotter.AddLine(times_, y, "x_hat(1)");
- plotter.AddLine(times_, z, "x_hat(2)");
+ plotter.AddLine(times_, rx, "x_hat(0)");
+ plotter.AddLine(times_, ry, "x_hat(1)");
+ plotter.AddLine(times_, rz, "x_hat(2)");
plotter.AddLine(ct, cerrx, "Camera error x");
plotter.AddLine(ct, cerry, "Camera error y");
plotter.AddLine(ct, cerrz, "Camera error z");
@@ -268,6 +373,9 @@
plotter.AddLine(imut, imu_x, "imu x");
plotter.AddLine(imut, imu_y, "imu y");
plotter.AddLine(imut, imu_z, "imu z");
+ plotter.AddLine(times_, rx, "rotation x");
+ plotter.AddLine(times_, ry, "rotation y");
+ plotter.AddLine(times_, rz, "rotation z");
plotter.Publish();
plotter.AddFigure("raw");
@@ -282,12 +390,27 @@
plotter.AddLine(ct, raw_cz, "Camera z");
plotter.Publish();
+ plotter.AddFigure("xyz vel");
+ plotter.AddLine(times_, x, "x");
+ plotter.AddLine(times_, y, "y");
+ plotter.AddLine(times_, z, "z");
+ plotter.AddLine(times_, vx, "vx");
+ plotter.AddLine(times_, vy, "vy");
+ plotter.AddLine(times_, vz, "vz");
+ plotter.AddLine(ct, camera_position_x, "Camera x");
+ plotter.AddLine(ct, camera_position_y, "Camera y");
+ plotter.AddLine(ct, camera_position_z, "Camera z");
+ plotter.Publish();
+
plotter.Spin();
}
void ObserveIntegrated(distributed_clock::time_point t,
Eigen::Matrix<double, 6, 1> x_hat,
- Eigen::Quaternion<double> orientation) override {
+ Eigen::Quaternion<double> orientation,
+ Eigen::Matrix<double, 6, 6> p) override {
+ VLOG(1) << t << " -> " << p;
+ VLOG(1) << t << " xhat -> " << x_hat.transpose();
times_.emplace_back(chrono::duration<double>(t.time_since_epoch()).count());
x_hats_.emplace_back(x_hat);
orientations_.emplace_back(orientation);
@@ -309,18 +432,19 @@
void ObserveCameraUpdate(distributed_clock::time_point t,
Eigen::Vector3d board_to_camera_rotation,
- Eigen::Quaternion<double> imu_to_world) override {
+ Eigen::Quaternion<double> imu_to_world_rotation,
+ Eigen::Affine3d imu_to_world) override {
raw_cx.emplace_back(board_to_camera_rotation(0, 0));
raw_cy.emplace_back(board_to_camera_rotation(1, 0));
raw_cz.emplace_back(board_to_camera_rotation(2, 0));
Eigen::Matrix<double, 3, 1> rotation_vector =
- frc971::controls::ToRotationVectorFromQuaternion(imu_to_world);
+ frc971::controls::ToRotationVectorFromQuaternion(imu_to_world_rotation);
ct.emplace_back(chrono::duration<double>(t.time_since_epoch()).count());
Eigen::Matrix<double, 3, 1> cerr =
frc971::controls::ToRotationVectorFromQuaternion(
- imu_to_world.inverse() * orientation());
+ imu_to_world_rotation.inverse() * orientation());
cx.emplace_back(rotation_vector(0, 0));
cy.emplace_back(rotation_vector(1, 0));
@@ -330,11 +454,20 @@
cerry.emplace_back(cerr(1, 0));
cerrz.emplace_back(cerr(2, 0));
- const Eigen::Vector3d world_gravity = imu_to_world * last_accel_;
+ const Eigen::Vector3d world_gravity =
+ imu_to_world_rotation * last_accel_ -
+ Eigen::Vector3d(0, 0, kGravity) * gravity_scalar();
+
+ const Eigen::Vector3d camera_position =
+ imu_to_world * Eigen::Vector3d::Zero();
world_gravity_x.emplace_back(world_gravity.x());
world_gravity_y.emplace_back(world_gravity.y());
world_gravity_z.emplace_back(world_gravity.z());
+
+ camera_position_x.emplace_back(camera_position.x());
+ camera_position_y.emplace_back(camera_position.y());
+ camera_position_z.emplace_back(camera_position.z());
}
std::vector<double> ct;
@@ -354,6 +487,9 @@
std::vector<double> imu_x;
std::vector<double> imu_y;
std::vector<double> imu_z;
+ std::vector<double> camera_position_x;
+ std::vector<double> camera_position_y;
+ std::vector<double> camera_position_z;
std::vector<double> imut;
std::vector<double> imu_ratex;
@@ -375,13 +511,29 @@
template <typename S>
bool operator()(const S *const q1, const S *const q2, const S *const v,
- S *residual) const {
+ const S *const p, const S *const btw, const S *const itc,
+ const S *const gravity_scalar_ptr,
+ const S *const accelerometer_bias_ptr, S *residual) const {
Eigen::Quaternion<S> initial_orientation(q1[3], q1[0], q1[1], q1[2]);
Eigen::Quaternion<S> mounting_orientation(q2[3], q2[0], q2[1], q2[2]);
- Eigen::Matrix<S, 3, 1> imu_bias(v[0], v[1], v[2]);
+ Eigen::Quaternion<S> board_to_world(btw[3], btw[0], btw[1], btw[2]);
+ Eigen::Matrix<S, 3, 1> gyro_bias(v[0], v[1], v[2]);
+ Eigen::Matrix<S, 6, 1> initial_state;
+ initial_state(0) = p[0];
+ initial_state(1) = p[1];
+ initial_state(2) = p[2];
+ initial_state(3) = p[3];
+ initial_state(4) = p[4];
+ initial_state(5) = p[5];
+ Eigen::Matrix<S, 3, 1> imu_to_camera_translation(itc[0], itc[1], itc[2]);
+ Eigen::Matrix<S, 3, 1> accelerometer_bias(accelerometer_bias_ptr[0],
+ accelerometer_bias_ptr[1],
+ accelerometer_bias_ptr[2]);
CeresPoseFilter<S> filter(initial_orientation, mounting_orientation,
- imu_bias);
+ gyro_bias, initial_state, board_to_world,
+ imu_to_camera_translation, *gravity_scalar_ptr,
+ accelerometer_bias);
data->ReviewData(&filter);
for (size_t i = 0; i < filter.num_errors(); ++i) {
@@ -390,6 +542,12 @@
residual[3 * i + 2] = filter.errorz(i);
}
+ for (size_t i = 0; i < filter.num_perrors(); ++i) {
+ residual[3 * filter.num_errors() + 3 * i + 0] = filter.errorpx(i);
+ residual[3 * filter.num_errors() + 3 * i + 1] = filter.errorpy(i);
+ residual[3 * filter.num_errors() + 3 * i + 2] = filter.errorpz(i);
+ }
+
return true;
}
};
@@ -437,17 +595,29 @@
LOG(INFO) << "Done with event_loop running";
// And now we have it, we can start processing it.
- Eigen::Quaternion<double> nominal_initial_orientation(
+ const Eigen::Quaternion<double> nominal_initial_orientation(
frc971::controls::ToQuaternionFromRotationVector(
Eigen::Vector3d(0.0, 0.0, M_PI)));
- Eigen::Quaternion<double> nominal_imu_to_camera(
+ const Eigen::Quaternion<double> nominal_imu_to_camera(
Eigen::AngleAxisd(-0.5 * M_PI, Eigen::Vector3d::UnitX()));
+ const Eigen::Quaternion<double> nominal_board_to_world(
+ Eigen::AngleAxisd(0.5 * M_PI, Eigen::Vector3d::UnitX()));
- Eigen::Quaternion<double> initial_orientation =
- Eigen::Quaternion<double>::Identity();
- Eigen::Quaternion<double> imu_to_camera =
- Eigen::Quaternion<double>::Identity();
- Eigen::Vector3d imu_bias = Eigen::Vector3d::Zero();
+ Eigen::Quaternion<double> initial_orientation = nominal_initial_orientation;
+ // Eigen::Quaternion<double>::Identity();
+ Eigen::Quaternion<double> imu_to_camera = nominal_imu_to_camera;
+ // Eigen::Quaternion<double>::Identity();
+ Eigen::Quaternion<double> board_to_world = nominal_board_to_world;
+ // Eigen::Quaternion<double>::Identity();
+ Eigen::Vector3d gyro_bias = Eigen::Vector3d::Zero();
+ Eigen::Matrix<double, 6, 1> initial_state =
+ Eigen::Matrix<double, 6, 1>::Zero();
+ Eigen::Matrix<double, 3, 1> imu_to_camera_translation =
+ Eigen::Matrix<double, 3, 1>::Zero();
+
+ double gravity_scalar = 1.0;
+ Eigen::Matrix<double, 3, 1> accelerometer_bias =
+ Eigen::Matrix<double, 3, 1>::Zero();
{
ceres::Problem problem;
@@ -458,19 +628,28 @@
// auto-differentiation to obtain the derivative (jacobian).
ceres::CostFunction *cost_function =
- new ceres::AutoDiffCostFunction<CostFunctor, ceres::DYNAMIC, 4, 4, 3>(
- new CostFunctor(&data), data.camera_samples_size() * 3);
- problem.AddResidualBlock(cost_function, nullptr,
- initial_orientation.coeffs().data(),
- imu_to_camera.coeffs().data(), imu_bias.data());
+ new ceres::AutoDiffCostFunction<CostFunctor, ceres::DYNAMIC, 4, 4, 3, 6,
+ 4, 3, 1, 3>(
+ new CostFunctor(&data), data.camera_samples_size() * 6);
+ problem.AddResidualBlock(
+ cost_function, nullptr, initial_orientation.coeffs().data(),
+ imu_to_camera.coeffs().data(), gyro_bias.data(), initial_state.data(),
+ board_to_world.coeffs().data(), imu_to_camera_translation.data(),
+ &gravity_scalar, accelerometer_bias.data());
problem.SetParameterization(initial_orientation.coeffs().data(),
quaternion_local_parameterization);
problem.SetParameterization(imu_to_camera.coeffs().data(),
quaternion_local_parameterization);
+ problem.SetParameterization(board_to_world.coeffs().data(),
+ quaternion_local_parameterization);
for (int i = 0; i < 3; ++i) {
- problem.SetParameterLowerBound(imu_bias.data(), i, -0.05);
- problem.SetParameterUpperBound(imu_bias.data(), i, 0.05);
+ problem.SetParameterLowerBound(gyro_bias.data(), i, -0.05);
+ problem.SetParameterUpperBound(gyro_bias.data(), i, 0.05);
+ problem.SetParameterLowerBound(accelerometer_bias.data(), i, -0.05);
+ problem.SetParameterUpperBound(accelerometer_bias.data(), i, 0.05);
}
+ problem.SetParameterLowerBound(&gravity_scalar, 0, 0.95);
+ problem.SetParameterUpperBound(&gravity_scalar, 0, 1.05);
// Run the solver!
ceres::Solver::Options options;
@@ -497,12 +676,28 @@
<< frc971::controls::ToRotationVectorFromQuaternion(
imu_to_camera * nominal_imu_to_camera.inverse())
.transpose();
- LOG(INFO) << "imu_bias " << imu_bias.transpose();
+ LOG(INFO) << "gyro_bias " << gyro_bias.transpose();
+ LOG(INFO) << "board_to_world " << board_to_world.coeffs().transpose();
+ LOG(INFO) << "board_to_world(rotation) "
+ << frc971::controls::ToRotationVectorFromQuaternion(
+ board_to_world)
+ .transpose();
+ LOG(INFO) << "board_to_world delta "
+ << frc971::controls::ToRotationVectorFromQuaternion(
+ board_to_world * nominal_board_to_world.inverse())
+ .transpose();
+ LOG(INFO) << "imu_to_camera_translation "
+ << imu_to_camera_translation.transpose();
+ LOG(INFO) << "gravity " << kGravity * gravity_scalar;
+ LOG(INFO) << "accelerometer bias " << accelerometer_bias.transpose();
}
{
- PoseFilter filter(initial_orientation, imu_to_camera, imu_bias);
+ PoseFilter filter(initial_orientation, imu_to_camera, gyro_bias,
+ initial_state, board_to_world, imu_to_camera_translation,
+ gravity_scalar, accelerometer_bias);
data.ReviewData(&filter);
+ filter.Plot();
}
}
diff --git a/y2022/BUILD b/y2022/BUILD
index 812cac1..78cb86c 100644
--- a/y2022/BUILD
+++ b/y2022/BUILD
@@ -26,7 +26,9 @@
robot_downloader(
name = "pi_download",
binaries = [
+ "//y2020/vision:calibration",
"//y2022/vision:viewer",
+ "//y2022/localizer:imu_main",
],
data = [
":config",
@@ -166,6 +168,7 @@
"//frc971/control_loops:pose",
"//frc971/control_loops:static_zeroing_single_dof_profiled_subsystem",
"//y2022/control_loops/drivetrain:polydrivetrain_plants",
+ "//y2022/control_loops/superstructure/intake:intake_plants",
"@com_github_google_glog//:glog",
"@com_google_absl//absl/base",
],
@@ -232,3 +235,10 @@
"//y2022/control_loops/superstructure:superstructure_status_fbs",
],
)
+
+py_library(
+ name = "python_init",
+ srcs = ["__init__.py"],
+ target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//visibility:public"],
+)
diff --git a/y2022/__init__.py b/y2022/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/y2022/__init__.py
diff --git a/y2022/constants.cc b/y2022/constants.cc
index 98f6511..73e72f3 100644
--- a/y2022/constants.cc
+++ b/y2022/constants.cc
@@ -11,6 +11,7 @@
#include "aos/mutex/mutex.h"
#include "aos/network/team_number.h"
#include "glog/logging.h"
+#include "y2022/control_loops/superstructure/intake/integral_intake_plant.h"
namespace y2022 {
namespace constants {
@@ -26,6 +27,37 @@
const Values *DoGetValuesForTeam(uint16_t team) {
Values *const r = new Values();
+ // TODO(Yash): Set constants
+ // Intake constants.
+ auto *const intake = &r->intake;
+
+ intake->zeroing_voltage = 3.0;
+ intake->operating_voltage = 12.0;
+ intake->zeroing_profile_params = {0.5, 3.0};
+ intake->default_profile_params = {6.0, 30.0};
+ intake->range = Values::kIntakeRange();
+ intake->make_integral_loop =
+ control_loops::superstructure::intake::MakeIntegralIntakeLoop;
+
+ // The number of samples in the moving average filter.
+ intake->zeroing_constants.average_filter_size = Values::kZeroingSampleSize;
+ // The distance that the absolute encoder needs to complete a full rotation.
+ intake->zeroing_constants.one_revolution_distance =
+ M_PI * 2.0 * constants::Values::kIntakeEncoderRatio();
+
+ // Threshold for deciding if we are moving. moving_buffer_size samples need to
+ // be within this distance of each other before we use the middle one to zero.
+ intake->zeroing_constants.zeroing_threshold = 0.0005;
+ // Buffer size for deciding if we are moving.
+ intake->zeroing_constants.moving_buffer_size = 20;
+
+ // Value between 0 and 1 indicating what fraction of one_revolution_distance
+ // it is acceptable for the offset to move.
+ intake->zeroing_constants.allowable_encoder_error = 0.9;
+
+ // Measured absolute position of the encoder when at zero.
+ intake->zeroing_constants.measured_absolute_position = 0.0;
+
switch (team) {
// A set of constants for tests.
case 1:
diff --git a/y2022/constants.h b/y2022/constants.h
index 050a363..050c5c7 100644
--- a/y2022/constants.h
+++ b/y2022/constants.h
@@ -9,6 +9,7 @@
#include "frc971/control_loops/pose.h"
#include "frc971/control_loops/static_zeroing_single_dof_profiled_subsystem.h"
#include "y2022/control_loops/drivetrain/drivetrain_dog_motor_plant.h"
+#include "y2022/control_loops/superstructure/intake/intake_plant.h"
namespace y2022 {
namespace constants {
@@ -32,6 +33,19 @@
static constexpr double kRollerSupplyCurrentLimit() { return 30.0; }
static constexpr double kRollerStatorCurrentLimit() { return 40.0; }
+ ::frc971::control_loops::StaticZeroingSingleDOFProfiledSubsystemParams<
+ ::frc971::zeroing::PotAndAbsoluteEncoderZeroingEstimator>
+ intake;
+
+ // TODO (Yash): Constants need to be tuned
+ static constexpr ::frc971::constants::Range kIntakeRange() {
+ return ::frc971::constants::Range{
+ .lower_hard = -0.5, // Back Hard
+ .upper_hard = 2.85 + 0.05, // Front Hard
+ .lower = -0.300, // Back Soft
+ .upper = 2.725 // Front Soft
+ };
+ }
// Climber
static constexpr double kClimberSupplyCurrentLimit() { return 60.0; }
diff --git a/y2022/control_loops/BUILD b/y2022/control_loops/BUILD
new file mode 100644
index 0000000..49bc419
--- /dev/null
+++ b/y2022/control_loops/BUILD
@@ -0,0 +1,7 @@
+py_library(
+ name = "python_init",
+ srcs = ["__init__.py"],
+ target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//visibility:public"],
+ deps = ["//y2022:python_init"],
+)
diff --git a/y2022/control_loops/__init__.py b/y2022/control_loops/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/y2022/control_loops/__init__.py
diff --git a/y2022/control_loops/python/BUILD b/y2022/control_loops/python/BUILD
index e0b0daa..bc2b624 100644
--- a/y2022/control_loops/python/BUILD
+++ b/y2022/control_loops/python/BUILD
@@ -48,10 +48,26 @@
],
)
+py_binary(
+ name = "intake",
+ srcs = [
+ "intake.py",
+ ],
+ legacy_create_init = False,
+ target_compatible_with = ["@platforms//cpu:x86_64"],
+ deps = [
+ ":python_init",
+ "//external:python-gflags",
+ "//external:python-glog",
+ "//frc971/control_loops/python:angular_system",
+ "//frc971/control_loops/python:controls",
+ ],
+)
+
py_library(
name = "python_init",
srcs = ["__init__.py"],
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//visibility:public"],
- deps = ["//y2020/control_loops:python_init"],
+ deps = ["//y2022/control_loops:python_init"],
)
diff --git a/y2022/control_loops/python/intake.py b/y2022/control_loops/python/intake.py
new file mode 100644
index 0000000..e5030e5
--- /dev/null
+++ b/y2022/control_loops/python/intake.py
@@ -0,0 +1,55 @@
+#!/usr/bin/python3
+
+from aos.util.trapezoid_profile import TrapezoidProfile
+from frc971.control_loops.python import control_loop
+from frc971.control_loops.python import angular_system
+from frc971.control_loops.python import controls
+import numpy
+import sys
+from matplotlib import pylab
+import gflags
+import glog
+
+FLAGS = gflags.FLAGS
+
+try:
+ gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
+except gflags.DuplicateFlagError:
+ pass
+
+kIntake = angular_system.AngularSystemParams(
+ name='Intake',
+ motor=control_loop.Falcon(),
+ # TODO(Milo): Change gear ratios when we have all of them
+ G=0.02,
+ J=0.34,
+ q_pos=0.40,
+ q_vel=20.0,
+ kalman_q_pos=0.12,
+ kalman_q_vel=2.0,
+ kalman_q_voltage=4.0,
+ kalman_r_position=0.05,
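+    # 13 inches expressed in meters; only used to report the stall force.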
+ radius=13 * 0.0254)
+
+
+def main(argv):
+ if FLAGS.plot:
+ R = numpy.matrix([[numpy.pi / 2.0], [0.0]])
+ angular_system.PlotKick(kIntake, R)
+ angular_system.PlotMotion(kIntake, R)
+
+ # Write the generated constants out to a file.
+ if len(argv) != 5:
+ glog.fatal(
+ 'Expected .h file name and .cc file name for the intake and integral intake.'
+ )
+ else:
+ namespaces = ['y2022', 'control_loops', 'superstructure', 'intake']
+ angular_system.WriteAngularSystem(kIntake, argv[1:3], argv[3:5],
+ namespaces)
+
+
+if __name__ == '__main__':
+ argv = FLAGS(sys.argv)
+ glog.init()
+ sys.exit(main(argv))
diff --git a/y2022/control_loops/superstructure/intake/BUILD b/y2022/control_loops/superstructure/intake/BUILD
new file mode 100644
index 0000000..f6e5be0
--- /dev/null
+++ b/y2022/control_loops/superstructure/intake/BUILD
@@ -0,0 +1,34 @@
+package(default_visibility = ["//y2022:__subpackages__"])
+
+genrule(
+ name = "genrule_intake",
+ outs = [
+ "intake_plant.h",
+ "intake_plant.cc",
+ "integral_intake_plant.h",
+ "integral_intake_plant.cc",
+ ],
+ cmd = "$(location //y2022/control_loops/python:intake) $(OUTS)",
+ target_compatible_with = ["@platforms//os:linux"],
+ tools = [
+ "//y2022/control_loops/python:intake",
+ ],
+)
+
+cc_library(
+ name = "intake_plants",
+ srcs = [
+ "intake_plant.cc",
+ "integral_intake_plant.cc",
+ ],
+ hdrs = [
+ "intake_plant.h",
+ "integral_intake_plant.h",
+ ],
+ target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//visibility:public"],
+ deps = [
+ "//frc971/control_loops:hybrid_state_feedback_loop",
+ "//frc971/control_loops:state_feedback_loop",
+ ],
+)
diff --git a/y2022/localizer/BUILD b/y2022/localizer/BUILD
new file mode 100644
index 0000000..c2afa55
--- /dev/null
+++ b/y2022/localizer/BUILD
@@ -0,0 +1,32 @@
+cc_library(
+ name = "imu",
+ srcs = [
+ "imu.cc",
+ ],
+ hdrs = [
+ "imu.h",
+ ],
+ target_compatible_with = ["@platforms//os:linux"],
+ deps = [
+ "//aos/events:epoll",
+ "//aos/events:shm_event_loop",
+ "//aos/util:crc32",
+ "//frc971/wpilib:imu_batch_fbs",
+ "//frc971/wpilib:imu_fbs",
+ "//y2022:constants",
+ "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/types:span",
+ ],
+)
+
+cc_binary(
+ name = "imu_main",
+ srcs = ["imu_main.cc"],
+ target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":imu",
+ "//aos:init",
+ "//aos/events:shm_event_loop",
+ ],
+)
diff --git a/y2022/localizer/imu.cc b/y2022/localizer/imu.cc
new file mode 100644
index 0000000..dbdc1a1
--- /dev/null
+++ b/y2022/localizer/imu.cc
@@ -0,0 +1,165 @@
+#include "y2022/localizer/imu.h"
+
+#include "aos/util/crc32.h"
+#include "glog/logging.h"
+
+namespace y2022::localizer {
+
+namespace {
+
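+// Layout of one packet from the kernel driver, as decoded by ProcessReading()
+// below (sizes in bytes): 8 driver timestamp + 2 DIAG_STAT + 3 * 4 gyro +
+// 3 * 4 accel + 2 temperature + 2 data counter + 4 pico timestamp +
+// 2 * 2 encoders + 4 CRC32 = 50 bytes.  The CRC covers the 38 bytes between
+// the driver timestamp and the checksum, hence the subspan(8, 38) below.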
+constexpr size_t kReadSize = 50;
+constexpr double kGyroScale = 1 / 655360.0 / 360.0 * (2 * M_PI);
+constexpr double kAccelScale = 1 / 26756268.0 / 9.80665;
+constexpr double kTempScale = 0.1;
+
+} // namespace
+
+Imu::Imu(aos::ShmEventLoop *event_loop)
+ : event_loop_(event_loop),
+ imu_sender_(
+ event_loop_->MakeSender<frc971::IMUValuesBatch>("/drivetrain")) {
+ event_loop->SetRuntimeRealtimePriority(30);
+ imu_fd_ = open("/dev/adis16505", O_RDONLY | O_NONBLOCK);
+ PCHECK(imu_fd_ != -1) << ": Failed to open SPI device for IMU.";
+ aos::internal::EPoll *epoll = event_loop_->epoll();
+ epoll->OnReadable(imu_fd_, [this]() {
+ uint8_t buf[kReadSize];
+ ssize_t read_len = read(imu_fd_, buf, kReadSize);
+ // TODO: Do we care about gracefully handling EAGAIN or anything else?
+ // This should only get called when there is data.
+ PCHECK(read_len != -1);
+ CHECK_EQ(read_len, static_cast<ssize_t>(kReadSize))
+ << ": Read incorrect number of bytes.";
+
+ auto sender = imu_sender_.MakeBuilder();
+
+ const flatbuffers::Offset<frc971::IMUValues> values_offset =
+ ProcessReading(sender.fbb(), absl::Span(buf, kReadSize));
+ const flatbuffers::Offset<
+ flatbuffers::Vector<flatbuffers::Offset<frc971::IMUValues>>>
+ readings_offset = sender.fbb()->CreateVector(&values_offset, 1);
+ frc971::IMUValuesBatch::Builder batch_builder =
+ sender.MakeBuilder<frc971::IMUValuesBatch>();
+ batch_builder.add_readings(readings_offset);
+ imu_sender_.CheckOk(sender.Send(batch_builder.Finish()));
+ });
+}
+
+flatbuffers::Offset<frc971::IMUValues> Imu::ProcessReading(
+ flatbuffers::FlatBufferBuilder *fbb, const absl::Span<uint8_t> message) {
+ absl::Span<const uint8_t> buf = message;
+
+ uint64_t driver_timestamp;
+ memcpy(&driver_timestamp, buf.data(), sizeof(driver_timestamp));
+ buf = buf.subspan(8);
+
+ uint16_t diag_stat;
+ memcpy(&diag_stat, buf.data(), sizeof(diag_stat));
+ buf = buf.subspan(2);
+
+ double x_gyro = ConvertValue32(buf, kGyroScale);
+ buf = buf.subspan(4);
+ double y_gyro = ConvertValue32(buf, kGyroScale);
+ buf = buf.subspan(4);
+ double z_gyro = ConvertValue32(buf, kGyroScale);
+ buf = buf.subspan(4);
+ double x_accel = ConvertValue32(buf, kAccelScale);
+ buf = buf.subspan(4);
+ double y_accel = ConvertValue32(buf, kAccelScale);
+ buf = buf.subspan(4);
+ double z_accel = ConvertValue32(buf, kAccelScale);
+ buf = buf.subspan(4);
+ double temp = ConvertValue16(buf, kTempScale);
+ buf = buf.subspan(2);
+ uint16_t data_counter;
+ memcpy(&data_counter, buf.data(), sizeof(data_counter));
+ buf = buf.subspan(2);
+ uint32_t pico_timestamp;
+ memcpy(&pico_timestamp, buf.data(), sizeof(pico_timestamp));
+ buf = buf.subspan(4);
+ int16_t encoder1_count;
+ memcpy(&encoder1_count, buf.data(), sizeof(encoder1_count));
+ buf = buf.subspan(2);
+ int16_t encoder2_count;
+ memcpy(&encoder2_count, buf.data(), sizeof(encoder2_count));
+ buf = buf.subspan(2);
+ uint32_t checksum;
+ memcpy(&checksum, buf.data(), sizeof(checksum));
+ buf = buf.subspan(4);
+
+ CHECK(buf.empty()) << "Have leftover bytes: " << buf.size();
+
+  uint32_t calculated_checksum = aos::ComputeCrc32(message.subspan(8, 38));
+
+ if (checksum != calculated_checksum) {
+ this->failed_checksums_++;
+ }
+
+ const auto diag_stat_offset = PackDiagStat(fbb, diag_stat);
+
+ frc971::IMUValues::Builder imu_builder(*fbb);
+
+ if (checksum == calculated_checksum) {
+ constexpr uint16_t kChecksumMismatch = 1 << 0;
+ bool imu_checksum_matched = !(diag_stat & kChecksumMismatch);
+
+ // data from the IMU packet
+ if (imu_checksum_matched) {
+ imu_builder.add_gyro_x(x_gyro);
+ imu_builder.add_gyro_y(y_gyro);
+ imu_builder.add_gyro_z(z_gyro);
+
+ imu_builder.add_accelerometer_x(x_accel);
+ imu_builder.add_accelerometer_y(y_accel);
+ imu_builder.add_accelerometer_z(z_accel);
+
+ imu_builder.add_temperature(temp);
+
+ imu_builder.add_data_counter(data_counter);
+ }
+
+ // extra data from the pico
+ imu_builder.add_pico_timestamp_us(pico_timestamp);
+ imu_builder.add_left_encoder(encoder1_count);
+ imu_builder.add_right_encoder(encoder2_count);
+ imu_builder.add_previous_reading_diag_stat(diag_stat_offset);
+ }
+
+ // extra data from us
+ imu_builder.add_monotonic_timestamp_ns(driver_timestamp);
+ imu_builder.add_failed_checksums(failed_checksums_);
+ imu_builder.add_checksum_failed(checksum != calculated_checksum);
+
+ return imu_builder.Finish();
+}
+
+flatbuffers::Offset<frc971::ADIS16470DiagStat> Imu::PackDiagStat(
+ flatbuffers::FlatBufferBuilder *fbb, uint16_t value) {
+ frc971::ADIS16470DiagStat::Builder diag_stat_builder(*fbb);
+ diag_stat_builder.add_clock_error(value & (1 << 7));
+ diag_stat_builder.add_memory_failure(value & (1 << 6));
+ diag_stat_builder.add_sensor_failure(value & (1 << 5));
+ diag_stat_builder.add_standby_mode(value & (1 << 4));
+ diag_stat_builder.add_spi_communication_error(value & (1 << 3));
+ diag_stat_builder.add_flash_memory_update_error(value & (1 << 2));
+ diag_stat_builder.add_data_path_overrun(value & (1 << 1));
+ diag_stat_builder.add_checksum_mismatch(value & (1 << 0));
+ return diag_stat_builder.Finish();
+}
+
+double Imu::ConvertValue32(absl::Span<const uint8_t> data,
+ double lsb_per_output) {
+ int32_t value;
+ memcpy(&value, data.data(), sizeof(value));
+ return static_cast<double>(value) * lsb_per_output;
+}
+
+double Imu::ConvertValue16(absl::Span<const uint8_t> data,
+ double lsb_per_output) {
+ int16_t value;
+ memcpy(&value, data.data(), sizeof(value));
+ return static_cast<double>(value) * lsb_per_output;
+}
+
+Imu::~Imu() { PCHECK(0 == close(imu_fd_)); }
+} // namespace y2022::localizer
diff --git a/y2022/localizer/imu.h b/y2022/localizer/imu.h
new file mode 100644
index 0000000..cd45710
--- /dev/null
+++ b/y2022/localizer/imu.h
@@ -0,0 +1,31 @@
+#ifndef Y2022_LOCALIZER_IMU_H_
+#define Y2022_LOCALIZER_IMU_H_
+#include "aos/events/shm_event_loop.h"
+#include "frc971/wpilib/imu_batch_generated.h"
+#include "y2022/constants.h"
+
+namespace y2022::localizer {
+
+// Reads IMU packets from the kernel driver, which reads them over SPI
+// from the Raspberry Pi Pico on the IMU board.
+class Imu {
+ public:
+ Imu(aos::ShmEventLoop *event_loop);
+ ~Imu();
+
+ private:
+ flatbuffers::Offset<frc971::ADIS16470DiagStat> PackDiagStat(
+ flatbuffers::FlatBufferBuilder *fbb, uint16_t value);
+ flatbuffers::Offset<frc971::IMUValues> ProcessReading(
+ flatbuffers::FlatBufferBuilder *fbb, absl::Span<uint8_t> buf);
+ double ConvertValue32(absl::Span<const uint8_t> data, double lsb_per_output);
+ double ConvertValue16(absl::Span<const uint8_t> data, double lsb_per_output);
+
+ aos::ShmEventLoop *event_loop_;
+ aos::Sender<frc971::IMUValuesBatch> imu_sender_;
+ int imu_fd_;
+
+  uint32_t failed_checksums_ = 0;
+};
+} // namespace y2022::localizer
+#endif // Y2022_LOCALIZER_IMU_H_
diff --git a/y2022/localizer/imu_main.cc b/y2022/localizer/imu_main.cc
new file mode 100644
index 0000000..bba2dd7
--- /dev/null
+++ b/y2022/localizer/imu_main.cc
@@ -0,0 +1,19 @@
+#include "aos/events/shm_event_loop.h"
+#include "aos/init.h"
+#include "y2022/localizer/imu.h"
+
+DEFINE_string(config, "config.json", "Path to the config file to use.");
+
+int main(int argc, char *argv[]) {
+ aos::InitGoogle(&argc, &argv);
+
+ aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+ aos::configuration::ReadConfig(FLAGS_config);
+
+ aos::ShmEventLoop event_loop(&config.message());
+ y2022::localizer::Imu imu(&event_loop);
+
+ event_loop.Run();
+
+ return 0;
+}
diff --git a/y2022/vision/blob_detector.cc b/y2022/vision/blob_detector.cc
index aba3550..76b16ca 100644
--- a/y2022/vision/blob_detector.cc
+++ b/y2022/vision/blob_detector.cc
@@ -17,11 +17,11 @@
namespace y2022 {
namespace vision {
-cv::Mat BlobDetector::ThresholdImage(cv::Mat rgb_image) {
- cv::Mat binarized_image(cv::Size(rgb_image.cols, rgb_image.rows), CV_8UC1);
- for (int row = 0; row < rgb_image.rows; row++) {
- for (int col = 0; col < rgb_image.cols; col++) {
- cv::Vec3b pixel = rgb_image.at<cv::Vec3b>(row, col);
+cv::Mat BlobDetector::ThresholdImage(cv::Mat bgr_image) {
+ cv::Mat binarized_image(cv::Size(bgr_image.cols, bgr_image.rows), CV_8UC1);
+ for (int row = 0; row < bgr_image.rows; row++) {
+ for (int col = 0; col < bgr_image.cols; col++) {
+ cv::Vec3b pixel = bgr_image.at<cv::Vec3b>(row, col);
uint8_t blue = pixel.val[0];
uint8_t green = pixel.val[1];
uint8_t red = pixel.val[2];
@@ -143,15 +143,26 @@
}
double DistanceTo(cv::Point2d p) const {
- // Translate the point so that the circle orgin can be (0, 0)
- const auto p_prime = cv::Point2d(p.y - center.y, p.x - center.x);
+ const auto p_prime = TranslateToOrigin(p);
// Now, the distance is simply the difference between distance from the
// origin to p' and the radius.
return std::abs(cv::norm(p_prime) - radius);
}
- // Inverted because y-coordinates go backwards
- bool OnTopHalf(cv::Point2d p) const { return p.y <= center.y; }
+ bool InAngleRange(cv::Point2d p, double theta_min, double theta_max) const {
+ auto p_prime = TranslateToOrigin(p);
+ // Flip the y because y values go downwards.
+ p_prime.y *= -1;
+ const double theta = std::atan2(p_prime.y, p_prime.x);
+ return (theta >= theta_min && theta <= theta_max);
+ }
+
+ private:
+  // Translates the point into a frame where the circle's center is the
+  // origin (0, 0).
+ cv::Point2d TranslateToOrigin(cv::Point2d p) const {
+ return cv::Point2d(p.x - center.x, p.y - center.y);
+ }
};
} // namespace
@@ -176,17 +187,17 @@
// y = -(y_offset - offset_y)
constexpr int kMaxY = 400;
constexpr double kTapeAspectRatio = 5.0 / 2.0;
- constexpr double kAspectRatioThreshold = 1.5;
+ constexpr double kAspectRatioThreshold = 1.6;
constexpr double kMinArea = 10;
- constexpr size_t kMinPoints = 6;
+ constexpr size_t kMinNumPoints = 6;
// Remove all blobs that are at the bottom of the image, have a different
- // aspect ratio than the tape, or have too little area or points
- // TODO(milind): modify to take into account that blobs will be on the side.
+ // aspect ratio than the tape, or have too little area or points.
if ((stats_it->centroid.y <= kMaxY) &&
(std::abs(kTapeAspectRatio - stats_it->aspect_ratio) <
kAspectRatioThreshold) &&
- (stats_it->area >= kMinArea) && (stats_it->num_points >= kMinPoints)) {
+ (stats_it->area >= kMinArea) &&
+ (stats_it->num_points >= kMinNumPoints)) {
filtered_blobs.push_back(*blob_it);
filtered_stats.push_back(*stats_it);
}
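
With kTapeAspectRatio = 5.0 / 2.0 and kAspectRatioThreshold = 1.6, the aspect-ratio cut keeps blobs whose aspect ratio falls roughly in (0.9, 4.1). The same cuts restated as a standalone predicate (KeepBlob is a hypothetical name; the BlobStats fields are the ones used above):

#include <cmath>

#include "y2022/vision/blob_detector.h"

bool KeepBlob(const y2022::vision::BlobDetector::BlobStats &stats) {
  constexpr int kMaxY = 400;
  constexpr double kTapeAspectRatio = 5.0 / 2.0;
  constexpr double kAspectRatioThreshold = 1.6;
  constexpr double kMinArea = 10;
  constexpr size_t kMinNumPoints = 6;
  // Reject blobs near the bottom of the image, with a tape-unlike aspect
  // ratio, or with too little area or too few points.
  return (stats.centroid.y <= kMaxY) &&
         (std::abs(kTapeAspectRatio - stats.aspect_ratio) <
          kAspectRatioThreshold) &&
         (stats.area >= kMinArea) && (stats.num_points >= kMinNumPoints);
}
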
@@ -196,6 +207,9 @@
// Threshold for mean distance from a blob centroid to a circle.
constexpr double kCircleDistanceThreshold = 5.0;
+  // We only expect to see blobs within this angle range on the circle.
+ constexpr double kMinBlobAngle = M_PI / 3;
+ constexpr double kMaxBlobAngle = M_PI - kMinBlobAngle;
std::vector<std::vector<cv::Point>> blob_circle;
std::vector<cv::Point2d> centroids;
@@ -230,16 +244,20 @@
continue;
}
- // Only try to fit points to this circle if all of these are on the top
- // half, like how the blobs should be
- if (circle->OnTopHalf(current_centroids[0]) &&
- circle->OnTopHalf(current_centroids[1]) &&
- circle->OnTopHalf(current_centroids[2])) {
+      // Only try to fit points to this circle if all three centroids are
+      // within the expected angle range.
+ if (circle->InAngleRange(current_centroids[0], kMinBlobAngle,
+ kMaxBlobAngle) &&
+ circle->InAngleRange(current_centroids[1], kMinBlobAngle,
+ kMaxBlobAngle) &&
+ circle->InAngleRange(current_centroids[2], kMinBlobAngle,
+ kMaxBlobAngle)) {
for (size_t m = 0; m < filtered_blobs.size(); m++) {
          // Add this blob to the list if it is close to the circle, is within
          // the expected angle range, and isn't one of the other blobs
if ((m != i) && (m != j) && (m != k) &&
- circle->OnTopHalf(filtered_stats[m].centroid) &&
+ circle->InAngleRange(filtered_stats[m].centroid, kMinBlobAngle,
+ kMaxBlobAngle) &&
(circle->DistanceTo(filtered_stats[m].centroid) <
kCircleDistanceThreshold)) {
current_blobs.emplace_back(filtered_blobs[m]);
@@ -293,18 +311,16 @@
cv::circle(view_image, centroid, 3, cv::Scalar(255, 255, 0), cv::FILLED);
}
-void BlobDetector::ExtractBlobs(
- cv::Mat rgb_image, cv::Mat &binarized_image,
- std::vector<std::vector<cv::Point>> &filtered_blobs,
- std::vector<std::vector<cv::Point>> &unfiltered_blobs,
- std::vector<BlobStats> &blob_stats, cv::Point ¢roid) {
+void BlobDetector::ExtractBlobs(cv::Mat bgr_image,
+ BlobDetector::BlobResult *blob_result) {
auto start = aos::monotonic_clock::now();
- binarized_image = ThresholdImage(rgb_image);
- unfiltered_blobs = FindBlobs(binarized_image);
- blob_stats = ComputeStats(unfiltered_blobs);
- auto filtered_pair = FilterBlobs(unfiltered_blobs, blob_stats);
- filtered_blobs = filtered_pair.first;
- centroid = filtered_pair.second;
+ blob_result->binarized_image = ThresholdImage(bgr_image);
+ blob_result->unfiltered_blobs = FindBlobs(blob_result->binarized_image);
+ blob_result->blob_stats = ComputeStats(blob_result->unfiltered_blobs);
+ auto filtered_pair =
+ FilterBlobs(blob_result->unfiltered_blobs, blob_result->blob_stats);
+ blob_result->filtered_blobs = filtered_pair.first;
+ blob_result->centroid = filtered_pair.second;
auto end = aos::monotonic_clock::now();
LOG(INFO) << "Blob detection elapsed time: "
<< std::chrono::duration<double, std::milli>(end - start).count()
diff --git a/y2022/vision/blob_detector.h b/y2022/vision/blob_detector.h
index f8a4ab4..d263d32 100644
--- a/y2022/vision/blob_detector.h
+++ b/y2022/vision/blob_detector.h
@@ -16,11 +16,19 @@
size_t num_points;
};
+ struct BlobResult {
+ cv::Mat binarized_image;
+ std::vector<std::vector<cv::Point>> filtered_blobs, unfiltered_blobs;
+ std::vector<BlobStats> blob_stats;
+ cv::Point centroid;
+ };
+
BlobDetector() {}
+
// Given an image, threshold it to find "green" pixels
// Input: Color image
// Output: Grayscale (binarized) image with green pixels set to 255
- static cv::Mat ThresholdImage(cv::Mat rgb_image);
+ static cv::Mat ThresholdImage(cv::Mat bgr_image);
// Given binary image, extract blobs
static std::vector<std::vector<cv::Point>> FindBlobs(cv::Mat threshold_image);
@@ -44,11 +52,7 @@
const std::vector<std::vector<cv::Point>> &unfiltered_blobs,
const std::vector<BlobStats> &blob_stats, cv::Point centroid);
- static void ExtractBlobs(
- cv::Mat rgb_image, cv::Mat &binarized_image,
- std::vector<std::vector<cv::Point>> &filtered_blobs,
- std::vector<std::vector<cv::Point>> &unfiltered_blobs,
- std::vector<BlobStats> &blob_stats, cv::Point ¢roid);
+ static void ExtractBlobs(cv::Mat bgr_image, BlobResult *blob_result);
};
} // namespace vision
} // namespace y2022
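
With the five output parameters collapsed into a single BlobResult, callers now look roughly like this (a minimal usage sketch; DetectAndLog is a hypothetical caller and bgr_image any 8-bit BGR frame):

#include "glog/logging.h"
#include "opencv2/core/mat.hpp"
#include "y2022/vision/blob_detector.h"

void DetectAndLog(cv::Mat bgr_image) {
  y2022::vision::BlobDetector::BlobResult blob_result;
  y2022::vision::BlobDetector::ExtractBlobs(bgr_image, &blob_result);
  LOG(INFO) << "Kept " << blob_result.filtered_blobs.size() << " of "
            << blob_result.unfiltered_blobs.size() << " blobs; centroid at ("
            << blob_result.centroid.x << ", " << blob_result.centroid.y
            << ")";
}
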
diff --git a/y2022/vision/calib_files/calibration_pi-971-1_2021-09-11_17-49-00.000000000.json b/y2022/vision/calib_files/calibration_pi-971-1_2021-09-11_17-49-00.000000000.json
deleted file mode 100644
index 39c7911..0000000
--- a/y2022/vision/calib_files/calibration_pi-971-1_2021-09-11_17-49-00.000000000.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "node_name": "pi1",
- "team_number": 971,
- "intrinsics": [
- 392.276093,
- 0.0,
- 293.934753,
- 0.0,
- 392.30838,
- 212.287537,
- 0.0,
- 0.0,
- 1.0
- ],
- "dist_coeffs": [
- 0.149561,
- -0.261432,
- -0.000182,
- -0.000697,
- 0.099255
- ],
- "calibration_timestamp": 1597994992500905688
-}
diff --git a/y2022/vision/calib_files/calibration_pi-971-1_2022-02-06_15-19-00.000000000.json b/y2022/vision/calib_files/calibration_pi-971-1_2022-02-06_15-19-00.000000000.json
new file mode 100755
index 0000000..6a4f05c
--- /dev/null
+++ b/y2022/vision/calib_files/calibration_pi-971-1_2022-02-06_15-19-00.000000000.json
@@ -0,0 +1,23 @@
+{
+ "node_name": "pi1",
+ "team_number": 971,
+ "intrinsics": [
+ 398.312439,
+ 0.0,
+ 348.653015,
+ 0.0,
+ 397.627533,
+ 257.368805,
+ 0.0,
+ 0.0,
+ 1.0
+ ],
+ "dist_coeffs": [
+ 0.143741,
+ -0.274336,
+ -0.000311,
+ -0.000171,
+ 0.10252
+ ],
+ "calibration_timestamp": 1635600750700335075
+}
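
The nine intrinsics values form a row-major 3x3 camera matrix [fx 0 cx; 0 fy cy; 0 0 1], and dist_coeffs appears to follow OpenCV's (k1, k2, p1, p2, k3) ordering. A sketch of wrapping these numbers for OpenCV calls (the array-based signature is illustrative; the real code reads them back through the CameraIntrinsics()/CameraExtrinsics() helpers used below):

#include <array>

#include "opencv2/core.hpp"

struct CameraCalib {
  cv::Mat intrinsics;   // 3x3, CV_64F
  cv::Mat dist_coeffs;  // 1x5, CV_64F
};

CameraCalib MakeCalib(const std::array<double, 9> &intrinsics,
                      const std::array<double, 5> &dist_coeffs) {
  CameraCalib calib;
  // clone() copies the data so the returned Mats own their storage.
  calib.intrinsics =
      cv::Mat(3, 3, CV_64F, const_cast<double *>(intrinsics.data())).clone();
  calib.dist_coeffs =
      cv::Mat(1, 5, CV_64F, const_cast<double *>(dist_coeffs.data())).clone();
  return calib;
}
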
diff --git a/y2022/vision/camera_reader.cc b/y2022/vision/camera_reader.cc
index e1de75a..abce18c 100644
--- a/y2022/vision/camera_reader.cc
+++ b/y2022/vision/camera_reader.cc
@@ -1,17 +1,16 @@
#include "y2022/vision/camera_reader.h"
-#include <cmath>
#include <chrono>
+#include <cmath>
#include <thread>
-#include <opencv2/imgproc.hpp>
-
#include "aos/events/event_loop.h"
#include "aos/events/shm_event_loop.h"
#include "aos/flatbuffer_merge.h"
#include "aos/network/team_number.h"
#include "frc971/vision/v4l2_reader.h"
#include "frc971/vision/vision_generated.h"
+#include "opencv2/imgproc.hpp"
#include "y2022/vision/blob_detector.h"
#include "y2022/vision/calibration_generated.h"
#include "y2022/vision/target_estimator.h"
@@ -82,25 +81,23 @@
} // namespace
void CameraReader::ProcessImage(cv::Mat image_mat) {
- // Remember, we're getting YUYV images, so we start by converting to RGB
-
- std::vector<std::vector<cv::Point>> filtered_blobs, unfiltered_blobs;
- std::vector<BlobDetector::BlobStats> blob_stats;
- cv::Mat binarized_image =
+ BlobDetector::BlobResult blob_result;
+ blob_result.binarized_image =
cv::Mat::zeros(cv::Size(image_mat.cols, image_mat.rows), CV_8UC1);
- cv::Point centroid;
- BlobDetector::ExtractBlobs(image_mat, binarized_image, filtered_blobs,
- unfiltered_blobs, blob_stats, centroid);
+ BlobDetector::ExtractBlobs(image_mat, &blob_result);
auto builder = target_estimate_sender_.MakeBuilder();
- flatbuffers::Offset<BlobResult> blob_result_offset;
+ flatbuffers::Offset<BlobResultFbs> blob_result_offset;
{
- const auto filtered_blobs_offset = CvBlobsToFbs(filtered_blobs, builder);
+ const auto filtered_blobs_offset =
+ CvBlobsToFbs(blob_result.filtered_blobs, builder);
const auto unfiltered_blobs_offset =
- CvBlobsToFbs(unfiltered_blobs, builder);
- const auto blob_stats_offset = BlobStatsToFbs(blob_stats, builder);
- const Point centroid_fbs = Point{centroid.x, centroid.y};
+ CvBlobsToFbs(blob_result.unfiltered_blobs, builder);
+ const auto blob_stats_offset =
+ BlobStatsToFbs(blob_result.blob_stats, builder);
+ const Point centroid_fbs =
+ Point{blob_result.centroid.x, blob_result.centroid.y};
- auto blob_result_builder = builder.MakeBuilder<BlobResult>();
+ auto blob_result_builder = builder.MakeBuilder<BlobResultFbs>();
blob_result_builder.add_filtered_blobs(filtered_blobs_offset);
blob_result_builder.add_unfiltered_blobs(unfiltered_blobs_offset);
blob_result_builder.add_blob_stats(blob_stats_offset);
@@ -109,9 +106,9 @@
}
auto target_estimate_builder = builder.MakeBuilder<TargetEstimate>();
- TargetEstimator::EstimateTargetLocation(centroid, CameraIntrinsics(),
- CameraExtrinsics(),
- &target_estimate_builder);
+ TargetEstimator::EstimateTargetLocation(
+ blob_result.centroid, CameraIntrinsics(), CameraExtrinsics(),
+ &target_estimate_builder);
target_estimate_builder.add_blob_result(blob_result_offset);
builder.CheckOk(builder.Send(target_estimate_builder.Finish()));
@@ -127,8 +124,8 @@
// that can be sent in a second.
std::this_thread::sleep_for(std::chrono::milliseconds(50));
LOG(INFO) << "Reading file " << file;
- cv::Mat rgb_image = cv::imread(file.c_str());
- ProcessImage(rgb_image);
+ cv::Mat bgr_image = cv::imread(file.c_str());
+ ProcessImage(bgr_image);
}
event_loop_->Exit();
return;
diff --git a/y2022/vision/target_estimate.fbs b/y2022/vision/target_estimate.fbs
index 207a37a..707014c 100644
--- a/y2022/vision/target_estimate.fbs
+++ b/y2022/vision/target_estimate.fbs
@@ -18,7 +18,7 @@
}
// Information for debugging blob detection
-table BlobResult {
+table BlobResultFbs {
// Blobs that passed the filtering step
filtered_blobs:[Blob] (id: 0);
// All detected blobs
@@ -37,7 +37,7 @@
// Positive means right of center, negative means left.
angle_to_target:double (id: 1);
- blob_result:BlobResult (id: 2);
+ blob_result:BlobResultFbs (id: 2);
// TODO(milind): add confidence
}
diff --git a/y2022/vision/viewer.cc b/y2022/vision/viewer.cc
index 7c13d1b..c9c2aff 100644
--- a/y2022/vision/viewer.cc
+++ b/y2022/vision/viewer.cc
@@ -68,41 +68,47 @@
// TODO(Milind) Store the target estimates and match them by timestamp to make
// sure we're getting the right one.
- CHECK(target_estimate_fetcher.FetchNext());
- const TargetEstimate *target = target_estimate_fetcher.get();
+ const TargetEstimate *target_est = nullptr;
+ if (target_estimate_fetcher.Fetch()) {
+ target_est = target_estimate_fetcher.get();
+ }
// Create color image:
cv::Mat image_color_mat(cv::Size(image->cols(), image->rows()), CV_8UC2,
(void *)image->data()->data());
- cv::Mat rgb_image(cv::Size(image->cols(), image->rows()), CV_8UC3);
- cv::cvtColor(image_color_mat, rgb_image, cv::COLOR_YUV2BGR_YUYV);
+ cv::Mat bgr_image(cv::Size(image->cols(), image->rows()), CV_8UC3);
+  cv::cvtColor(image_color_mat, bgr_image, cv::COLOR_YUV2BGR_YUYV);
if (!FLAGS_capture.empty()) {
- cv::imwrite(FLAGS_capture, rgb_image);
+ cv::imwrite(FLAGS_capture, bgr_image);
return false;
}
-  LOG(INFO) << image->monotonic_timestamp_ns()
-            << ": # blobs: " << target->blob_result()->filtered_blobs()->size();
+  if (target_est != nullptr) {
+    LOG(INFO) << image->monotonic_timestamp_ns() << ": # unfiltered blobs: "
+              << target_est->blob_result()->unfiltered_blobs()->size()
+              << "; # filtered blobs: "
+              << target_est->blob_result()->filtered_blobs()->size();
+  }
- cv::Mat ret_image;
- BlobDetector::DrawBlobs(
- ret_image, FbsToCvBlobs(*target->blob_result()->filtered_blobs()),
- FbsToCvBlobs(*target->blob_result()->unfiltered_blobs()),
- FbsToBlobStats(*target->blob_result()->blob_stats()),
- cv::Point{target->blob_result()->centroid()->x(),
- target->blob_result()->centroid()->y()});
+ cv::Mat ret_image(cv::Size(image->cols(), image->rows()), CV_8UC3);
+ if (target_est != nullptr) {
+ BlobDetector::DrawBlobs(
+ ret_image, FbsToCvBlobs(*target_est->blob_result()->filtered_blobs()),
+ FbsToCvBlobs(*target_est->blob_result()->unfiltered_blobs()),
+ FbsToBlobStats(*target_est->blob_result()->blob_stats()),
+ cv::Point{target_est->blob_result()->centroid()->x(),
+ target_est->blob_result()->centroid()->y()});
+ cv::imshow("blobs", ret_image);
+ }
- cv::imshow("image", rgb_image);
- cv::imshow("blobs", ret_image);
+ cv::imshow("image", bgr_image);
int keystroke = cv::waitKey(1);
if ((keystroke & 0xFF) == static_cast<int>('c')) {
// Convert again, to get clean image
- cv::cvtColor(image_color_mat, rgb_image, cv::COLOR_YUV2BGR_YUYV);
+ cv::cvtColor(image_color_mat, bgr_image, cv::COLOR_YUV2BGR_YUYV);
std::stringstream name;
name << "capture-" << aos::realtime_clock::now() << ".png";
- cv::imwrite(name.str(), rgb_image);
+ cv::imwrite(name.str(), bgr_image);
LOG(INFO) << "Saved image file: " << name.str();
} else if ((keystroke & 0xFF) == static_cast<int>('q')) {
return false;
@@ -119,6 +125,9 @@
image_fetcher =
event_loop.MakeFetcher<frc971::vision::CameraImage>(FLAGS_channel);
+ target_estimate_fetcher =
+ event_loop.MakeFetcher<y2022::vision::TargetEstimate>(FLAGS_channel);
+
// Run the display loop
event_loop.AddPhasedLoop(
[&event_loop](int) {
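
The viewer now tolerates a missing TargetEstimate: Fetch() latches the most recent message on the channel if one exists, whereas the old FetchNext() + CHECK aborted whenever no estimate had been published yet. The same pattern in isolation (a sketch; the generated header name and the "/camera" channel are assumptions, not taken from this diff):

#include "aos/events/event_loop.h"
#include "glog/logging.h"
#include "y2022/vision/target_estimate_generated.h"

// Fetch the latest TargetEstimate if one has been published, and fall back
// gracefully when none has.  In real code (like the viewer above) the fetcher
// is created once at startup and reused.
void LogLatestEstimate(aos::EventLoop *event_loop) {
  aos::Fetcher<y2022::vision::TargetEstimate> fetcher =
      event_loop->MakeFetcher<y2022::vision::TargetEstimate>("/camera");
  if (!fetcher.Fetch()) {
    LOG(INFO) << "No TargetEstimate yet";
    return;
  }
  const y2022::vision::TargetEstimate *target_est = fetcher.get();
  LOG(INFO) << "Filtered blobs: "
            << target_est->blob_result()->filtered_blobs()->size();
}
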
diff --git a/y2022/y2022_pi_template.json b/y2022/y2022_pi_template.json
index 823a7f5..07835a0 100644
--- a/y2022/y2022_pi_template.json
+++ b/y2022/y2022_pi_template.json
@@ -110,7 +110,8 @@
"type": "y2022.vision.TargetEstimate",
"source_node": "pi{{ NUM }}",
"frequency": 25,
- "num_senders": 2
+ "num_senders": 2,
+ "max_size": 20000
},
{
"name": "/logger/aos",