Merge changes Ie6b43814,I6bbcafa9
* changes:
Redo timer math for spurious events
Add scripts to modify a root filesystem for the vision pi
diff --git a/WORKSPACE b/WORKSPACE
index fb6e2cb..f14f10b 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -641,6 +641,13 @@
urls = ["http://www.frc971.org/Build-Dependencies/small_sample_logfile.fbs"],
)
+http_file(
+ name = "drivetrain_replay",
+ downloaded_file_path = "spinning_wheels_while_still.bfbs",
+ sha256 = "3fa56d9af0852798bdd00ea5cc02f8261ad2a389a12a054ba619f9f7c43ab6fd",
+ urls = ["http://www.frc971.org/Build-Dependencies/spinning_wheels_while_still.bfbs"],
+)
+
# OpenCV armhf (for raspberry pi)
http_archive(
name = "opencv_armhf",
diff --git a/aos/BUILD b/aos/BUILD
index 2bd49c1..12d25b2 100644
--- a/aos/BUILD
+++ b/aos/BUILD
@@ -1,5 +1,5 @@
load("//tools:environments.bzl", "mcu_cpus")
-load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_py_library")
+load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_py_library", "flatbuffer_ts_library")
filegroup(
name = "prime_binaries",
@@ -285,6 +285,12 @@
visibility = ["//visibility:public"],
)
+flatbuffer_ts_library(
+ name = "configuration_ts_fbs",
+ srcs = ["configuration.fbs"],
+ visibility = ["//visibility:public"],
+)
+
flatbuffer_py_library(
name = "configuration_fbs_python",
srcs = ["configuration.fbs"],
diff --git a/aos/network/BUILD b/aos/network/BUILD
index 96d561f..89853f0 100644
--- a/aos/network/BUILD
+++ b/aos/network/BUILD
@@ -13,6 +13,14 @@
],
)
+flatbuffer_ts_library(
+ name = "connect_ts_fbs",
+ srcs = ["connect.fbs"],
+ includes = [
+ "//aos:configuration_fbs_includes",
+ ],
+)
+
flatbuffer_cc_library(
name = "timestamp_fbs",
srcs = ["timestamp.fbs"],
diff --git a/aos/network/web_proxy.cc b/aos/network/web_proxy.cc
index 77371eb..f4a8ce8 100644
--- a/aos/network/web_proxy.cc
+++ b/aos/network/web_proxy.cc
@@ -212,6 +212,10 @@
const message_bridge::Connect *message =
flatbuffers::GetRoot<message_bridge::Connect>(buffer.data.data());
for (auto &subscriber : subscribers_) {
+    // The subscriber is nullptr if the channel is not readable on this node.
+ if (subscriber.get() == nullptr) {
+ continue;
+ }
bool found_match = false;
for (auto channel : *message->channels_to_transfer()) {
if (subscriber->Compare(channel)) {
diff --git a/aos/network/web_proxy_main.cc b/aos/network/web_proxy_main.cc
index 21b6f9c..479320a 100644
--- a/aos/network/web_proxy_main.cc
+++ b/aos/network/web_proxy_main.cc
@@ -15,13 +15,18 @@
std::vector<std::unique_ptr<aos::web_proxy::Subscriber>> *subscribers,
const aos::FlatbufferDetachedBuffer<aos::Configuration> &config) {
aos::ShmEventLoop event_loop(&config.message());
+ const aos::Node *self = aos::configuration::GetMyNode(&config.message());
// TODO(alex): skip fetchers on the wrong node.
for (uint i = 0; i < config.message().channels()->size(); ++i) {
auto channel = config.message().channels()->Get(i);
- auto fetcher = event_loop.MakeRawFetcher(channel);
- subscribers->emplace_back(
- std::make_unique<aos::web_proxy::Subscriber>(std::move(fetcher), i));
+ if (!aos::configuration::ChannelIsReadableOnNode(channel, self)) {
+ subscribers->emplace_back(nullptr);
+ } else {
+ auto fetcher = event_loop.MakeRawFetcher(channel);
+ subscribers->emplace_back(
+ std::make_unique<aos::web_proxy::Subscriber>(std::move(fetcher), i));
+ }
}
flatbuffers::FlatBufferBuilder fbb(1024);
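
The two hunks above cooperate: web_proxy_main.cc leaves a nullptr slot in subscribers_ for every channel that is not readable on this node, so subscriber indices stay aligned with channel indices in the config, and web_proxy.cc skips those slots when matching Connect requests. A minimal sketch of that pattern, with hypothetical stand-in types (not the real aos classes):

    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    // Hypothetical stand-ins for the real configuration/subscriber types.
    struct Channel {
      std::string name;
      bool readable_on_this_node;
    };
    struct Subscriber {
      explicit Subscriber(size_t index) : index(index) {}
      size_t index;
    };

    int main() {
      const std::vector<Channel> channels = {
          {"/drivetrain", true}, {"/camera", false}, {"/imu", true}};
      // One slot per channel, so subscriber index == channel index even when
      // some channels are skipped.
      std::vector<std::unique_ptr<Subscriber>> subscribers;
      for (size_t i = 0; i < channels.size(); ++i) {
        if (!channels[i].readable_on_this_node) {
          subscribers.emplace_back(nullptr);
        } else {
          subscribers.emplace_back(std::make_unique<Subscriber>(i));
        }
      }
      // Consumers must skip the null slots, as the web_proxy.cc hunk now does.
      for (const auto &subscriber : subscribers) {
        if (subscriber == nullptr) {
          continue;
        }
        std::cout << "subscriber for channel " << subscriber->index << "\n";
      }
      return 0;
    }
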
diff --git a/aos/network/www/BUILD b/aos/network/www/BUILD
index 76e8ef4..bab5198 100644
--- a/aos/network/www/BUILD
+++ b/aos/network/www/BUILD
@@ -18,6 +18,8 @@
],
deps = [
"//aos/network:web_proxy_ts_fbs",
+ "//aos/network:connect_ts_fbs",
+ "//aos:configuration_ts_fbs",
],
visibility=["//visibility:public"],
)
diff --git a/aos/network/www/config_handler.ts b/aos/network/www/config_handler.ts
index 0af78b6..72b496e 100644
--- a/aos/network/www/config_handler.ts
+++ b/aos/network/www/config_handler.ts
@@ -1,10 +1,11 @@
-import {aos} from 'aos/network/web_proxy_generated';
+import {Configuration, Channel} from 'aos/configuration_generated';
+import {Connect} from 'aos/network/connect_generated';
export class ConfigHandler {
private readonly root_div = document.getElementById('config');
constructor(
- private readonly config: aos.Configuration,
+ private readonly config: Configuration,
private readonly dataChannel: RTCDataChannel) {}
printConfig() {
@@ -46,19 +47,18 @@
const channel = this.config.channels(index);
const namefb = builder.createString(channel.name());
const typefb = builder.createString(channel.type());
- aos.Channel.startChannel(builder);
- aos.Channel.addName(builder, namefb);
- aos.Channel.addType(builder, typefb);
- const channelfb = aos.Channel.endChannel(builder);
+ Channel.startChannel(builder);
+ Channel.addName(builder, namefb);
+ Channel.addType(builder, typefb);
+ const channelfb = Channel.endChannel(builder);
channels.push(channelfb);
}
const channelsfb =
- aos.message_bridge.Connect.createChannelsToTransferVector(
- builder, channels);
- aos.message_bridge.Connect.startConnect(builder);
- aos.message_bridge.Connect.addChannelsToTransfer(builder, channelsfb);
- const connect = aos.message_bridge.Connect.endConnect(builder);
+ Connect.createChannelsToTransferVector(builder, channels);
+ Connect.startConnect(builder);
+ Connect.addChannelsToTransfer(builder, channelsfb);
+ const connect = Connect.endConnect(builder);
builder.finish(connect);
const array = builder.asUint8Array();
console.log('connect', array);
diff --git a/aos/network/www/ping_handler.ts b/aos/network/www/ping_handler.ts
index 9b37d70..df7635d 100644
--- a/aos/network/www/ping_handler.ts
+++ b/aos/network/www/ping_handler.ts
@@ -1,8 +1,8 @@
-import {aos} from 'aos/events/ping_generated';
+import {Ping} from 'aos/events/ping_generated';
export function HandlePing(data: Uint8Array) {
const fbBuffer = new flatbuffers.ByteBuffer(data);
- const ping = aos.examples.Ping.getRootAsPing(fbBuffer);
+ const ping = Ping.getRootAsPing(fbBuffer);
document.getElementById('val').innerHTML = ping.value();
document.getElementById('time').innerHTML = ping.sendTime().low;
diff --git a/aos/network/www/proxy.ts b/aos/network/www/proxy.ts
index 27ffbfe..80c71b2 100644
--- a/aos/network/www/proxy.ts
+++ b/aos/network/www/proxy.ts
@@ -1,5 +1,6 @@
-import {aos} from 'aos/network/web_proxy_generated';
+import * as WebProxy from 'aos/network/web_proxy_generated';
import {ConfigHandler} from './config_handler';
+import {Configuration} from 'aos/configuration_generated';
// There is one handler for each DataChannel, it maintains the state of
// multi-part messages and delegates to a callback when the message is fully
@@ -16,7 +16,7 @@
handleMessage(e: MessageEvent): void {
const fbBuffer = new flatbuffers.ByteBuffer(new Uint8Array(e.data));
const messageHeader =
- aos.web_proxy.MessageHeader.getRootAsMessageHeader(fbBuffer);
+ WebProxy.MessageHeader.getRootAsMessageHeader(fbBuffer);
// Short circuit if only one packet
if (messageHeader.packetCount === 1) {
this.handlerFunc(messageHeader.dataArray());
@@ -44,8 +44,8 @@
private rtcPeerConnection: RTCPeerConnection|null = null;
private dataChannel: DataChannel|null = null;
private webSocketUrl: string;
- private configHandler: ConfigHandler|null =
- null private config: aos.Configuration|null = null;
+ private configHandler: ConfigHandler|null = null;
+ private config: Configuration|null = null;
private readonly handlerFuncs = new Map<string, (data: Uint8Array) => void>();
private readonly handlers = new Set<Handler>();
@@ -98,10 +98,10 @@
const candidateString = builder.createString(candidate.candidate);
const sdpMidString = builder.createString(candidate.sdpMid);
- const iceFb = aos.web_proxy.WebSocketIce.createWebSocketIce(
+ const iceFb = WebProxy.WebSocketIce.createWebSocketIce(
builder, candidateString, sdpMidString, candidate.sdpMLineIndex);
- const messageFb = aos.web_proxy.WebSocketMessage.createWebSocketMessage(
- builder, aos.web_proxy.Payload.WebSocketIce, iceFb);
+ const messageFb = WebProxy.WebSocketMessage.createWebSocketMessage(
+ builder, WebProxy.Payload.WebSocketIce, iceFb);
builder.finish(messageFb);
const array = builder.asUint8Array();
this.webSocketConnection.send(array.buffer.slice(array.byteOffset));
@@ -113,10 +113,10 @@
const builder = new flatbuffers.Builder(512);
const offerString = builder.createString(description.sdp);
- const webSocketSdp = aos.web_proxy.WebSocketSdp.createWebSocketSdp(
- builder, aos.web_proxy.SdpType.OFFER, offerString);
- const message = aos.web_proxy.WebSocketMessage.createWebSocketMessage(
- builder, aos.web_proxy.Payload.WebSocketSdp, webSocketSdp);
+ const webSocketSdp = WebProxy.WebSocketSdp.createWebSocketSdp(
+ builder, WebProxy.SdpType.OFFER, offerString);
+ const message = WebProxy.WebSocketMessage.createWebSocketMessage(
+ builder, WebProxy.Payload.WebSocketSdp, webSocketSdp);
builder.finish(message);
const array = builder.asUint8Array();
this.webSocketConnection.send(array.buffer.slice(array.byteOffset));
@@ -145,19 +145,19 @@
const buffer = new Uint8Array(e.data)
const fbBuffer = new flatbuffers.ByteBuffer(buffer);
const message =
- aos.web_proxy.WebSocketMessage.getRootAsWebSocketMessage(fbBuffer);
+ WebProxy.WebSocketMessage.getRootAsWebSocketMessage(fbBuffer);
switch (message.payloadType()) {
- case aos.web_proxy.Payload.WebSocketSdp:
- const sdpFb = message.payload(new aos.web_proxy.WebSocketSdp());
- if (sdpFb.type() !== aos.web_proxy.SdpType.ANSWER) {
+ case WebProxy.Payload.WebSocketSdp:
+ const sdpFb = message.payload(new WebProxy.WebSocketSdp());
+ if (sdpFb.type() !== WebProxy.SdpType.ANSWER) {
console.log('got something other than an answer back');
break;
}
this.rtcPeerConnection.setRemoteDescription(new RTCSessionDescription(
{'type': 'answer', 'sdp': sdpFb.payload()}));
break;
- case aos.web_proxy.Payload.WebSocketIce:
- const iceFb = message.payload(new aos.web_proxy.WebSocketIce());
+ case WebProxy.Payload.WebSocketIce:
+ const iceFb = message.payload(new WebProxy.WebSocketIce());
const candidate = {} as RTCIceCandidateInit;
candidate.candidate = iceFb.candidate();
candidate.sdpMid = iceFb.sdpMid();
diff --git a/frc971/control_loops/drivetrain/BUILD b/frc971/control_loops/drivetrain/BUILD
index 2c94473..9fef8f9 100644
--- a/frc971/control_loops/drivetrain/BUILD
+++ b/frc971/control_loops/drivetrain/BUILD
@@ -152,6 +152,7 @@
hdrs = ["hybrid_ekf.h"],
deps = [
":drivetrain_config",
+ "//aos:math",
"//aos/containers:priority_queue",
"//aos/util:math",
"//frc971/control_loops:c2d",
@@ -184,7 +185,9 @@
hdrs = ["localizer.h"],
deps = [
":drivetrain_config",
+ ":drivetrain_status_fbs",
":hybrid_ekf",
+ "//frc971/control_loops:control_loops_fbs",
"//frc971/control_loops:pose",
],
)
diff --git a/frc971/control_loops/drivetrain/drivetrain.cc b/frc971/control_loops/drivetrain/drivetrain.cc
index 7dfd0e6..a19f57b 100644
--- a/frc971/control_loops/drivetrain/drivetrain.cc
+++ b/frc971/control_loops/drivetrain/drivetrain.cc
@@ -243,7 +243,9 @@
}
localizer_->Update({last_last_left_voltage_, last_last_right_voltage_},
monotonic_now, position->left_encoder(),
- position->right_encoder(), last_gyro_rate_, last_accel_);
+ position->right_encoder(),
+ down_estimator_.avg_recent_yaw_rates(),
+ down_estimator_.avg_recent_accel());
}
dt_openloop_.SetPosition(position, left_gear_, right_gear_);
@@ -321,6 +323,9 @@
const flatbuffers::Offset<DownEstimatorState> down_estimator_state_offset =
down_estimator_.PopulateStatus(status->fbb(), monotonic_now);
+ const flatbuffers::Offset<LocalizerState> localizer_offset =
+ localizer_->PopulateStatus(status->fbb());
+
const flatbuffers::Offset<ImuZeroerState> zeroer_offset =
imu_zeroer_.PopulateStatus(status->fbb());
@@ -363,6 +368,7 @@
builder.add_line_follow_logging(line_follow_logging_offset);
builder.add_trajectory_logging(trajectory_logging_offset);
builder.add_down_estimator(down_estimator_state_offset);
+ builder.add_localizer(localizer_offset);
builder.add_zeroing(zeroer_offset);
status->Send(builder.Finish());
}
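
The localizer now receives averaged IMU readings from the down estimator instead of the raw last sample, which filters out sensor noise accumulated between control loop iterations. This diff doesn't show how avg_recent_yaw_rates()/avg_recent_accel() are computed; the sketch below is only a generic fixed-window moving average with hypothetical names, not the actual down estimator code:

    #include <algorithm>
    #include <array>
    #include <cstddef>
    #include <iostream>

    // Hypothetical fixed-window moving average over the last N samples.
    template <size_t N>
    class MovingAverage {
     public:
      void AddSample(double sample) {
        sum_ += sample - samples_[next_];
        samples_[next_] = sample;
        next_ = (next_ + 1) % N;
        count_ = std::min(count_ + 1, N);
      }
      double Average() const { return count_ == 0 ? 0.0 : sum_ / count_; }

     private:
      std::array<double, N> samples_ = {};
      size_t next_ = 0;
      size_t count_ = 0;
      double sum_ = 0.0;
    };

    int main() {
      MovingAverage<10> yaw_rates;
      for (double sample : {0.10, 0.12, 0.08, 0.11, 0.09}) {
        yaw_rates.AddSample(sample);
      }
      std::cout << yaw_rates.Average() << "\n";  // 0.1 rad/s
      return 0;
    }
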
diff --git a/frc971/control_loops/drivetrain/drivetrain.h b/frc971/control_loops/drivetrain/drivetrain.h
index f7924c5..b766db8 100644
--- a/frc971/control_loops/drivetrain/drivetrain.h
+++ b/frc971/control_loops/drivetrain/drivetrain.h
@@ -38,6 +38,8 @@
LocalizerInterface *localizer,
const ::std::string &name = "/drivetrain");
+ virtual ~DrivetrainLoop() {}
+
int ControllerIndexFromGears();
protected:
diff --git a/frc971/control_loops/drivetrain/drivetrain_lib_test.cc b/frc971/control_loops/drivetrain/drivetrain_lib_test.cc
index 1de5d40..3c206b3 100644
--- a/frc971/control_loops/drivetrain/drivetrain_lib_test.cc
+++ b/frc971/control_loops/drivetrain/drivetrain_lib_test.cc
@@ -82,9 +82,9 @@
void VerifyNearGoal() {
drivetrain_goal_fetcher_.Fetch();
EXPECT_NEAR(drivetrain_goal_fetcher_->left_goal(),
- drivetrain_plant_.GetLeftPosition(), 1e-3);
+ drivetrain_plant_.GetLeftPosition(), 1e-2);
EXPECT_NEAR(drivetrain_goal_fetcher_->right_goal(),
- drivetrain_plant_.GetRightPosition(), 1e-3);
+ drivetrain_plant_.GetRightPosition(), 1e-2);
}
void VerifyNearPosition(double x, double y) {
@@ -100,8 +100,8 @@
const double expected_y =
CHECK_NOTNULL(drivetrain_status_fetcher_->trajectory_logging())->y();
const ::Eigen::Vector2d actual = drivetrain_plant_.GetPosition();
- EXPECT_NEAR(actual(0), expected_x, 3e-2);
- EXPECT_NEAR(actual(1), expected_y, 3e-2);
+ EXPECT_NEAR(actual(0), expected_x, 4e-2);
+ EXPECT_NEAR(actual(1), expected_y, 4e-2);
}
void WaitForTrajectoryPlan() {
@@ -131,7 +131,7 @@
drivetrain_status_fetcher_->theta();
EXPECT_LT(
std::abs(aos::math::DiffAngle(down_estimator_yaw, localizer_yaw)),
- 1e-5);
+ 1e-2);
const double true_yaw = (drivetrain_plant_.GetRightPosition() -
drivetrain_plant_.GetLeftPosition()) /
(dt_config_.robot_radius * 2.0);
diff --git a/frc971/control_loops/drivetrain/hybrid_ekf.h b/frc971/control_loops/drivetrain/hybrid_ekf.h
index 3f3a7f7..d2c0be3 100644
--- a/frc971/control_loops/drivetrain/hybrid_ekf.h
+++ b/frc971/control_loops/drivetrain/hybrid_ekf.h
@@ -4,6 +4,7 @@
#include <chrono>
#include "Eigen/Dense"
+#include "aos/commonmath.h"
#include "aos/containers/priority_queue.h"
#include "aos/util/math.h"
#include "frc971/control_loops/c2d.h"
@@ -37,24 +38,104 @@
// measurement updates with an encoder/gyro as well as a more generic
// update function that can be used for arbitrary nonlinear updates (presumably
// a camera update).
+//
+// Discussion of the model:
+// In the current model, we try to rely primarily on IMU measurements for
+// estimating robot state--we also need additional information (some combination
+// of output voltages, encoders, and camera data) to help eliminate the biases
+// that can accumulate due to integration of IMU data.
+// We use IMU measurements as inputs rather than measurement outputs because
+// that seemed to be easier to implement. I tried initially running with
+// the IMU as a measurement, but it seemed to blow up the complexity of the
+// model.
+//
+// On each prediction update, we take in inputs of the left/right voltages and
+// the current measured longitudinal/lateral accelerations. In the current
+// setup, the accelerometer readings will be used for estimating the
+// evolution of the longitudinal/lateral velocities. The voltages (and voltage
+// errors) will solely be used for estimating the current rotational velocity of
+// the robot (I do this because currently I suspect that the accelerometer is a
+// much better indicator of current robot state than the voltages). We also
+// deliberately decay all of the velocity estimates towards zero to help address
+// potential accelerometer biases. We use two separate decay models:
+// -The longitudinal velocity is modelled as decaying at a constant rate (see
+// the documentation on the VelocityAccel() method)--this needs a more
+// complex model because the robot will, under normal circumstances, be
+// travelling at non-zero velocities.
+// -The lateral velocity is modelled as exponentially decaying towards zero.
+// This is simpler to model and should be reasonably valid, since we will
+// not *normally* be travelling sideways consistently (this assumption may
+// need to be revisited).
+// -The "longitudinal velocity offset" (described below) also uses an
+// exponential decay, albeit with a different time constant. A future
+// improvement may remove the decay modelling on the longitudinal velocity
+// itself and instead use that decay model on the longitudinal velocity offset.
+// This would place a bit more trust in the encoder measurements but also
+// more correctly model situations where the robot is legitimately moving at
+// a certain velocity.
+//
+// For modelling how the drivetrain encoders evolve, and to help prevent the
+// aforementioned decay functions from affecting legitimate high-velocity
+// maneuvers too much, we have a "longitudinal velocity offset" term. This term
+// models the difference between the actual longitudinal velocity of the robot
+// (estimated by the average of the left/right velocities) and the velocity
+// experienced by the wheels (which can be observed from the encoders more
+// directly). Because we model this velocity offset as decaying towards zero,
+// what this will do is allow the encoders to be a constant velocity off from
+// the accelerometer updates for short periods of time but then gradually
+// pull the "actual" longitudinal velocity offset towards that of the encoders,
+// helping to reduce constant biases.
template <typename Scalar = double>
class HybridEkf {
public:
// An enum specifying what each index in the state vector is for.
enum StateIdx {
+ // Current X/Y position, in meters, of the robot.
kX = 0,
kY = 1,
+ // Current heading of the robot.
kTheta = 2,
+ // Current estimated encoder reading of the left wheels, in meters.
+ // Rezeroed once on startup.
kLeftEncoder = 3,
+ // Current estimated actual velocity of the left side of the robot, in m/s.
kLeftVelocity = 4,
+ // Same variables, for the right side of the robot.
kRightEncoder = 5,
kRightVelocity = 6,
+ // Estimated offset to input voltage. Used as a generic error term, Volts.
kLeftVoltageError = 7,
kRightVoltageError = 8,
+ // These error terms are used to estimate the difference between the actual
+ // movement of the drivetrain and that implied by the wheel odometry.
+ // Angular error effectively estimates a constant angular rate offset of the
+ // encoders relative to the actual rotation of the robot.
+ // Semi-arbitrary units (we don't bother accounting for robot radius in
+ // this).
kAngularError = 9,
+ // Estimate of slip between the drivetrain wheels and the actual
+ // forwards/backwards velocity of the robot, in m/s.
+ // I.e., (left velocity + right velocity) / 2.0 = (left wheel velocity +
+ // right wheel velocity) / 2.0 + longitudinal velocity offset
+ kLongitudinalVelocityOffset = 10,
+ // Current estimate of the lateral velocity of the robot, in m/s.
+ // Positive implies the robot is moving to its left.
+ kLateralVelocity = 11,
};
- static constexpr int kNStates = 10;
- static constexpr int kNInputs = 2;
+ static constexpr int kNStates = 12;
+ enum InputIdx {
+ // Left/right drivetrain voltages.
+ kLeftVoltage = 0,
+ kRightVoltage = 1,
+ // Current accelerometer readings, in m/s/s, along the longitudinal and
+ // lateral axes of the robot. Should be projected onto the X/Y plane, to
+ // compensate for tilt of the robot before being passed to this filter. The
+ // HybridEkf has no knowledge of the current pitch/roll of the robot, and so
+ // can't do anything to compensate for it.
+ kLongitudinalAccel = 2,
+ kLateralAccel = 3,
+ };
+ static constexpr int kNInputs = 4;
// Number of previous samples to save.
static constexpr int kSaveSamples = 50;
// Assume that all correction steps will have kNOutputs
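
A quick way to see why the discussion above prefers a constant-rate decay for the longitudinal velocity over a plain exponential: integrate both decay laws from the same starting speed. A minimal, self-contained sketch (the constants mirror kMaxVelocityAccel/kMaxVelocityGain defined later in this file, and the 1 s exponential time constant mirrors kLateralVelocityTimeConstant; the starting speed and loop time are made up):

    #include <algorithm>
    #include <iostream>

    // Constant-rate decay with an exponential region near zero, mirroring
    // VelocityAccel() later in this file.
    constexpr double kMaxVelocityAccel = 0.005;  // m/s/s
    constexpr double kMaxVelocityGain = 1.0;     // 1/s
    double ConstantRateDecay(double velocity) {
      return -std::clamp(kMaxVelocityGain * velocity, -kMaxVelocityAccel,
                         kMaxVelocityAccel);
    }

    // Plain exponential decay with a 1 s time constant, as used for the
    // lateral velocity.
    double ExponentialDecay(double velocity) { return -velocity / 1.0; }

    int main() {
      constexpr double kDt = 0.005;  // 5 ms loop time
      double v_constant = 3.0;       // robot driving at 3 m/s
      double v_exponential = 3.0;
      for (int i = 0; i < 2000; ++i) {  // simulate 10 seconds
        v_constant += ConstantRateDecay(v_constant) * kDt;
        v_exponential += ExponentialDecay(v_exponential) * kDt;
      }
      // The constant-rate estimate has lost just 0.05 m/s (0.005 m/s/s over
      // 10 s), while the exponential estimate has collapsed to nearly zero;
      // an exponential decay would badly underestimate sustained high speeds.
      std::cout << v_constant << " vs " << v_exponential << "\n";
      return 0;
    }
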
@@ -63,6 +144,10 @@
// figuring out how to deal with storing variable size
// observation matrices, though.
static constexpr int kNOutputs = 3;
+  // Time constants, in seconds, governing the decay of the longitudinal
+  // velocity offset and of the lateral velocity.
+ static constexpr double kVelocityOffsetTimeConstant = 10.0;
+ static constexpr double kLateralVelocityTimeConstant = 1.0;
-  // Inputs are [left_volts, right_volts]
+  // Inputs are as documented in the InputIdx enum, above.
typedef Eigen::Matrix<Scalar, kNInputs, 1> Input;
// Outputs are either:
@@ -71,19 +156,7 @@
// variable-size measurement updates.
typedef Eigen::Matrix<Scalar, kNOutputs, 1> Output;
typedef Eigen::Matrix<Scalar, kNStates, kNStates> StateSquare;
- // State is [x_position, y_position, theta, Kalman States], where
- // Kalman States are the states from the standard drivetrain Kalman Filter,
- // which is: [left encoder, left ground vel, right encoder, right ground vel,
- // left voltage error, right voltage error, angular_error], where:
- // left/right encoder should correspond directly to encoder readings
- // left/right velocities are the velocity of the left/right sides over the
- // ground (i.e., corrected for angular_error).
- // voltage errors are the difference between commanded and effective voltage,
- // used to estimate consistent modelling errors (e.g., friction).
- // angular error is the difference between the angular velocity as estimated
- // by the encoders vs. estimated by the gyro, such as might be caused by
- // wheels on one side of the drivetrain being too small or one side's
- // wheels slipping more than the other.
+ // State contains the states defined by the StateIdx enum. See comments there.
typedef Eigen::Matrix<Scalar, kNStates, 1> State;
// Constructs a HybridEkf for a particular drivetrain.
@@ -136,28 +209,39 @@
// matrix for linear cases?
void Correct(
const Output &z, const Input *U,
- ::std::function<
+ std::function<
void(const State &, const StateSquare &,
- ::std::function<Output(const State &, const Input &)> *,
- ::std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(
- const State &)> *)>
- make_h,
- ::std::function<Output(const State &, const Input &)> h,
- ::std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
- dhdx,
- const Eigen::Matrix<Scalar, kNOutputs, kNOutputs> &R,
+ std::function<Output(const State &, const Input &)> *,
+ std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(
+ const State &)> *)> make_h,
+ std::function<Output(const State &, const Input &)> h,
+ std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
+ dhdx, const Eigen::Matrix<Scalar, kNOutputs, kNOutputs> &R,
aos::monotonic_clock::time_point t);
// A utility function for specifically updating with encoder and gyro
// measurements.
void UpdateEncodersAndGyro(const Scalar left_encoder,
const Scalar right_encoder, const Scalar gyro_rate,
- const Input &U,
- ::aos::monotonic_clock::time_point t) {
+ const Eigen::Matrix<Scalar, 2, 1> &voltage,
+ const Eigen::Matrix<Scalar, 3, 1> &accel,
+ aos::monotonic_clock::time_point t) {
+ Input U;
+ U.template block<2, 1>(0, 0) = voltage;
+ U.template block<2, 1>(kLongitudinalAccel, 0) =
+ accel.template block<2, 1>(0, 0);
+ RawUpdateEncodersAndGyro(left_encoder, right_encoder, gyro_rate, U, t);
+ }
+  // Version of UpdateEncodersAndGyro that takes an input matrix rather than
+ // taking in a voltage/acceleration separately.
+ void RawUpdateEncodersAndGyro(const Scalar left_encoder,
+ const Scalar right_encoder,
+ const Scalar gyro_rate, const Input &U,
+ aos::monotonic_clock::time_point t) {
// Because the check below for have_zeroed_encoders_ will add an
// Observation, do a check here to ensure that initialization has been
// performed and so there is at least one observation.
- AOS_CHECK(!observations_.empty());
+ CHECK(!observations_.empty());
if (!have_zeroed_encoders_) {
// This logic handles ensuring that on the first encoder reading, we
// update the internal state for the encoders to match the reading.
@@ -165,35 +249,49 @@
// wpilib_interface, then we can get some obnoxious initial corrections
// that mess up the localization.
State newstate = X_hat_;
- newstate(kLeftEncoder, 0) = left_encoder;
- newstate(kRightEncoder, 0) = right_encoder;
- newstate(kLeftVoltageError, 0) = 0.0;
- newstate(kRightVoltageError, 0) = 0.0;
- newstate(kAngularError, 0) = 0.0;
+ newstate(kLeftEncoder) = left_encoder;
+ newstate(kRightEncoder) = right_encoder;
+ newstate(kLeftVoltageError) = 0.0;
+ newstate(kRightVoltageError) = 0.0;
+ newstate(kAngularError) = 0.0;
+ newstate(kLongitudinalVelocityOffset) = 0.0;
+ newstate(kLateralVelocity) = 0.0;
ResetInitialState(t, newstate, P_);
// We need to set have_zeroed_encoders_ after ResetInitialPosition because
// the reset clears have_zeroed_encoders_...
have_zeroed_encoders_ = true;
}
+
Output z(left_encoder, right_encoder, gyro_rate);
+
Eigen::Matrix<Scalar, kNOutputs, kNOutputs> R;
R.setZero();
R.diagonal() << encoder_noise_, encoder_noise_, gyro_noise_;
- Correct(z, &U, {},
- [this](const State &X, const Input &) {
- return H_encoders_and_gyro_ * X;
- },
- [this](const State &) { return H_encoders_and_gyro_; }, R, t);
+ Correct(
+ z, &U, {},
+ [this](const State &X, const Input &) {
+ return H_encoders_and_gyro_ * X;
+ },
+ [this](const State &) { return H_encoders_and_gyro_; }, R, t);
}
// Sundry accessor:
State X_hat() const { return X_hat_; }
- Scalar X_hat(long i) const { return X_hat_(i, 0); }
+ Scalar X_hat(long i) const { return X_hat_(i); }
StateSquare P() const { return P_; }
- ::aos::monotonic_clock::time_point latest_t() const {
+ aos::monotonic_clock::time_point latest_t() const {
return observations_.top().t;
}
+ static Scalar CalcLongitudinalVelocity(const State &X) {
+ return (X(kLeftVelocity) + X(kRightVelocity)) / 2.0;
+ }
+
+ Scalar CalcYawRate(const State &X) const {
+ return (X(kRightVelocity) - X(kLeftVelocity)) / 2.0 /
+ dt_config_.robot_radius;
+ }
+
private:
struct Observation {
// Time when the observation was taken.
@@ -213,16 +311,14 @@
// estimate. This is used by the camera to make it so that we only have to
// match targets once.
// Only called if h and dhdx are empty.
- ::std::function<void(
- const State &, const StateSquare &,
- ::std::function<Output(const State &, const Input &)> *,
- ::std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(
- const State &)> *)>
- make_h;
+ std::function<void(const State &, const StateSquare &,
+ std::function<Output(const State &, const Input &)> *,
+ std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(
+ const State &)> *)> make_h;
// A function to calculate the expected output at a given state/input.
// TODO(james): For encoders/gyro, it is linear and the function call may
// be expensive. Potential source of optimization.
- ::std::function<Output(const State &, const Input &)> h;
+ std::function<Output(const State &, const Input &)> h;
// The Jacobian of h with respect to x.
// We assume that U has no impact on the Jacobian.
// TODO(james): Currently, none of the users of this actually make use of
@@ -230,7 +326,7 @@
// recalculate it to be strictly correct, but I was both too lazy to do
// so and it seemed unnecessary). This is a potential source for future
// optimizations if function calls are being expensive.
- ::std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
+ std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
dhdx;
// The measurement noise matrix.
Eigen::Matrix<Scalar, kNOutputs, kNOutputs> R;
@@ -244,33 +340,116 @@
void InitializeMatrices();
- StateSquare AForState(const State &X) const {
+ // These constants and functions define how the longitudinal velocity
+ // (the average of the left and right velocities) decays. We model it as
+ // decaying at a constant rate, except very near zero where the decay rate is
+ // exponential (this is more numerically stable than just using a constant
+ // rate the whole time). We use this model rather than a simpler exponential
+ // decay because an exponential decay will result in the robot's velocity
+ // estimate consistently being far too low when at high velocities, and since
+  // the accelerometer-based estimate of the velocity will only drift at a
+ // relatively slow rate and doesn't get worse at higher velocities, we can
+ // safely decay pretty slowly.
+ static constexpr double kMaxVelocityAccel = 0.005;
+ static constexpr double kMaxVelocityGain = 1.0;
+ static Scalar VelocityAccel(Scalar velocity) {
+ return -std::clamp(kMaxVelocityGain * velocity, -kMaxVelocityAccel,
+ kMaxVelocityAccel);
+ }
+
+ static Scalar VelocityAccelDiff(Scalar velocity) {
+ return (std::abs(kMaxVelocityGain * velocity) > kMaxVelocityAccel)
+ ? 0.0
+ : -kMaxVelocityGain;
+ }
+
+ // Returns the "A" matrix for a given state. See DiffEq for discussion of
+ // ignore_accel.
+ StateSquare AForState(const State &X, bool ignore_accel = false) const {
+ // Calculate the A matrix for a given state. Note that A = partial Xdot /
+ // partial X. This is distinct from saying that Xdot = A * X. This is
+ // particularly relevant for the (kX, kTheta) members which otherwise seem
+ // odd.
StateSquare A_continuous = A_continuous_;
- const Scalar theta = X(kTheta, 0);
- const Scalar linear_vel =
- (X(kLeftVelocity, 0) + X(kRightVelocity, 0)) / 2.0;
- const Scalar stheta = ::std::sin(theta);
- const Scalar ctheta = ::std::cos(theta);
+ const Scalar theta = X(kTheta);
+ const Scalar stheta = std::sin(theta);
+ const Scalar ctheta = std::cos(theta);
+ const Scalar lng_vel = CalcLongitudinalVelocity(X);
+ const Scalar lat_vel = X(kLateralVelocity);
+ const Scalar diameter = 2.0 * dt_config_.robot_radius;
+ const Scalar yaw_rate = CalcYawRate(X);
// X and Y derivatives
- A_continuous(kX, kTheta) = -stheta * linear_vel;
+ A_continuous(kX, kTheta) =
+ -stheta * lng_vel - ctheta * lat_vel;
A_continuous(kX, kLeftVelocity) = ctheta / 2.0;
A_continuous(kX, kRightVelocity) = ctheta / 2.0;
- A_continuous(kY, kTheta) = ctheta * linear_vel;
+ A_continuous(kX, kLateralVelocity) = -stheta;
+ A_continuous(kY, kTheta) = ctheta * lng_vel - stheta * lat_vel;
A_continuous(kY, kLeftVelocity) = stheta / 2.0;
A_continuous(kY, kRightVelocity) = stheta / 2.0;
+ A_continuous(kY, kLateralVelocity) = ctheta;
+
+ if (!ignore_accel) {
+ const Eigen::Matrix<Scalar, 1, kNStates> lng_vel_row =
+ (A_continuous.row(kLeftVelocity) + A_continuous.row(kRightVelocity)) /
+ 2.0;
+ A_continuous.row(kLeftVelocity) -= lng_vel_row;
+ A_continuous.row(kRightVelocity) -= lng_vel_row;
+ // Terms to account for centripetal accelerations.
+ // lateral centripetal accel = -yaw_rate * lng_vel
+ A_continuous(kLateralVelocity, kLeftVelocity) +=
+ X(kLeftVelocity) / diameter;
+ A_continuous(kLateralVelocity, kRightVelocity) +=
+ -X(kRightVelocity) / diameter;
+ A_continuous(kRightVelocity, kLateralVelocity) += yaw_rate;
+ A_continuous(kLeftVelocity, kLateralVelocity) += yaw_rate;
+ const Scalar dlng_accel_dwheel_vel = X(kLateralVelocity) / diameter;
+ A_continuous(kRightVelocity, kRightVelocity) += dlng_accel_dwheel_vel;
+ A_continuous(kLeftVelocity, kRightVelocity) += dlng_accel_dwheel_vel;
+ A_continuous(kRightVelocity, kLeftVelocity) += -dlng_accel_dwheel_vel;
+ A_continuous(kLeftVelocity, kLeftVelocity) += -dlng_accel_dwheel_vel;
+
+ A_continuous(kRightVelocity, kRightVelocity) +=
+ VelocityAccelDiff(lng_vel) / 2.0;
+ A_continuous(kRightVelocity, kLeftVelocity) +=
+ VelocityAccelDiff(lng_vel) / 2.0;
+ A_continuous(kLeftVelocity, kRightVelocity) +=
+ VelocityAccelDiff(lng_vel) / 2.0;
+ A_continuous(kLeftVelocity, kLeftVelocity) +=
+ VelocityAccelDiff(lng_vel) / 2.0;
+ }
return A_continuous;
}
- State DiffEq(const State &X, const Input &U) const {
+ // Returns dX / dt given X and U. If ignore_accel is set, then we ignore the
+ // accelerometer-based components of U (this is solely used in testing).
+ State DiffEq(const State &X, const Input &U,
+ bool ignore_accel = false) const {
State Xdot = A_continuous_ * X + B_continuous_ * U;
// And then we need to add on the terms for the x/y change:
- const Scalar theta = X(kTheta, 0);
- const Scalar linear_vel =
- (X(kLeftVelocity, 0) + X(kRightVelocity, 0)) / 2.0;
- const Scalar stheta = ::std::sin(theta);
- const Scalar ctheta = ::std::cos(theta);
- Xdot(kX, 0) = ctheta * linear_vel;
- Xdot(kY, 0) = stheta * linear_vel;
+ const Scalar theta = X(kTheta);
+ const Scalar lng_vel = CalcLongitudinalVelocity(X);
+ const Scalar lat_vel = X(kLateralVelocity);
+ const Scalar stheta = std::sin(theta);
+ const Scalar ctheta = std::cos(theta);
+ Xdot(kX) = ctheta * lng_vel - stheta * lat_vel;
+ Xdot(kY) = stheta * lng_vel + ctheta * lat_vel;
+
+ const Scalar yaw_rate = CalcYawRate(X);
+ const Scalar expected_lat_accel = lng_vel * yaw_rate;
+ const Scalar expected_lng_accel =
+ CalcLongitudinalVelocity(Xdot) - yaw_rate * lat_vel;
+ const Scalar lng_accel_offset =
+ U(kLongitudinalAccel) - expected_lng_accel;
+ constexpr double kAccelWeight = 1.0;
+ if (!ignore_accel) {
+ Xdot(kLeftVelocity) += kAccelWeight * lng_accel_offset;
+ Xdot(kRightVelocity) += kAccelWeight * lng_accel_offset;
+ Xdot(kLateralVelocity) += U(kLateralAccel) - expected_lat_accel;
+
+ Xdot(kRightVelocity) += VelocityAccel(lng_vel);
+ Xdot(kLeftVelocity) += VelocityAccel(lng_vel);
+ }
return Xdot;
}
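
The centripetal entries in AForState() look odd until you differentiate the acceleration terms that DiffEq() adds. Writing d = 2 * robot_radius for the track diameter, the lateral acceleration is

\[ a_{lat} = -\omega \, v_{lng} = -\frac{v_r - v_l}{d} \cdot \frac{v_l + v_r}{2} = \frac{v_l^2 - v_r^2}{2d}, \]

so

\[ \frac{\partial a_{lat}}{\partial v_l} = \frac{v_l}{d}, \qquad \frac{\partial a_{lat}}{\partial v_r} = -\frac{v_r}{d}, \]

which are exactly the X(kLeftVelocity) / diameter and -X(kRightVelocity) / diameter entries added to the kLateralVelocity row. Differentiating the yaw_rate * lat_vel part of the expected longitudinal acceleration the same way gives \( \partial/\partial v_r = v_{lat}/d \) and \( \partial/\partial v_l = -v_{lat}/d \), which is the dlng_accel_dwheel_vel term applied with opposite signs to the left and right columns.
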
@@ -283,7 +462,7 @@
*state = RungeKuttaU(
[this](const State &X, const Input &U) { return DiffEq(X, U); }, *state,
- U, ::aos::time::DurationInSeconds(dt));
+ U, aos::time::DurationInSeconds(dt));
StateSquare Ptemp = A_d * *P * A_d.transpose() + Q_d;
*P = Ptemp;
@@ -310,7 +489,7 @@
PredictImpl(obs->U, dt, state, P);
}
if (!(obs->h && obs->dhdx)) {
- AOS_CHECK(obs->make_h);
+ CHECK(obs->make_h);
obs->make_h(*state, *P, &obs->h, &obs->dhdx);
}
CorrectImpl(obs->R, obs->z, obs->h(*state, obs->U), obs->dhdx(*state),
@@ -330,45 +509,42 @@
bool have_zeroed_encoders_ = false;
- aos::PriorityQueue<Observation, kSaveSamples, ::std::less<Observation>>
+ aos::PriorityQueue<Observation, kSaveSamples, std::less<Observation>>
observations_;
+
friend class testing::HybridEkfTest;
- friend class ::y2019::control_loops::testing::ParameterizedLocalizerTest;
+ friend class y2019::control_loops::testing::ParameterizedLocalizerTest;
}; // class HybridEkf
template <typename Scalar>
void HybridEkf<Scalar>::Correct(
const Output &z, const Input *U,
- ::std::function<
- void(const State &, const StateSquare &,
- ::std::function<Output(const State &, const Input &)> *,
- ::std::function<
- Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)> *)>
- make_h,
- ::std::function<Output(const State &, const Input &)> h,
- ::std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
- dhdx,
- const Eigen::Matrix<Scalar, kNOutputs, kNOutputs> &R,
+ std::function<void(const State &, const StateSquare &,
+ std::function<Output(const State &, const Input &)> *,
+ std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(
+ const State &)> *)> make_h,
+ std::function<Output(const State &, const Input &)> h,
+ std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
+ dhdx, const Eigen::Matrix<Scalar, kNOutputs, kNOutputs> &R,
aos::monotonic_clock::time_point t) {
- AOS_CHECK(!observations_.empty());
+ CHECK(!observations_.empty());
if (!observations_.full() && t < observations_.begin()->t) {
- AOS_LOG(ERROR,
- "Dropped an observation that was received before we "
- "initialized.\n");
+ LOG(ERROR) << "Dropped an observation that was received before we "
+ "initialized.\n";
return;
}
auto cur_it =
observations_.PushFromBottom({t, t, State::Zero(), StateSquare::Zero(),
Input::Zero(), z, make_h, h, dhdx, R});
if (cur_it == observations_.end()) {
- AOS_LOG(
- DEBUG,
- "Camera dropped off of end with time of %fs; earliest observation in "
- "queue has time of %fs.\n",
- ::aos::time::DurationInSeconds(t.time_since_epoch()),
- ::aos::time::DurationInSeconds(
- observations_.begin()->t.time_since_epoch()));
+ VLOG(1) << "Camera dropped off of end with time of "
+ << aos::time::DurationInSeconds(t.time_since_epoch())
+ << "s; earliest observation in "
+ "queue has time of "
+ << aos::time::DurationInSeconds(
+ observations_.begin()->t.time_since_epoch())
+ << "s.\n";
return;
}
@@ -396,7 +572,7 @@
--prev_it;
cur_it->prev_t = prev_it->t;
// TODO(james): Figure out a saner way of handling this.
- AOS_CHECK(U != nullptr);
+ CHECK(U != nullptr);
cur_it->U = *U;
} else {
cur_it->X_hat = next_it->X_hat;
@@ -412,7 +588,7 @@
// We use X_hat_ and P_ to store the intermediate states, and then
// once we reach the end they will all be up-to-date.
ProcessObservation(&*cur_it, cur_it->t - cur_it->prev_t, &X_hat_, &P_);
- AOS_CHECK(X_hat_.allFinite());
+ CHECK(X_hat_.allFinite());
if (next_it != observations_.end()) {
next_it->X_hat = X_hat_;
next_it->P = P_;
@@ -435,8 +611,10 @@
// Encoder derivatives
A_continuous_(kLeftEncoder, kLeftVelocity) = 1.0;
A_continuous_(kLeftEncoder, kAngularError) = 1.0;
+ A_continuous_(kLeftEncoder, kLongitudinalVelocityOffset) = -1.0;
A_continuous_(kRightEncoder, kRightVelocity) = 1.0;
A_continuous_(kRightEncoder, kAngularError) = -1.0;
+ A_continuous_(kRightEncoder, kLongitudinalVelocityOffset) = -1.0;
// Pull velocity derivatives from velocity matrices.
// Note that this looks really awkward (doesn't use
@@ -449,11 +627,21 @@
A_continuous_(kRightVelocity, kLeftVelocity) = vel_coefs.A_continuous(1, 0);
A_continuous_(kRightVelocity, kRightVelocity) = vel_coefs.A_continuous(1, 1);
- // Provide for voltage error terms:
+ A_continuous_(kLongitudinalVelocityOffset, kLongitudinalVelocityOffset) =
+ -1.0 / kVelocityOffsetTimeConstant;
+ A_continuous_(kLateralVelocity, kLateralVelocity) =
+ -1.0 / kLateralVelocityTimeConstant;
+
+ // We currently ignore all voltage-related modelling terms.
+ // TODO(james): Decide what to do about these terms. They don't really matter
+ // too much when we have accelerometer readings available.
B_continuous_.setZero();
- B_continuous_.row(kLeftVelocity) = vel_coefs.B_continuous.row(0);
- B_continuous_.row(kRightVelocity) = vel_coefs.B_continuous.row(1);
- A_continuous_.template block<kNStates, kNInputs>(0, 7) = B_continuous_;
+ B_continuous_.template block<1, 2>(kLeftVelocity, kLeftVoltage) =
+ vel_coefs.B_continuous.row(0);
+ B_continuous_.template block<1, 2>(kRightVelocity, kLeftVoltage) =
+ vel_coefs.B_continuous.row(1);
+ A_continuous_.template block<kNStates, 2>(0, kLeftVoltageError) =
+ B_continuous_.template block<kNStates, 2>(0, kLeftVoltage);
Q_continuous_.setZero();
// TODO(james): Improve estimates of process noise--e.g., X/Y noise can
@@ -463,16 +651,26 @@
Q_continuous_(kX, kX) = 0.002;
Q_continuous_(kY, kY) = 0.002;
Q_continuous_(kTheta, kTheta) = 0.0001;
- Q_continuous_(kLeftEncoder, kLeftEncoder) = ::std::pow(0.15, 2.0);
- Q_continuous_(kRightEncoder, kRightEncoder) = ::std::pow(0.15, 2.0);
- Q_continuous_(kLeftVelocity, kLeftVelocity) = ::std::pow(0.5, 2.0);
- Q_continuous_(kRightVelocity, kRightVelocity) = ::std::pow(0.5, 2.0);
- Q_continuous_(kLeftVoltageError, kLeftVoltageError) = ::std::pow(10.0, 2.0);
- Q_continuous_(kRightVoltageError, kRightVoltageError) = ::std::pow(10.0, 2.0);
- Q_continuous_(kAngularError, kAngularError) = ::std::pow(2.0, 2.0);
-
- P_.setZero();
- P_.diagonal() << 0.01, 0.01, 0.01, 0.02, 0.01, 0.02, 0.01, 1, 1, 0.03;
+ Q_continuous_(kLeftEncoder, kLeftEncoder) = std::pow(0.15, 2.0);
+ Q_continuous_(kRightEncoder, kRightEncoder) = std::pow(0.15, 2.0);
+ Q_continuous_(kLeftVelocity, kLeftVelocity) = std::pow(0.5, 2.0);
+ Q_continuous_(kRightVelocity, kRightVelocity) = std::pow(0.5, 2.0);
+ Q_continuous_(kLeftVoltageError, kLeftVoltageError) = std::pow(10.0, 2.0);
+ Q_continuous_(kRightVoltageError, kRightVoltageError) = std::pow(10.0, 2.0);
+ Q_continuous_(kAngularError, kAngularError) = std::pow(2.0, 2.0);
+ // This noise value largely governs whether we will trust the encoders or
+ // accelerometer more for estimating the robot position.
+ // Note that this also affects how we interpret camera measurements,
+ // particularly when using a heading/distance/skew measurement--if the
+ // noise on these numbers is particularly high, then we can end up with weird
+ // dynamics where a camera update both shifts our X/Y position and adjusts our
+ // velocity estimates substantially, causing the camera updates to create
+ // "momentum" and if we don't trust the encoders enough, then we have no way of
+ // determining that the velocity updates are bogus. This also interacts with
+ // kVelocityOffsetTimeConstant.
+ Q_continuous_(kLongitudinalVelocityOffset, kLongitudinalVelocityOffset) =
+ std::pow(1.1, 2.0);
+ Q_continuous_(kLateralVelocity, kLateralVelocity) = std::pow(0.1, 2.0);
H_encoders_and_gyro_.setZero();
// Encoders are stored directly in the state matrix, so are a minor
@@ -483,12 +681,8 @@
H_encoders_and_gyro_(2, kLeftVelocity) = -1.0 / diameter;
H_encoders_and_gyro_(2, kRightVelocity) = 1.0 / diameter;
- const Eigen::Matrix<Scalar, 4, 4> R_kf_drivetrain =
- dt_config_.make_kf_drivetrain_loop().observer().coefficients().R;
- // TODO(james): The multipliers here are hand-waving things that I put in when
- // tuning things. I haven't yet tried messing with these values again.
- encoder_noise_ = 0.5 * R_kf_drivetrain(0, 0);
- gyro_noise_ = 0.1 * R_kf_drivetrain(2, 2);
+ encoder_noise_ = 5e-9;
+ gyro_noise_ = 1e-13;
X_hat_.setZero();
P_.setZero();
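
For callers, the difference between UpdateEncodersAndGyro() and RawUpdateEncodersAndGyro() is only how the four-element input vector gets packed. A minimal sketch of that packing using just Eigen (the layout follows the InputIdx enum above; the sensor values themselves are made up):

    #include <iostream>

    #include <Eigen/Dense>

    int main() {
      // Inputs as the wrapper receives them: left/right volts, plus x/y/z
      // accelerometer readings already projected onto the robot's X/Y plane.
      const Eigen::Matrix<double, 2, 1> voltage(1.5, -0.5);
      const Eigen::Matrix<double, 3, 1> accel(0.2, -0.1, 9.8);
      // Pack them the way UpdateEncodersAndGyro() does: voltages at index
      // kLeftVoltage (0), accelerometer at kLongitudinalAccel (2), z dropped.
      Eigen::Matrix<double, 4, 1> U;
      U.block<2, 1>(0, 0) = voltage;
      U.block<2, 1>(2, 0) = accel.block<2, 1>(0, 0);
      std::cout << U.transpose() << std::endl;  // 1.5 -0.5 0.2 -0.1
      return 0;
    }
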
diff --git a/frc971/control_loops/drivetrain/hybrid_ekf_test.cc b/frc971/control_loops/drivetrain/hybrid_ekf_test.cc
index fa6acc5..bdfdd4e 100644
--- a/frc971/control_loops/drivetrain/hybrid_ekf_test.cc
+++ b/frc971/control_loops/drivetrain/hybrid_ekf_test.cc
@@ -14,11 +14,13 @@
namespace testing {
typedef HybridEkf<>::StateIdx StateIdx;
+typedef HybridEkf<>::InputIdx InputIdx;
+typedef HybridEkf<>::State State;
+typedef HybridEkf<>::StateSquare StateSquare;
+typedef HybridEkf<>::Input Input;
class HybridEkfTest : public ::testing::Test {
public:
- typedef HybridEkf<>::State State;
- typedef HybridEkf<>::Input Input;
::aos::testing::TestSharedMemory shm_;
HybridEkfTest()
: dt_config_(GetTestDrivetrainConfig()),
@@ -27,47 +29,84 @@
velocity_plant_coefs_(dt_config_.make_hybrid_drivetrain_velocity_loop()
.plant()
.coefficients()) {
- ekf_.ResetInitialState(t0_, State::Zero(),
- HybridEkf<>::StateSquare::Identity());
+ ekf_.ResetInitialState(t0_, State::Zero(), StateSquare::Identity());
}
protected:
- const State Update(const State &X, const Input &U) {
+ const State Update(const State &X, const Input &U, bool ignore_accel) {
return RungeKuttaU(
- ::std::bind(&HybridEkfTest::DiffEq, this, ::std::placeholders::_1,
- ::std::placeholders::_2),
- X, U, ::aos::time::DurationInSeconds(dt_config_.dt));
+ std::bind(&HybridEkfTest::DiffEq, this, std::placeholders::_1,
+ std::placeholders::_2, ignore_accel),
+ X, U, aos::time::DurationInSeconds(dt_config_.dt));
}
- void CheckDiffEq(const State &X, const Input &U) {
+ void CheckDiffEq(const State &X, const Input &U, bool ignore_accel) {
// Re-implement dynamics as a sanity check:
const double diameter = 2.0 * dt_config_.robot_radius;
- const double theta = X(StateIdx::kTheta, 0);
- const double stheta = ::std::sin(theta);
- const double ctheta = ::std::cos(theta);
- const double left_vel = X(StateIdx::kLeftVelocity, 0);
- const double right_vel = X(StateIdx::kRightVelocity, 0);
- const State Xdot_ekf = DiffEq(X, U);
- EXPECT_EQ(Xdot_ekf(StateIdx::kX, 0), ctheta * (left_vel + right_vel) / 2.0);
- EXPECT_EQ(Xdot_ekf(StateIdx::kY, 0), stheta * (left_vel + right_vel) / 2.0);
- EXPECT_EQ(Xdot_ekf(StateIdx::kTheta, 0), (right_vel - left_vel) / diameter);
- EXPECT_EQ(Xdot_ekf(StateIdx::kLeftEncoder, 0),
- left_vel + X(StateIdx::kAngularError, 0));
- EXPECT_EQ(Xdot_ekf(StateIdx::kRightEncoder, 0),
- right_vel - X(StateIdx::kAngularError, 0));
+ const double theta = X(StateIdx::kTheta);
+ const double stheta = std::sin(theta);
+ const double ctheta = std::cos(theta);
+ const double left_vel = X(StateIdx::kLeftVelocity);
+ const double right_vel = X(StateIdx::kRightVelocity);
+ const double lng_vel = (left_vel + right_vel) / 2.0;
+ const double yaw_rate =
+ (right_vel - left_vel) / 2.0 / dt_config_.robot_radius;
+ const double lat_vel = X(StateIdx::kLateralVelocity);
+ const State Xdot_ekf = DiffEq(X, U, ignore_accel);
+ EXPECT_EQ(Xdot_ekf(StateIdx::kX), ctheta * lng_vel - stheta * lat_vel);
+ EXPECT_EQ(Xdot_ekf(StateIdx::kY), stheta * lng_vel + ctheta * lat_vel);
+ EXPECT_EQ(Xdot_ekf(StateIdx::kTheta), (right_vel - left_vel) / diameter);
+ EXPECT_FLOAT_EQ(Xdot_ekf(StateIdx::kLeftEncoder),
+ left_vel + X(StateIdx::kAngularError) -
+ X(StateIdx::kLongitudinalVelocityOffset));
+ EXPECT_FLOAT_EQ(Xdot_ekf(StateIdx::kRightEncoder),
+ right_vel - X(StateIdx::kAngularError) -
+ X(StateIdx::kLongitudinalVelocityOffset));
- Eigen::Matrix<double, 2, 1> vel_x(X(StateIdx::kLeftVelocity, 0),
- X(StateIdx::kRightVelocity, 0));
- Eigen::Matrix<double, 2, 1> expected_vel_X =
+ const Eigen::Matrix<double, 2, 1> vel_x(X(StateIdx::kLeftVelocity),
+ X(StateIdx::kRightVelocity));
+ // Don't expect any contribution from the voltage terms, since we currently
+ // disable them.
+ const Eigen::Matrix<double, 2, 1> expected_accel =
velocity_plant_coefs_.A_continuous * vel_x +
velocity_plant_coefs_.B_continuous *
- (U + X.middleRows<2>(StateIdx::kLeftVoltageError));
- EXPECT_EQ(Xdot_ekf(StateIdx::kLeftVelocity, 0), expected_vel_X(0, 0));
- EXPECT_EQ(Xdot_ekf(StateIdx::kRightVelocity, 0), expected_vel_X(1, 0));
+ (U.topRows(2) + X.block<2, 1>(StateIdx::kLeftVoltageError, 0));
+ const double nominal_lng_accel =
+ (expected_accel(0) + expected_accel(1)) / 2.0;
+ if (ignore_accel) {
+ EXPECT_FLOAT_EQ(Xdot_ekf(StateIdx::kLeftVelocity), expected_accel(0));
+ EXPECT_FLOAT_EQ(Xdot_ekf(StateIdx::kRightVelocity), expected_accel(1));
+ EXPECT_EQ(Xdot_ekf(StateIdx::kLateralVelocity), 0.0);
+ EXPECT_EQ(
+ Xdot_ekf(StateIdx::kLongitudinalVelocityOffset), 0.0);
+ } else {
+ const double lng_accel = U(InputIdx::kLongitudinalAccel) +
+ lat_vel * yaw_rate + ekf_.VelocityAccel(lng_vel);
+ EXPECT_FLOAT_EQ(Xdot_ekf(StateIdx::kLeftVelocity),
+ lng_accel + expected_accel(0) - nominal_lng_accel);
+ EXPECT_FLOAT_EQ(Xdot_ekf(StateIdx::kRightVelocity),
+ lng_accel + expected_accel(1) - nominal_lng_accel);
+
+ EXPECT_FLOAT_EQ(Xdot_ekf(StateIdx::kLongitudinalVelocityOffset),
+ -X(StateIdx::kLongitudinalVelocityOffset) /
+ HybridEkf<>::kVelocityOffsetTimeConstant);
+ const double centripetal_accel = lng_vel * yaw_rate;
+ const double lat_accel = U(InputIdx::kLateralAccel) - centripetal_accel;
+ EXPECT_EQ(Xdot_ekf(StateIdx::kLateralVelocity),
+ lat_accel - X(StateIdx::kLateralVelocity) /
+ HybridEkf<>::kLateralVelocityTimeConstant);
+ }
// Dynamics don't expect error terms to change:
- EXPECT_EQ(0.0, Xdot_ekf.bottomRows<3>().squaredNorm());
+ EXPECT_EQ(0.0, Xdot_ekf(StateIdx::kLeftVoltageError));
+ EXPECT_EQ(0.0, Xdot_ekf(StateIdx::kRightVoltageError));
+ EXPECT_EQ(0.0, Xdot_ekf(StateIdx::kAngularError));
}
- State DiffEq(const State &X, const Input &U) { return ekf_.DiffEq(X, U); }
+ State DiffEq(const State &X, const Input &U, bool ignore_accel) {
+ return ekf_.DiffEq(X, U, ignore_accel);
+ }
+ StateSquare AForState(const State &X, bool ignore_accel) {
+ return ekf_.AForState(X, ignore_accel);
+ }
// Returns a random value sampled from a normal distribution with a standard
// deviation of std and a mean of zero.
@@ -75,49 +114,113 @@
DrivetrainConfig<double> dt_config_;
HybridEkf<> ekf_;
- ::aos::monotonic_clock::time_point t0_;
+ aos::monotonic_clock::time_point t0_;
StateFeedbackHybridPlantCoefficients<2, 2, 2> velocity_plant_coefs_;
- ::std::mt19937 gen_{static_cast<uint32_t>(::aos::testing::RandomSeed())};
- ::std::normal_distribution<> normal_;
+ std::mt19937 gen_{static_cast<uint32_t>(::aos::testing::RandomSeed())};
+ std::normal_distribution<> normal_;
};
-// Tests that the dynamics from DiffEq behave as expected:
-TEST_F(HybridEkfTest, CheckDynamics) {
- CheckDiffEq(State::Zero(), Input::Zero());
- CheckDiffEq(State::Zero(), {-5.0, 5.0});
- CheckDiffEq(State::Zero(), {12.0, -3.0});
- CheckDiffEq(
- (State() << 100.0, 200.0, M_PI, 1.234, 0.5, 1.2, 0.6, 3.0, -4.0, 0.3)
- .finished(),
- {5.0, 6.0});
- CheckDiffEq(
- (State() << 100.0, 200.0, 2.0, 1.234, 0.5, 1.2, 0.6, 3.0, -4.0, 0.3)
- .finished(),
- {5.0, 6.0});
- CheckDiffEq(
- (State() << 100.0, 200.0, -2.0, 1.234, 0.5, 1.2, 0.6, 3.0, -4.0, 0.3)
- .finished(),
- {5.0, 6.0});
- // And check that a theta outisde of [-M_PI, M_PI] works.
- CheckDiffEq(
- (State() << 100.0, 200.0, 200.0, 1.234, 0.5, 1.2, 0.6, 3.0, -4.0, 0.3)
- .finished(),
- {5.0, 6.0});
-}
// Tests that if we provide a bunch of observations of the position
// with zero change in time, the state should approach the estimation.
+struct DiffEqInputs {
+ State X;
+ Input U;
+ bool ignore_accel;
+};
+
+DiffEqInputs MakeDiffEqInputs(State X, Input U, bool ignore_accel) {
+ return {.X = X, .U = U, .ignore_accel = ignore_accel};
+}
+
+class HybridEkfDiffEqTest : public HybridEkfTest,
+ public ::testing::WithParamInterface<DiffEqInputs> {
+};
+
+// Tests that the dynamics from DiffEq behave as expected:
+TEST_P(HybridEkfDiffEqTest, CheckDynamics) {
+ CheckDiffEq(GetParam().X, GetParam().U, GetParam().ignore_accel);
+
+ // Calculate whether A seems to be empirically correct.
+ const StateSquare A = AForState(GetParam().X, GetParam().ignore_accel);
+ for (int ii = 0; ii < HybridEkf<>::kNStates; ++ii) {
+ SCOPED_TRACE(ii);
+ State perturbation = State::Zero();
+ constexpr double kEps = 1e-5;
+ perturbation(ii) = kEps;
+ const State Xdot_plus = DiffEq(GetParam().X + perturbation, GetParam().U,
+ GetParam().ignore_accel);
+ const State Xdot_minus = DiffEq(GetParam().X - perturbation, GetParam().U,
+ GetParam().ignore_accel);
+ const State numerical_dXdot_dX = (Xdot_plus - Xdot_minus) / (2.0 * kEps);
+ const State A_based_dXdot_dX = A * perturbation / kEps;
+ EXPECT_LT((A_based_dXdot_dX - numerical_dXdot_dX).norm(), 1e-8)
+ << "A * X: " << A_based_dXdot_dX << " numerical: " << numerical_dXdot_dX
+ << " difference: " << (A_based_dXdot_dX - numerical_dXdot_dX);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(
+ CheckMathTest, HybridEkfDiffEqTest,
+ ::testing::Values(DiffEqInputs{State::Zero(), Input::Zero(), false},
+ DiffEqInputs{State::Zero(), Input::Zero(), true},
+ DiffEqInputs{State::Zero(), {-5.0, 5.0, 0.0, 0.0}, false},
+ DiffEqInputs{State::Zero(), {-5.0, 5.0, 0.0, 0.0}, true},
+ DiffEqInputs{State::Zero(), {12.0, 3.0, 0.0, 0.0}, false},
+ DiffEqInputs{State::Zero(), {12.0, 3.0, 0.0, 0.0}, true},
+ DiffEqInputs{(State() << 100.0, 200.0, M_PI, 1.234, 0.5,
+ 1.2, 0.6, 3.0, -4.0, 0.3, 0.0, 0.0)
+ .finished(),
+ {3.0, 4.0, 5.0, 6.0},
+ false},
+ DiffEqInputs{(State() << 100.0, 200.0, M_PI, 1.234, 0.5,
+ 1.2, 0.6, 3.0, -4.0, 0.3, 0.0, 0.0)
+ .finished(),
+ {3.0, 4.0, 5.0, 6.0},
+ true},
+ DiffEqInputs{(State() << 100.0, 200.0, 2.0, 1.234, 0.5,
+ 1.2, 0.6, 3.0, -4.0, 0.3, 0.0, 0.0)
+ .finished(),
+ {3.0, 4.0, 5.0, 6.0},
+ false},
+ DiffEqInputs{(State() << 100.0, 200.0, 2.0, 1.234, 0.5,
+ 1.2, 0.6, 3.0, -4.0, 0.3, 0.0, 0.0)
+ .finished(),
+ {3.0, 4.0, 5.0, 6.0},
+ true},
+ DiffEqInputs{(State() << 100.0, 200.0, 2.0, 1.234, 0.5,
+ 1.2, 0.6, 3.0, -4.0, 0.3, 0.1, 0.2)
+ .finished(),
+ {3.0, 4.0, 5.0, 6.0},
+ false},
+ DiffEqInputs{(State() << 100.0, 200.0, -2.0, 1.234, 0.5,
+ 1.2, 0.6, 3.0, -4.0, 0.3, 0.0, 0.0)
+ .finished(),
+ {-3.0, -4.0, -5.0, -6.0},
+ false},
+ DiffEqInputs{(State() << 100.0, 200.0, -2.0, 1.234, 0.5,
+ 1.2, 0.6, 3.0, -4.0, 0.3, 0.0, 0.0)
+ .finished(),
+ {-3.0, -4.0, -5.0, -6.0},
+ true},
+ // And check that a theta outside of [-M_PI, M_PI] works.
+ DiffEqInputs{(State() << 100.0, 200.0, 200.0, 1.234, 0.5,
+ 1.2, 0.6, 3.0, -4.0, 0.3, 0.0, 0.0)
+ .finished(),
+ {3.0, 4.0, 5.0, 6.0},
+ false}));
+
TEST_F(HybridEkfTest, ZeroTimeCorrect) {
HybridEkf<>::Output Z(0.5, 0.5, 1);
- Eigen::Matrix<double, 3, 10> H;
+ Eigen::Matrix<double, 3, 12> H;
H.setIdentity();
auto h = [H](const State &X, const Input &) { return H * X; };
auto dhdx = [H](const State &) { return H; };
Eigen::Matrix<double, 3, 3> R;
R.setIdentity();
R *= 1e-3;
- Input U(0, 0);
+ Input U = Input::Zero();
EXPECT_EQ(0.0, ekf_.X_hat(StateIdx::kTheta));
const double starting_p_norm = ekf_.P().norm();
for (int ii = 0; ii < 100; ++ii) {
@@ -138,14 +241,14 @@
HybridEkf<>::Output Z(0, 0, 0);
// Use true_X to track what we think the true robot state is.
State true_X = ekf_.X_hat();
- Eigen::Matrix<double, 3, 10> H;
+ Eigen::Matrix<double, 3, 12> H;
H.setZero();
auto h = [H](const State &X, const Input &) { return H * X; };
auto dhdx = [H](const State &) { return H; };
// Provide constant input voltage.
- Eigen::Matrix<double, 2, 1> U;
- U << 12.0, 10.0;
- // The exact value of the noise matrix ix unimportant so long as it is
+ Input U;
+ U << 12.0, 10.0, 1.0, -0.1;
+ // The exact value of the noise matrix is unimportant so long as it is
// non-zero.
Eigen::Matrix<double, 3, 3> R;
R.setIdentity();
@@ -153,25 +256,23 @@
const double starting_p_norm = ekf_.P().norm();
for (int ii = 0; ii < 100; ++ii) {
ekf_.Correct(Z, &U, {}, h, dhdx, R, t0_ + dt_config_.dt * (ii + 1));
- true_X = Update(true_X, U);
+ true_X = Update(true_X, U, false);
EXPECT_EQ(true_X, ekf_.X_hat());
}
// We don't care about precise results, just that they are generally sane:
// robot should've travelled forwards and slightly to the right.
- EXPECT_NEAR(0.9, ekf_.X_hat(StateIdx::kX), 0.1);
- EXPECT_NEAR(-0.15, ekf_.X_hat(StateIdx::kY), 0.05);
- EXPECT_NEAR(-0.3, ekf_.X_hat(StateIdx::kTheta), 0.05);
- EXPECT_NEAR(1.0, ekf_.X_hat(StateIdx::kLeftEncoder), 0.1);
- EXPECT_NEAR(ekf_.X_hat(StateIdx::kLeftEncoder) * 0.8,
- ekf_.X_hat(StateIdx::kRightEncoder),
- ekf_.X_hat(StateIdx::kLeftEncoder) * 0.1);
- EXPECT_NEAR(2.4, ekf_.X_hat(StateIdx::kLeftVelocity), 0.1);
- EXPECT_NEAR(ekf_.X_hat(StateIdx::kLeftVelocity) * 0.8,
- ekf_.X_hat(StateIdx::kRightVelocity),
- ekf_.X_hat(StateIdx::kLeftVelocity) * 0.1);
+ EXPECT_NEAR(0.1, ekf_.X_hat(StateIdx::kX), 0.05);
+ EXPECT_NEAR(-0.02, ekf_.X_hat(StateIdx::kY), 0.01);
+ EXPECT_GT(0.01, ekf_.X_hat(StateIdx::kTheta));
+ EXPECT_NEAR(0.3, ekf_.X_hat(StateIdx::kLeftEncoder), 0.1);
+ EXPECT_NEAR(0.7, ekf_.X_hat(StateIdx::kLeftVelocity), 0.1);
+ EXPECT_NEAR(0.3, ekf_.X_hat(StateIdx::kRightVelocity), 0.1);
EXPECT_EQ(0.0, ekf_.X_hat(StateIdx::kLeftVoltageError));
EXPECT_EQ(0.0, ekf_.X_hat(StateIdx::kRightVoltageError));
EXPECT_EQ(0.0, ekf_.X_hat(StateIdx::kAngularError));
+ // Without encoder updates, the longitudinal velocity offset should be zero.
+ EXPECT_EQ(0.0, ekf_.X_hat(StateIdx::kLongitudinalVelocityOffset));
+ EXPECT_NEAR(0.04, ekf_.X_hat(StateIdx::kLateralVelocity), 0.01);
const double ending_p_norm = ekf_.P().norm();
// Due to lack of corrections, noise should've increased.
EXPECT_GT(ending_p_norm, starting_p_norm * 1.10);
@@ -191,12 +292,12 @@
TEST_P(HybridEkfOldCorrectionsTest, CreateOldCorrection) {
HybridEkf<>::Output Z;
Z.setZero();
- Eigen::Matrix<double, 3, 10> H;
+ Eigen::Matrix<double, 3, 12> H;
H.setZero();
auto h_zero = [H](const State &X, const Input &) { return H * X; };
auto dhdx_zero = [H](const State &) { return H; };
- Eigen::Matrix<double, 2, 1> U;
- U << 12.0, 12.0;
+ Input U;
+ U << 12.0, 12.0, 0.0, 0.0;
Eigen::Matrix<double, 3, 3> R;
R = R.Identity();
EXPECT_EQ(0.0, ekf_.X_hat().norm());
@@ -247,11 +348,12 @@
TEST_F(HybridEkfTest, DiscardTooOldCorrection) {
HybridEkf<>::Output Z;
Z.setZero();
- Eigen::Matrix<double, 3, 10> H;
+ Eigen::Matrix<double, 3, 12> H;
H.setZero();
auto h_zero = [H](const State &X, const Input &) { return H * X; };
auto dhdx_zero = [H](const State &) { return H; };
- Eigen::Matrix<double, 2, 1> U(12.0, 12.0);
+ Input U;
+ U << 12.0, 12.0, 0.0, 0.0;
Eigen::Matrix<double, 3, 3> R;
R.setIdentity();
@@ -286,15 +388,16 @@
// provided:
TEST_F(HybridEkfTest, PerfectEncoderUpdate) {
State true_X = ekf_.X_hat();
- Input U(-1.0, 5.0);
+ Input U;
+ U << -1.0, 5.0, 0.0, 0.0;
for (int ii = 0; ii < 100; ++ii) {
- true_X = Update(true_X, U);
- ekf_.UpdateEncodersAndGyro(true_X(StateIdx::kLeftEncoder, 0),
- true_X(StateIdx::kRightEncoder, 0),
- (true_X(StateIdx::kRightVelocity, 0) -
- true_X(StateIdx::kLeftVelocity, 0)) /
- dt_config_.robot_radius / 2.0,
- U, t0_ + (ii + 1) * dt_config_.dt);
+ true_X = Update(true_X, U, false);
+ ekf_.RawUpdateEncodersAndGyro(true_X(StateIdx::kLeftEncoder, 0),
+ true_X(StateIdx::kRightEncoder, 0),
+ (true_X(StateIdx::kRightVelocity, 0) -
+ true_X(StateIdx::kLeftVelocity, 0)) /
+ dt_config_.robot_radius / 2.0,
+ U, t0_ + (ii + 1) * dt_config_.dt);
EXPECT_NEAR((true_X - ekf_.X_hat()).squaredNorm(), 0.0, 1e-25)
<< "Expected only floating point precision errors in update step. "
"Estimated X_hat:\n"
@@ -303,29 +406,6 @@
}
}
-// Tests that encoder updates cause everything to converge properly in the
-// presence of voltage error.
-TEST_F(HybridEkfTest, PerfectEncoderUpdatesWithVoltageError) {
- State true_X = ekf_.X_hat();
- true_X(StateIdx::kLeftVoltageError, 0) = 2.0;
- true_X(StateIdx::kRightVoltageError, 0) = 2.0;
- Input U(5.0, 5.0);
- for (int ii = 0; ii < 1000; ++ii) {
- true_X = Update(true_X, U);
- ekf_.UpdateEncodersAndGyro(true_X(StateIdx::kLeftEncoder, 0),
- true_X(StateIdx::kRightEncoder, 0),
- (true_X(StateIdx::kRightVelocity, 0) -
- true_X(StateIdx::kLeftVelocity, 0)) /
- dt_config_.robot_radius / 2.0,
- U, t0_ + (ii + 1) * dt_config_.dt);
- }
- EXPECT_NEAR((true_X - ekf_.X_hat()).norm(), 0.0, 5e-2)
- << "Expected non-x/y estimates to converge to correct. "
- "Estimated X_hat:\n"
- << ekf_.X_hat() << "\ntrue X:\n"
- << true_X;
-}
-
// Tests encoder/gyro updates when we have some errors in our estimate.
TEST_F(HybridEkfTest, PerfectEncoderUpdateConverges) {
// In order to simulate modelling errors, we add an angular_error and start
@@ -340,15 +420,16 @@
// Note: Because we don't have any absolute measurements used for corrections,
// we will get slightly off on the absolute x/y/theta, but the errors are so
// small they are negligible.
- Input U(10.0, 5.0);
+ Input U;
+ U << 10.0, 5.0, 0.0, 0.0;
for (int ii = 0; ii < 100; ++ii) {
- true_X = Update(true_X, U);
- ekf_.UpdateEncodersAndGyro(true_X(StateIdx::kLeftEncoder, 0),
- true_X(StateIdx::kRightEncoder, 0),
- (true_X(StateIdx::kRightVelocity, 0) -
- true_X(StateIdx::kLeftVelocity, 0)) /
- dt_config_.robot_radius / 2.0,
- U, t0_ + (ii + 1) * dt_config_.dt);
+ true_X = Update(true_X, U, false);
+ ekf_.RawUpdateEncodersAndGyro(true_X(StateIdx::kLeftEncoder, 0),
+ true_X(StateIdx::kRightEncoder, 0),
+ (true_X(StateIdx::kRightVelocity, 0) -
+ true_X(StateIdx::kLeftVelocity, 0)) /
+ dt_config_.robot_radius / 2.0,
+ U, t0_ + (ii + 1) * dt_config_.dt);
}
EXPECT_NEAR((true_X - ekf_.X_hat()).norm(), 0.0, 3e-5)
<< "Expected non-x/y estimates to converge to correct. "
@@ -365,10 +446,11 @@
true_X(StateIdx::kAngularError, 0) = 1.0;
true_X(StateIdx::kLeftEncoder, 0) += 2.0;
true_X(StateIdx::kRightEncoder, 0) -= 2.0;
- Input U(10.0, 5.0);
+ Input U;
+ U << 10.0, 5.0, 0.0, 0.0;
for (int ii = 0; ii < 100; ++ii) {
- true_X = Update(true_X, U);
- ekf_.UpdateEncodersAndGyro(
+ true_X = Update(true_X, U, false);
+ ekf_.RawUpdateEncodersAndGyro(
true_X(StateIdx::kLeftEncoder, 0) + Normal(1e-3),
true_X(StateIdx::kRightEncoder, 0) + Normal(1e-3),
(true_X(StateIdx::kRightVelocity, 0) -
@@ -391,15 +473,16 @@
TEST_F(HybridEkfDeathTest, DieIfUninitialized) {
HybridEkf<> ekf(dt_config_);
// Expect death if we fail to initialize before starting to provide updates.
- EXPECT_DEATH(ekf.UpdateEncodersAndGyro(0.0, 1.0, 2.0, {3.0, 3.0}, t0_),
- "observations_.empty()");
+ EXPECT_DEATH(
+ ekf.RawUpdateEncodersAndGyro(0.0, 1.0, 2.0, {3.0, 3.0, 0.0, 0.0}, t0_),
+ "observations_.empty()");
}
TEST_F(HybridEkfDeathTest, DieOnNoU) {
// Expect death if the user does not provide U when creating a fresh
// measurement.
EXPECT_DEATH(ekf_.Correct({1, 2, 3}, nullptr, {}, {}, {}, {},
- t0_ + ::std::chrono::seconds(1)),
+ t0_ + std::chrono::seconds(1)),
"U != nullptr");
}
@@ -407,22 +490,23 @@
// that we die when an improper combination is provided.
TEST_F(HybridEkfDeathTest, DieOnNoH) {
// Check that we die when no h-related functions are provided:
- Input U(1, 2);
+ Input U;
+ U << 1.0, 2.0, 0.0, 0.0;
EXPECT_DEATH(ekf_.Correct({1, 2, 3}, &U, {}, {}, {}, {},
- t0_ + ::std::chrono::seconds(1)),
+ t0_ + std::chrono::seconds(1)),
"make_h");
// Check that we die when only one of h and dhdx are provided:
EXPECT_DEATH(ekf_.Correct({1, 2, 3}, &U, {}, {},
[](const State &) {
- return Eigen::Matrix<double, 3, 10>::Zero();
+ return Eigen::Matrix<double, 3, 12>::Zero();
},
- {}, t0_ + ::std::chrono::seconds(1)),
+ {}, t0_ + std::chrono::seconds(1)),
"make_h");
EXPECT_DEATH(ekf_.Correct({1, 2, 3}, &U, {},
[](const State &, const Input &) {
return Eigen::Matrix<double, 3, 1>::Zero();
},
- {}, {}, t0_ + ::std::chrono::seconds(1)),
+ {}, {}, t0_ + std::chrono::seconds(1)),
"make_h");
}
diff --git a/frc971/control_loops/drivetrain/libspline.cc b/frc971/control_loops/drivetrain/libspline.cc
index f0b9537..5ad175e 100644
--- a/frc971/control_loops/drivetrain/libspline.cc
+++ b/frc971/control_loops/drivetrain/libspline.cc
@@ -161,7 +161,7 @@
// This assumes that res is created in python to be getPathLength() long.
// Likely to SEGFAULT otherwise.
- void Distances(Trajectory *t, double *res) {
+ void TrajectoryDistances(Trajectory *t, double *res) {
const ::std::vector<double> &distances = t->Distances();
::std::memcpy(res, distances.data(), sizeof(double) * distances.size());
}
diff --git a/frc971/control_loops/drivetrain/localizer.h b/frc971/control_loops/drivetrain/localizer.h
index 2589d4b..6677145 100644
--- a/frc971/control_loops/drivetrain/localizer.h
+++ b/frc971/control_loops/drivetrain/localizer.h
@@ -3,6 +3,7 @@
#include "aos/events/event_loop.h"
#include "frc971/control_loops/drivetrain/drivetrain_config.h"
+#include "frc971/control_loops/drivetrain/drivetrain_status_generated.h"
#include "frc971/control_loops/drivetrain/hybrid_ekf.h"
#include "frc971/control_loops/pose.h"
@@ -36,6 +37,11 @@
// Defines an interface for classes that provide field-global localization.
class LocalizerInterface {
public:
+ typedef HybridEkf<double> Ekf;
+ typedef typename Ekf::StateIdx StateIdx;
+
+ virtual ~LocalizerInterface() {}
+
// Perform a single step of the filter, using the information that is
// available on every drivetrain iteration.
// The user should pass in the U that the real system experienced from the
@@ -48,23 +54,51 @@
virtual void Update(const ::Eigen::Matrix<double, 2, 1> &U,
::aos::monotonic_clock::time_point now,
double left_encoder, double right_encoder,
- double gyro_rate, double longitudinal_accelerometer) = 0;
+ double gyro_rate, const Eigen::Vector3d &accel) = 0;
// Reset the absolute position of the estimator.
virtual void ResetPosition(::aos::monotonic_clock::time_point t, double x,
double y, double theta, double theta_uncertainty,
bool reset_theta) = 0;
+ flatbuffers::Offset<LocalizerState> PopulateStatus(
+ flatbuffers::FlatBufferBuilder *fbb) {
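+ // Serialize the current state estimate into a LocalizerState table; each
+ // accessor below reads one entry of Xhat().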
+ LocalizerState::Builder builder(*fbb);
+ builder.add_x(x());
+ builder.add_y(y());
+ builder.add_theta(theta());
+ builder.add_left_velocity(left_velocity());
+ builder.add_right_velocity(right_velocity());
+ builder.add_left_encoder(left_encoder());
+ builder.add_right_encoder(right_encoder());
+ builder.add_left_voltage_error(left_voltage_error());
+ builder.add_right_voltage_error(right_voltage_error());
+ builder.add_angular_error(angular_error());
+ builder.add_longitudinal_velocity_offset(longitudinal_velocity_offset());
+ builder.add_lateral_velocity(lateral_velocity());
+ return builder.Finish();
+ }
+ virtual Ekf::State Xhat() const = 0;
// There are several subtly different norms floating around for state
- // matrices. In order to avoid that mess, we jus tprovide direct accessors for
+ // matrices. In order to avoid that mess, we just provide direct accessors for
// the values that most people care about.
- virtual double x() const = 0;
- virtual double y() const = 0;
- virtual double theta() const = 0;
- virtual double left_velocity() const = 0;
- virtual double right_velocity() const = 0;
- virtual double left_encoder() const = 0;
- virtual double right_encoder() const = 0;
- virtual double left_voltage_error() const = 0;
- virtual double right_voltage_error() const = 0;
+ double x() const { return Xhat()(StateIdx::kX); }
+ double y() const { return Xhat()(StateIdx::kY); }
+ double theta() const { return Xhat()(StateIdx::kTheta); }
+ double left_velocity() const { return Xhat()(StateIdx::kLeftVelocity); }
+ double right_velocity() const { return Xhat()(StateIdx::kRightVelocity); }
+ double left_encoder() const { return Xhat()(StateIdx::kLeftEncoder); }
+ double right_encoder() const { return Xhat()(StateIdx::kRightEncoder); }
+ double left_voltage_error() const {
+ return Xhat()(StateIdx::kLeftVoltageError);
+ }
+ double right_voltage_error() const {
+ return Xhat()(StateIdx::kRightVoltageError);
+ }
+ double angular_error() const { return Xhat()(StateIdx::kAngularError); }
+ double longitudinal_velocity_offset() const {
+ return Xhat()(StateIdx::kLongitudinalVelocityOffset);
+ }
+ double lateral_velocity() const { return Xhat()(StateIdx::kLateralVelocity); }
+
virtual TargetSelectorInterface *target_selector() = 0;
};
@@ -93,9 +127,6 @@
// This provides no method for using cameras or the such to get global
// measurements and just assumes that you can dead-reckon perfectly.
class DeadReckonEkf : public LocalizerInterface {
- typedef HybridEkf<double> Ekf;
- typedef typename Ekf::StateIdx StateIdx;
-
public:
DeadReckonEkf(::aos::EventLoop *event_loop,
const DrivetrainConfig<double> &dt_config)
@@ -107,11 +138,14 @@
target_selector_.set_has_target(false);
}
+ virtual ~DeadReckonEkf() {}
+
void Update(const ::Eigen::Matrix<double, 2, 1> &U,
::aos::monotonic_clock::time_point now, double left_encoder,
double right_encoder, double gyro_rate,
- double /*longitudinal_accelerometer*/) override {
- ekf_.UpdateEncodersAndGyro(left_encoder, right_encoder, gyro_rate, U, now);
+ const Eigen::Vector3d &accel) override {
+ ekf_.UpdateEncodersAndGyro(left_encoder, right_encoder, gyro_rate, U, accel,
+ now);
}
void ResetPosition(::aos::monotonic_clock::time_point t, double x, double y,
@@ -119,33 +153,14 @@
bool /*reset_theta*/) override {
const double left_encoder = ekf_.X_hat(StateIdx::kLeftEncoder);
const double right_encoder = ekf_.X_hat(StateIdx::kRightEncoder);
- ekf_.ResetInitialState(t, (Ekf::State() << x, y, theta, left_encoder, 0,
- right_encoder, 0, 0, 0,
- 0).finished(),
+ ekf_.ResetInitialState(t,
+ (Ekf::State() << x, y, theta, left_encoder, 0,
+ right_encoder, 0, 0, 0, 0, 0, 0)
+ .finished(),
ekf_.P());
- };
+ }
- double x() const override { return ekf_.X_hat(StateIdx::kX); }
- double y() const override { return ekf_.X_hat(StateIdx::kY); }
- double theta() const override { return ekf_.X_hat(StateIdx::kTheta); }
- double left_encoder() const override {
- return ekf_.X_hat(StateIdx::kLeftEncoder);
- }
- double right_encoder() const override {
- return ekf_.X_hat(StateIdx::kRightEncoder);
- }
- double left_velocity() const override {
- return ekf_.X_hat(StateIdx::kLeftVelocity);
- }
- double right_velocity() const override {
- return ekf_.X_hat(StateIdx::kRightVelocity);
- }
- double left_voltage_error() const override {
- return ekf_.X_hat(StateIdx::kLeftVoltageError);
- }
- double right_voltage_error() const override {
- return ekf_.X_hat(StateIdx::kRightVoltageError);
- }
+ Ekf::State Xhat() const override { return ekf_.X_hat(); }
TrivialTargetSelector *target_selector() override {
return &target_selector_;
diff --git a/frc971/control_loops/profiled_subsystem.h b/frc971/control_loops/profiled_subsystem.h
index b9ec5c5..439f14f 100644
--- a/frc971/control_loops/profiled_subsystem.h
+++ b/frc971/control_loops/profiled_subsystem.h
@@ -229,7 +229,7 @@
double default_acceleration)
: ProfiledSubsystem<3, 1, ZeroingEstimator>(
::std::move(loop), {{ZeroingEstimator(zeroing_constants)}}),
- profile_(::aos::controls::kLoopFrequency),
+ profile_(this->loop_->plant().coefficients().dt),
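+ // Use the plant's own sample period instead of the global kLoopFrequency
+ // constant.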
range_(range),
default_velocity_(default_velocity),
default_acceleration_(default_acceleration) {
@@ -275,7 +275,7 @@
builder.add_voltage_error(this->X_hat(2, 0));
builder.add_calculated_velocity(
(position() - last_position_) /
- ::aos::time::DurationInSeconds(::aos::controls::kLoopFrequency));
+ ::aos::time::DurationInSeconds(this->loop_->plant().coefficients().dt));
builder.add_estimator_state(estimator_state);
diff --git a/frc971/control_loops/python/BUILD b/frc971/control_loops/python/BUILD
index 50764c7..1fae02b 100644
--- a/frc971/control_loops/python/BUILD
+++ b/frc971/control_loops/python/BUILD
@@ -164,18 +164,25 @@
)
py_binary(
- name = "path_edit",
+ name = "spline_graph",
srcs = [
- "basic_window.py",
"color.py",
+ "spline_drawing.py",
+ "spline_writer.py",
"path_edit.py",
+ "points.py",
+ "graph.py",
+ "spline_graph.py",
],
+ legacy_create_init = False,
restricted_to = ["//tools:k8"],
visibility = ["//visibility:public"],
deps = [
":libspline",
":python_init",
"@python_gtk",
+ "@matplotlib_repo//:matplotlib2.7",
+ ":basic_window",
],
)
diff --git a/frc971/control_loops/python/basic_window.py b/frc971/control_loops/python/basic_window.py
old mode 100644
new mode 100755
index 78324a3..827fe64
--- a/frc971/control_loops/python/basic_window.py
+++ b/frc971/control_loops/python/basic_window.py
@@ -1,3 +1,4 @@
+#!/usr/bin/python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
@@ -5,10 +6,10 @@
from gi.repository import Gdk
from gi.repository import GdkX11
import cairo
+from constants import *
identity = cairo.Matrix()
-
# Override the matrix of a cairo context.
class OverrideMatrix(object):
def __init__(self, cr, matrix):
@@ -34,7 +35,6 @@
def quit_main_loop(*args):
mainloop.quit()
-
def RunApp():
try:
mainloop.run()
@@ -49,11 +49,10 @@
# Draw in response to an expose-event
def __init__(self):
super(BaseWindow, self).__init__()
- #self.window.connect("destroy", quit_main_loop)
- self.set_size_request(640, 600)
+        self.set_size_request(2 * SCREEN_SIZE, SCREEN_SIZE)
self.center = (0, 0)
- self.shape = (640, 400)
+        self.shape = (2 * SCREEN_SIZE, SCREEN_SIZE)
self.needs_redraw = False
def get_current_scale(self):
@@ -81,3 +80,4 @@
# Handle the expose-event by drawing
def handle_draw(self, cr):
pass
+
diff --git a/frc971/control_loops/python/color.py b/frc971/control_loops/python/color.py
index ec53e82..5634042 100644
--- a/frc971/control_loops/python/color.py
+++ b/frc971/control_loops/python/color.py
@@ -1,9 +1,10 @@
class Color:
def __init__(self, r, g, b, a=1.0):
- self.r = r
- self.g = g
- self.b = b
- self.a = a
+ self.r = r
+ self.g = g
+ self.b = b
+ self.a = a
+
palette = {
"RED": Color(1, 0, 0),
@@ -16,5 +17,6 @@
"WHITE": Color(1, 1, 1),
"GREY": Color(0.5, 0.5, 0.5),
"LIGHT_GREY": Color(0.75, 0.75, 0.75),
- "DARK_GREY": Color(0.25, 0.25, 0.25)
+ "DARK_GREY": Color(0.25, 0.25, 0.25),
+ "ORANGE": Color(1, 0.65, 0)
}
diff --git a/frc971/control_loops/python/constants.py b/frc971/control_loops/python/constants.py
new file mode 100644
index 0000000..7b45215
--- /dev/null
+++ b/frc971/control_loops/python/constants.py
@@ -0,0 +1,28 @@
+import argparse
+
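+# The editor window size (in pixels) is configurable from the command line;
+# the pixel <-> meter conversions below are derived from it.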
+arg_parser = argparse.ArgumentParser(description='spline_editor')
+arg_parser.add_argument(
+ 'size',
+ metavar='N',
+ default=800,
+ type=int,
+ nargs='?',
+ help="size of the screen")
+args = arg_parser.parse_args()
+SCREEN_SIZE = args.size
+
+WIDTH_OF_FIELD_IN_METERS = 8.258302
+
+WIDTH_OF_ROBOT = 0.65
+LENGTH_OF_ROBOT = 0.8
+
+ROBOT_SIDE_TO_BALL_CENTER = 0.15  # Placeholder value
+BALL_RADIUS = 0.165
+ROBOT_SIDE_TO_HATCH_PANEL = 0.1  # Placeholder value
+HATCH_PANEL_WIDTH = 0.4826
+
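+# Conversion helpers: SCREEN_SIZE pixels corresponds to the field width,
+# WIDTH_OF_FIELD_IN_METERS.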
+def pxToM(p):
+ return p * WIDTH_OF_FIELD_IN_METERS / SCREEN_SIZE
+
+def mToPx(m):
+    return m * SCREEN_SIZE / WIDTH_OF_FIELD_IN_METERS
diff --git a/frc971/control_loops/python/drawing_constants.py b/frc971/control_loops/python/drawing_constants.py
new file mode 100644
index 0000000..89979f0
--- /dev/null
+++ b/frc971/control_loops/python/drawing_constants.py
@@ -0,0 +1,314 @@
+import cairo
+from color import Color, palette
+from constants import *
+import numpy as np
+
+
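+# Set the cairo source color. A translucent Color (a != 1) keeps its own
+# alpha; otherwise the a= argument is used.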
+def set_color(cr, color, a=1):
+ if color.a == 1.0:
+ cr.set_source_rgba(color.r, color.g, color.b, a)
+ else:
+ cr.set_source_rgba(color.r, color.g, color.b, color.a)
+
+
+def draw_px_cross(cr, x, y, length_px, color=palette["RED"]):
+ """Draws a cross with fixed dimensions in pixel space."""
+ set_color(cr, color)
+ cr.move_to(x, y - length_px)
+ cr.line_to(x, y + length_px)
+ cr.stroke()
+
+ cr.move_to(x - length_px, y)
+ cr.line_to(x + length_px, y)
+ cr.stroke()
+ set_color(cr, palette["WHITE"])
+
+
+def draw_px_x(cr, x, y, length_px1, color=palette["BLACK"]):
+    """Draws an x with fixed dimensions in pixel space."""
+ length_px = length_px1 / np.sqrt(2)
+ set_color(cr, color)
+ cr.move_to(x - length_px, y - length_px)
+ cr.line_to(x + length_px, y + length_px)
+ cr.stroke()
+
+ cr.move_to(x - length_px, y + length_px)
+ cr.line_to(x + length_px, y - length_px)
+ cr.stroke()
+ set_color(cr, palette["WHITE"])
+
+
+def draw_control_points(cr, points, width=10, radius=4, color=palette["BLUE"]):
+ for i in range(0, len(points)):
+ draw_px_x(cr, points[i][0], points[i][1], width, color)
+ set_color(cr, color)
+ cr.arc(points[i][0], points[i][1], radius, 0, 2.0 * np.pi)
+ cr.fill()
+ set_color(cr, palette["WHITE"])
+
+
+def display_text(cr, text, widtha, heighta, widthb, heightb):
+ cr.scale(widtha, -heighta)
+ cr.show_text(text)
+ cr.scale(widthb, -heightb)
+
+
+def draw_HAB(cr):
+ # BASE Constants
+ X_BASE = 0
+ Y_BASE = 0
+ R = 0.381 - .1
+ BACKWALL_X = X_BASE
+ LOADING_Y = mToPx(4.129151) - mToPx(0.696976)
+ # draw loading stations
+ cr.move_to(0, LOADING_Y)
+ cr.line_to(mToPx(0.6), LOADING_Y)
+ cr.move_to(mToPx(R), LOADING_Y)
+ cr.arc(mToPx(R), LOADING_Y, 5, 0, np.pi * 2.0)
+ cr.move_to(0, -1.0 * LOADING_Y)
+ cr.line_to(mToPx(0.6), -1.0 * LOADING_Y)
+ cr.move_to(mToPx(R), -1.0 * LOADING_Y)
+ cr.arc(mToPx(R), -1.0 * LOADING_Y, 5, 0, np.pi * 2.0)
+
+ # HAB Levels 2 and 3 called in variables backhab
+ WIDTH_BACKHAB = mToPx(1.2192)
+
+ Y_TOP_BACKHAB_BOX = Y_BASE + mToPx(0.6096)
+ BACKHAB_LV2_LENGTH = mToPx(1.016)
+
+ BACKHAB_LV3_LENGTH = mToPx(1.2192)
+ Y_LV3_BOX = Y_TOP_BACKHAB_BOX - BACKHAB_LV3_LENGTH
+
+ Y_BOTTOM_BACKHAB_BOX = Y_LV3_BOX - BACKHAB_LV2_LENGTH
+
+ # HAB LEVEL 1
+ X_LV1_BOX = BACKWALL_X + WIDTH_BACKHAB
+
+ WIDTH_LV1_BOX = mToPx(0.90805)
+ LENGTH_LV1_BOX = mToPx(1.6256)
+
+ Y_BOTTOM_LV1_BOX = Y_BASE - LENGTH_LV1_BOX
+
+ # Ramp off Level 1
+ X_RAMP = X_LV1_BOX
+
+ Y_TOP_RAMP = Y_BASE + LENGTH_LV1_BOX
+ WIDTH_TOP_RAMP = mToPx(1.20015)
+ LENGTH_TOP_RAMP = Y_BASE + mToPx(0.28306)
+
+ X_MIDDLE_RAMP = X_RAMP + WIDTH_LV1_BOX
+ Y_MIDDLE_RAMP = Y_BOTTOM_LV1_BOX
+ LENGTH_MIDDLE_RAMP = 2 * LENGTH_LV1_BOX
+ WIDTH_MIDDLE_RAMP = WIDTH_TOP_RAMP - WIDTH_LV1_BOX
+
+ Y_BOTTOM_RAMP = Y_BASE - LENGTH_LV1_BOX - LENGTH_TOP_RAMP
+
+ # Side Bars to Hold in balls
+ X_BARS = BACKWALL_X
+ WIDTH_BARS = WIDTH_BACKHAB
+ LENGTH_BARS = mToPx(0.574675)
+
+ Y_TOP_BAR = Y_TOP_BACKHAB_BOX + BACKHAB_LV2_LENGTH
+
+ Y_BOTTOM_BAR = Y_BOTTOM_BACKHAB_BOX - LENGTH_BARS
+
+ set_color(cr, palette["BLACK"])
+ cr.rectangle(BACKWALL_X, Y_TOP_BACKHAB_BOX, WIDTH_BACKHAB,
+ BACKHAB_LV2_LENGTH)
+ cr.rectangle(BACKWALL_X, Y_LV3_BOX, WIDTH_BACKHAB, BACKHAB_LV3_LENGTH)
+ cr.rectangle(BACKWALL_X, Y_BOTTOM_BACKHAB_BOX, WIDTH_BACKHAB,
+ BACKHAB_LV2_LENGTH)
+ cr.rectangle(X_LV1_BOX, Y_BASE, WIDTH_LV1_BOX, LENGTH_LV1_BOX)
+ cr.rectangle(X_LV1_BOX, Y_BOTTOM_LV1_BOX, WIDTH_LV1_BOX, LENGTH_LV1_BOX)
+ cr.rectangle(X_RAMP, Y_TOP_RAMP, WIDTH_TOP_RAMP, LENGTH_TOP_RAMP)
+ cr.rectangle(X_MIDDLE_RAMP, Y_MIDDLE_RAMP, WIDTH_MIDDLE_RAMP,
+ LENGTH_MIDDLE_RAMP)
+ cr.rectangle(X_RAMP, Y_BOTTOM_RAMP, WIDTH_TOP_RAMP, LENGTH_TOP_RAMP)
+ cr.rectangle(X_BARS, Y_TOP_BAR, WIDTH_BARS, LENGTH_BARS)
+ cr.rectangle(X_BARS, Y_BOTTOM_BAR, WIDTH_BARS, LENGTH_BARS)
+ cr.stroke()
+
+ cr.set_line_join(cairo.LINE_JOIN_ROUND)
+
+ cr.stroke()
+
+ #draw 0, 0
+ set_color(cr, palette["BLACK"])
+ cr.move_to(0, 0)
+ cr.line_to(0, 0 + mToPx(8.2296 / 2.0))
+ cr.move_to(0, 0)
+ cr.line_to(0, 0 + mToPx(-8.2296 / 2.0))
+ cr.move_to(0, 0)
+ cr.line_to(0 + mToPx(8.2296), 0)
+
+ cr.stroke()
+
+
+def draw_rockets(cr):
+ # BASE Constants
+ X_BASE = mToPx(2.41568)
+ Y_BASE = 0
+ # Robot longitudinal radius
+ R = 0.381
+ near_side_rocket_center = [
+ X_BASE + mToPx((2.89973 + 3.15642) / 2.0), Y_BASE + mToPx(
+ (3.86305 + 3.39548) / 2.0)
+ ]
+ middle_rocket_center = [
+ X_BASE + mToPx((3.15642 + 3.6347) / 2.0), Y_BASE + mToPx(
+ (3.39548 + 3.392380) / 2.0)
+ ]
+ far_side_rocket_center = [
+ X_BASE + mToPx((3.63473 + 3.89984) / 2.0), Y_BASE + mToPx(
+ (3.39238 + 3.86305) / 2.0)
+ ]
+
+ cr.move_to(near_side_rocket_center[0], near_side_rocket_center[1])
+ cr.line_to(near_side_rocket_center[0] - 0.8 * mToPx(0.866),
+ near_side_rocket_center[1] - 0.8 * mToPx(0.5))
+ cr.move_to(near_side_rocket_center[0] - R * mToPx(0.866),
+ near_side_rocket_center[1] - R * mToPx(0.5))
+ cr.arc(near_side_rocket_center[0] - R * mToPx(0.866),
+ near_side_rocket_center[1] - R * mToPx(0.5), 5, 0, np.pi * 2.0)
+
+ cr.move_to(middle_rocket_center[0], middle_rocket_center[1])
+ cr.line_to(middle_rocket_center[0], middle_rocket_center[1] - mToPx(0.8))
+ cr.move_to(middle_rocket_center[0], middle_rocket_center[1] - mToPx(R))
+ cr.arc(middle_rocket_center[0], middle_rocket_center[1] - mToPx(R), 5, 0,
+ np.pi * 2.0)
+
+ cr.move_to(far_side_rocket_center[0], far_side_rocket_center[1])
+ cr.line_to(far_side_rocket_center[0] + 0.8 * mToPx(0.866),
+ far_side_rocket_center[1] - 0.8 * mToPx(0.5))
+ cr.move_to(far_side_rocket_center[0] + R * mToPx(0.866),
+ far_side_rocket_center[1] - R * mToPx(0.5))
+ cr.arc(far_side_rocket_center[0] + R * mToPx(0.866),
+ far_side_rocket_center[1] - R * mToPx(0.5), 5, 0, np.pi * 2.0)
+
+ #print(far_side_rocket_center)
+ near_side_rocket_center = [
+ X_BASE + mToPx((2.89973 + 3.15642) / 2.0), Y_BASE - mToPx(
+ (3.86305 + 3.39548) / 2.0)
+ ]
+ middle_rocket_center = [
+ X_BASE + mToPx((3.15642 + 3.6347) / 2.0), Y_BASE - mToPx(
+ (3.39548 + 3.392380) / 2.0)
+ ]
+ far_side_rocket_center = [
+ X_BASE + mToPx((3.63473 + 3.89984) / 2.0), Y_BASE - mToPx(
+ (3.39238 + 3.86305) / 2.0)
+ ]
+
+ cr.move_to(near_side_rocket_center[0], near_side_rocket_center[1])
+ cr.line_to(near_side_rocket_center[0] - 0.8 * mToPx(0.866),
+ near_side_rocket_center[1] + 0.8 * mToPx(0.5))
+
+ cr.move_to(middle_rocket_center[0], middle_rocket_center[1])
+ cr.line_to(middle_rocket_center[0], middle_rocket_center[1] + mToPx(0.8))
+
+ cr.move_to(far_side_rocket_center[0], far_side_rocket_center[1])
+ cr.line_to(far_side_rocket_center[0] + 0.8 * mToPx(0.866),
+ far_side_rocket_center[1] + 0.8 * mToPx(0.5))
+
+ # Leftmost Line
+ cr.move_to(X_BASE + mToPx(2.89973), Y_BASE + mToPx(3.86305))
+ cr.line_to(X_BASE + mToPx(3.15642), Y_BASE + mToPx(3.39548))
+
+ # Top Line
+ cr.move_to(X_BASE + mToPx(3.15642), Y_BASE + mToPx(3.39548))
+ cr.line_to(X_BASE + mToPx(3.63473), Y_BASE + mToPx(3.39238))
+
+ #Rightmost Line
+ cr.move_to(X_BASE + mToPx(3.63473), Y_BASE + mToPx(3.39238))
+ cr.line_to(X_BASE + mToPx(3.89984), Y_BASE + mToPx(3.86305))
+
+ #Back Line
+ cr.move_to(X_BASE + mToPx(2.89973), Y_BASE + mToPx(3.86305))
+ cr.line_to(X_BASE + mToPx(3.89984), Y_BASE + mToPx(3.86305))
+
+ # Bottom Rocket
+ # Leftmost Line
+ cr.move_to(X_BASE + mToPx(2.89973), Y_BASE - mToPx(3.86305))
+ cr.line_to(X_BASE + mToPx(3.15642), Y_BASE - mToPx(3.39548))
+
+ # Top Line
+ cr.move_to(X_BASE + mToPx(3.15642), Y_BASE - mToPx(3.39548))
+ cr.line_to(X_BASE + mToPx(3.63473), Y_BASE - mToPx(3.39238))
+
+ #Rightmost Line
+ cr.move_to(X_BASE + mToPx(3.63473), Y_BASE - mToPx(3.39238))
+ cr.line_to(X_BASE + mToPx(3.89984), Y_BASE - mToPx(3.86305))
+
+ #Back Line
+ cr.move_to(X_BASE + mToPx(2.89973), Y_BASE - mToPx(3.86305))
+ cr.line_to(X_BASE + mToPx(3.89984), Y_BASE - mToPx(3.86305))
+
+ cr.stroke()
+
+
+def draw_cargo_ship(cr):
+ # BASE Constants
+ X_BASE = 0 + mToPx(5.59435)
+ Y_BASE = 0 + 0 #mToPx(4.129151)
+ R = 0.381 - 0.1
+
+ FRONT_PEG_DELTA_Y = mToPx(0.276352)
+ cr.move_to(X_BASE, Y_BASE + FRONT_PEG_DELTA_Y)
+ cr.line_to(X_BASE - mToPx(0.8), Y_BASE + FRONT_PEG_DELTA_Y)
+
+ cr.move_to(X_BASE, Y_BASE + FRONT_PEG_DELTA_Y)
+ cr.arc(X_BASE - mToPx(R), Y_BASE + FRONT_PEG_DELTA_Y, 5, 0, np.pi * 2.0)
+
+ cr.move_to(X_BASE, Y_BASE - FRONT_PEG_DELTA_Y)
+ cr.line_to(X_BASE - mToPx(0.8), Y_BASE - FRONT_PEG_DELTA_Y)
+
+ cr.move_to(X_BASE, Y_BASE - FRONT_PEG_DELTA_Y)
+ cr.arc(X_BASE - mToPx(R), Y_BASE - FRONT_PEG_DELTA_Y, 5, 0, np.pi * 2.0)
+
+ SIDE_PEG_Y = mToPx(1.41605 / 2.0)
+ SIDE_PEG_X = X_BASE + mToPx(1.148842)
+ SIDE_PEG_DX = mToPx(0.55245)
+
+ cr.move_to(SIDE_PEG_X, SIDE_PEG_Y)
+ cr.line_to(SIDE_PEG_X, SIDE_PEG_Y + mToPx(0.8))
+ cr.move_to(SIDE_PEG_X, SIDE_PEG_Y + mToPx(R))
+ cr.arc(SIDE_PEG_X, SIDE_PEG_Y + mToPx(R), 5, 0, np.pi * 2.0)
+
+ cr.move_to(SIDE_PEG_X + SIDE_PEG_DX, SIDE_PEG_Y)
+ cr.line_to(SIDE_PEG_X + SIDE_PEG_DX, SIDE_PEG_Y + mToPx(0.8))
+ cr.move_to(SIDE_PEG_X + SIDE_PEG_DX, SIDE_PEG_Y + mToPx(R))
+ cr.arc(SIDE_PEG_X + SIDE_PEG_DX, SIDE_PEG_Y + mToPx(R), 5, 0, np.pi * 2.0)
+
+ cr.move_to(SIDE_PEG_X + 2.0 * SIDE_PEG_DX, SIDE_PEG_Y)
+ cr.line_to(SIDE_PEG_X + 2.0 * SIDE_PEG_DX, SIDE_PEG_Y + mToPx(0.8))
+ cr.move_to(SIDE_PEG_X + 2.0 * SIDE_PEG_DX, SIDE_PEG_Y + mToPx(R))
+ cr.arc(SIDE_PEG_X + 2.0 * SIDE_PEG_DX, SIDE_PEG_Y + mToPx(R), 5, 0,
+ np.pi * 2.0)
+
+ cr.move_to(SIDE_PEG_X, -1.0 * SIDE_PEG_Y)
+ cr.line_to(SIDE_PEG_X, -1.0 * SIDE_PEG_Y - mToPx(0.8))
+ cr.move_to(SIDE_PEG_X, -1.0 * SIDE_PEG_Y - mToPx(R))
+ cr.arc(SIDE_PEG_X, -1.0 * SIDE_PEG_Y - mToPx(R), 5, 0, np.pi * 2.0)
+
+ cr.move_to(SIDE_PEG_X + SIDE_PEG_DX, -1.0 * SIDE_PEG_Y)
+ cr.line_to(SIDE_PEG_X + SIDE_PEG_DX, -1.0 * SIDE_PEG_Y - mToPx(0.8))
+ cr.move_to(SIDE_PEG_X + SIDE_PEG_DX, -1.0 * SIDE_PEG_Y - mToPx(R))
+ cr.arc(SIDE_PEG_X + SIDE_PEG_DX, -1.0 * SIDE_PEG_Y - mToPx(R), 5, 0,
+ np.pi * 2.0)
+
+ cr.move_to(SIDE_PEG_X + 2.0 * SIDE_PEG_DX, -1.0 * SIDE_PEG_Y)
+ cr.line_to(SIDE_PEG_X + 2.0 * SIDE_PEG_DX, -1.0 * SIDE_PEG_Y - mToPx(0.8))
+ cr.move_to(SIDE_PEG_X + 2.0 * SIDE_PEG_DX, -1.0 * SIDE_PEG_Y - mToPx(R))
+ cr.arc(SIDE_PEG_X + 2.0 * SIDE_PEG_DX, -1.0 * SIDE_PEG_Y - mToPx(R), 5, 0,
+ np.pi * 2.0)
+
+ cr.rectangle(X_BASE, Y_BASE - mToPx(1.41605 / 2.0), mToPx(2.43205),
+ mToPx(1.41605))
+ cr.stroke()
+
+
+def draw_points(cr, p, size):
+ for i in range(0, len(p)):
+ draw_px_cross(cr, p[i][0], p[i][1], size,
+ Color(0, np.sqrt(0.2 * i), 0))
diff --git a/frc971/control_loops/python/graph.py b/frc971/control_loops/python/graph.py
new file mode 100644
index 0000000..7e98a38
--- /dev/null
+++ b/frc971/control_loops/python/graph.py
@@ -0,0 +1,178 @@
+from constants import *
+import cairo
+from color import Color, palette
+from points import Points
+from drawing_constants import *
+from libspline import Spline, DistanceSpline, Trajectory
+
+AXIS_MARGIN_SPACE = 40
+
+
+class Graph():  # TODO: Remove computer calculation
+ def __init__(self, cr, mypoints):
+ # Background Box
+ set_color(cr, palette["WHITE"])
+ cr.rectangle(-1.0 * SCREEN_SIZE, -0.5 * SCREEN_SIZE, SCREEN_SIZE,
+ SCREEN_SIZE * 0.6)
+ cr.fill()
+
+ cr.set_source_rgb(0, 0, 0)
+ cr.rectangle(-1.0 * SCREEN_SIZE, -0.5 * SCREEN_SIZE, SCREEN_SIZE,
+ SCREEN_SIZE * 0.6)
+ #Axis
+ cr.move_to(-1.0 * SCREEN_SIZE + AXIS_MARGIN_SPACE,
+ -0.5 * SCREEN_SIZE + AXIS_MARGIN_SPACE) # Y
+ cr.line_to(-1.0 * SCREEN_SIZE + AXIS_MARGIN_SPACE,
+ 0.1 * SCREEN_SIZE - 10)
+
+ cr.move_to(-1.0 * SCREEN_SIZE + AXIS_MARGIN_SPACE,
+ -0.5 * SCREEN_SIZE + AXIS_MARGIN_SPACE) # X
+ cr.line_to(-10, -0.5 * SCREEN_SIZE + AXIS_MARGIN_SPACE)
+ cr.stroke()
+
+ skip = 2
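+        # Sample the planned trajectory every dT seconds (5.05 ms).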
+ dT = 0.00505
+ start = AXIS_MARGIN_SPACE - SCREEN_SIZE
+ end = -2.0 * AXIS_MARGIN_SPACE
+ height = 0.5 * (SCREEN_SIZE) - AXIS_MARGIN_SPACE
+ zero = AXIS_MARGIN_SPACE - SCREEN_SIZE / 2.0
+ if mypoints.getLibsplines():
+ distanceSpline = DistanceSpline(mypoints.getLibsplines())
+ traj = Trajectory(distanceSpline)
+ traj.Plan()
+ XVA = traj.GetPlanXVA(dT)
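+            # XVA holds the planned distance, velocity, and acceleration
+            # traces, one sample per dT.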
+ self.draw_x_axis(cr, start, height, zero, XVA, end)
+ self.drawVelocity(cr, XVA, start, height, skip, zero, end)
+ self.drawAcceleration(cr, XVA, start, height, skip, zero,
+ AXIS_MARGIN_SPACE, end)
+ self.drawVoltage(cr, XVA, start, height, skip, traj, zero, end)
+ cr.set_source_rgb(0, 0, 0)
+ cr.move_to(-1.0 * AXIS_MARGIN_SPACE, zero + height / 2.0)
+ cr.line_to(AXIS_MARGIN_SPACE - SCREEN_SIZE, zero + height / 2.0)
+ cr.stroke()
+
+ def connectLines(self, cr, points, color):
+ for i in range(0, len(points) - 1):
+ set_color(cr, color)
+ cr.move_to(points[i][0], points[i][1])
+ cr.line_to(points[i + 1][0], points[i + 1][1])
+ cr.stroke()
+
+ def draw_x_axis(self, cr, start, height, zero, xva, end):
+ total_time = 0.00505 * len(xva[0])
+ for k in np.linspace(0, 1, 11):
+ self.tickMark(cr,
+ k * np.abs(start - end) + start, zero + height / 2.0,
+ 10, palette["BLACK"])
+ cr.move_to(k * np.abs(start - end) + start,
+ 10 + zero + height / 2.0)
+ txt_scale = SCREEN_SIZE / 1000.0
+ display_text(cr, str(round(k * total_time, 3)), txt_scale,
+ txt_scale, 1.0 / txt_scale, 1.0 / txt_scale)
+ cr.stroke()
+
+ def tickMark(self, cr, x, y, height, COLOR):
+ # X, Y is in the middle of the tick mark
+ set_color(cr, COLOR)
+ cr.move_to(x, y + (height / 2))
+ cr.line_to(x, y - (height / 2))
+ cr.stroke()
+
+ def HtickMark(self, cr, x, y, width, COLOR):
+ # X, Y is in the middle of the tick mark
+ set_color(cr, COLOR)
+ cr.move_to(x + (width / 2), y)
+ cr.line_to(x - (width / 2), y)
+ cr.stroke()
+
+ def drawVelocity(self, cr, xva, start, height, skip, zero, end):
+ COLOR = palette["RED"]
+ velocity = xva[1]
+ n_timesteps = len(velocity)
+ max_v = np.amax(velocity)
+ spacing = np.abs(start - end) / float(n_timesteps)
+ scaler = height / max_v
+ cr.set_source_rgb(1, 0, 0)
+ points = []
+ for i in range(0, len(velocity)):
+ if i % skip == 0:
+ points.append([
+ start + (i * spacing),
+ zero + height / 2.0 + (velocity[i] * scaler / 2.0)
+ ])
+ self.connectLines(cr, points, COLOR)
+
+ # draw axes marking
+ for i in np.linspace(-1, 1, 11):
+ self.HtickMark(cr, start, zero + i * height / 2.0 + height / 2.0,
+ 10, palette["BLACK"])
+ cr.set_source_rgb(1, 0, 0)
+ cr.move_to(start + 5, zero + i * height / 2.0 + height / 2.0)
+ txt_scale = SCREEN_SIZE / 1000.0
+ display_text(cr, str(round(i * max_v, 2)), txt_scale, txt_scale,
+ 1.0 / txt_scale, 1.0 / txt_scale)
+ cr.stroke()
+
+ def drawAcceleration(self, cr, xva, start, height, skip, zero, margin,
+ end):
+ COLOR = palette["BLUE"]
+ accel = xva[2]
+ max_a = np.amax(accel)
+ min_a = np.amin(accel)
+ n_timesteps = len(accel)
+ spacing = np.abs(start - end) / float(n_timesteps)
+ scaler = height / (max_a - min_a)
+ cr.set_source_rgb(1, 0, 0)
+ points = []
+ for i in range(0, len(accel)):
+ if i % skip == 0:
+ points.append([
+ start + (i * spacing), zero + ((accel[i] - min_a) * scaler)
+ ])
+ self.connectLines(cr, points, COLOR)
+
+ # draw axes marking
+ for i in np.linspace(0, 1, 11):
+ self.HtickMark(cr, -1.5 * margin, zero + i * height, 10,
+ palette["BLACK"])
+ cr.set_source_rgb(0, 0, 1)
+ cr.move_to(-1.2 * margin, zero + i * height)
+ txt_scale = SCREEN_SIZE / 1000.0
+ display_text(cr, str(round(i * (max_a - min_a) + min_a,
+ 2)), txt_scale, txt_scale,
+ 1.0 / txt_scale, 1.0 / txt_scale)
+ cr.stroke()
+
+ def drawVoltage(self, cr, xva, start, height, skip, traj, zero, end):
+ COLOR1 = palette["GREEN"]
+ COLOR2 = palette["CYAN"]
+ poses = xva[0]
+ n_timesteps = len(poses)
+ spacing = np.abs(start - end) / float(n_timesteps)
+ points1 = []
+ points2 = []
+ for i in range(0, len(poses)):
+ if i % skip == 0:
+ voltage = traj.Voltage(poses[i])
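+                    # Scale each side's voltage so +/-12V spans the plot
+                    # height, centered on the midline.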
+ points1.append([
+ start + (i * spacing),
+ zero + height / 2 + height * (voltage[0] / 24.0)
+ ])
+ points2.append([
+ start + (i * spacing),
+ zero + height / 2 + height * (voltage[1] / 24.0)
+ ])
+ self.connectLines(cr, points1, COLOR1)
+ self.connectLines(cr, points2, COLOR2)
+
+ for i in np.linspace(-1, 1, 7):
+ self.HtickMark(cr, -1.0 * SCREEN_SIZE,
+ zero + i * height / 2.0 + height / 2.0, 10,
+ palette["BLACK"])
+ cr.set_source_rgb(0, 1, 1)
+ cr.move_to(-1.0 * SCREEN_SIZE,
+ zero + i * height / 2.0 + height / 2.0)
+ txt_scale = SCREEN_SIZE / 1000.0
+ display_text(cr, str(round(i * 12.0, 2)), txt_scale, txt_scale,
+ 1.0 / txt_scale, 1.0 / txt_scale)
+ cr.stroke()
diff --git a/frc971/control_loops/python/libspline.py b/frc971/control_loops/python/libspline.py
old mode 100644
new mode 100755
diff --git a/frc971/control_loops/python/path_edit.py b/frc971/control_loops/python/path_edit.py
old mode 100644
new mode 100755
index e59286f..0d1b263
--- a/frc971/control_loops/python/path_edit.py
+++ b/frc971/control_loops/python/path_edit.py
@@ -3,67 +3,23 @@
import os
import sys
import copy
-import basic_window
from color import Color, palette
import random
import gi
import numpy as np
-from libspline import Spline
import scipy.spatial.distance
gi.require_version('Gtk', '3.0')
+gi.require_version('Gdk', '3.0')
from gi.repository import Gdk, Gtk, GLib
import cairo
+from libspline import Spline, DistanceSpline, Trajectory
import enum
-import json # For writing to json files
-
-from basic_window import OverrideMatrix, identity, quit_main_loop, set_color
-
-WIDTH_OF_FIELD_IN_METERS = 8.258302
-PIXELS_ON_SCREEN = 300
-
-
-def pxToM(p):
- return p * WIDTH_OF_FIELD_IN_METERS / PIXELS_ON_SCREEN
-
-
-def mToPx(m):
- return (m*PIXELS_ON_SCREEN/WIDTH_OF_FIELD_IN_METERS)
-
-def px(cr):
- return OverrideMatrix(cr, identity)
-
-
-def draw_px_cross(cr, x, y, length_px, color=palette["RED"]):
- """Draws a cross with fixed dimensions in pixel space."""
- set_color(cr, color)
- cr.move_to(x, y - length_px)
- cr.line_to(x, y + length_px)
- cr.stroke()
-
- cr.move_to(x - length_px, y)
- cr.line_to(x + length_px, y)
- cr.stroke()
- set_color(cr, palette["LIGHT_GREY"])
-
-
-def draw_px_x(cr, x, y, length_px1, color=palette["BLACK"]):
- """Draws a x with fixed dimensions in pixel space."""
- length_px = length_px1 / np.sqrt(2)
- set_color(cr, color)
- cr.move_to(x - length_px, y - length_px)
- cr.line_to(x + length_px, y + length_px)
- cr.stroke()
-
- cr.move_to(x - length_px, y + length_px)
- cr.line_to(x + length_px, y - length_px)
- cr.stroke()
- set_color(cr, palette["LIGHT_GREY"])
-
-
-def draw_points(cr, p, size):
- for i in range(0, len(p)):
- draw_px_cross(cr, p[i][0], p[i][1], size, Color(
- 0, np.sqrt(0.2 * i), 0))
+import json
+from basic_window import *
+from constants import *
+from drawing_constants import *
+from points import Points
+from graph import Graph
class Mode(enum.Enum):
@@ -72,61 +28,25 @@
kEditing = 2
kExporting = 3
kImporting = 4
- kConstraint = 5
-class ConstraintType(enum.Enum):
- kMaxSpeed = 0
- kMaxAcceleration = 1
-
-
-def display_text(cr, text, widtha, heighta, widthb, heightb):
- cr.scale(widtha, -heighta)
- cr.show_text(text)
- cr.scale(widthb, -heightb)
-
-
-def redraw(needs_redraw, window):
- print("Redrew")
- if not needs_redraw:
- window.queue_draw()
-
-
-class Constraint():
- def __init__(self, start, end, constraint, value):
- self.start = start #Array with index and distance from start of spline
- self.end = end #Array with index and distance from start of spline
- self.constraint = constraint #INT
- self.value = value #INT
- if self.constraint == 0:
- self.conName = "kMaxSpeed"
- else:
- self.conName = "kMaxAcceleration"
-
- def toString(self):
-
- return "START: " + str(self.start[0]) + ", " + str(
- self.start[1]) + " | END: " + str(self.end[0]) + ", " + str(
- self.end[1]) + " | " + str(self.conName) + ": " + str(
- self.value)
-
-
-class GTK_Widget(basic_window.BaseWindow):
+class GTK_Widget(BaseWindow):
"""Create a GTK+ widget on which we will draw using Cairo"""
-
def __init__(self):
super(GTK_Widget, self).__init__()
+ self.points = Points()
+
# init field drawing
# add default spline for testing purposes
# init editing / viewing modes and pointer location
self.mode = Mode.kPlacing
self.x = 0
self.y = 0
-
module_path = os.path.dirname(os.path.realpath(sys.argv[0]))
self.path_to_export = os.path.join(module_path,
- 'points_for_pathedit.json')
+ 'points_for_pathedit.json')
+
# update list of control points
self.point_selected = False
# self.adding_spline = False
@@ -145,32 +65,20 @@
for c in palette:
self.colors.append(palette[c])
- self.selected_points = []
- self.splines = []
self.reinit_extents()
self.inStart = None
self.inEnd = None
- self.inConstraint = None
self.inValue = None
self.startSet = False
- #John also wrote this
- def add_point(self, x, y):
- if (len(self.selected_points) < 6):
- self.selected_points.append([pxToM(x), pxToM(y)])
- if (len(self.selected_points) == 6):
- self.mode = Mode.kEditing
- self.splines.append(np.array(self.selected_points))
- self.selected_points = []
-
"""set extents on images"""
def reinit_extents(self):
- self.extents_x_min = -800
- self.extents_x_max = 800
- self.extents_y_min = -800
- self.extents_y_max = 800
+ self.extents_x_min = -1.0 * SCREEN_SIZE
+ self.extents_x_max = SCREEN_SIZE
+ self.extents_y_min = -1.0 * SCREEN_SIZE
+ self.extents_y_max = SCREEN_SIZE
    # This needs to be rewritten with numpy; I don't think this ought to have
    # SciPy as a dependency.
@@ -184,192 +92,112 @@
def get_nearest_point(self):
return self.all_controls[self.get_index_of_nearest_point()]
- def set_index_to_nearest_spline_point(self):
- nearest = 50
- index_of_closest = 0
- self.spline_edit = 0
- cur_p = [self.x, self.y]
-
- for index_splines, points in enumerate(self.spline):
- for index_points, point in enumerate(points.curve):
- # pythagorean
- distance = np.sqrt((cur_p[0] - mToPx(point[0]))**2 +
- (cur_p[1] - mToPx(point[1])**2))
- if distance < nearest:
- nearest = distance
- print("DISTANCE: ", distance, " | INDEX: ", index_points)
- index_of_closest = index_points
- self.index_of_edit = index_of_closest
- self.spline_edit = index_splines
- self.held_x = self.x
- if self.startSet == False:
- self.inStart = [self.index_of_edit, self.findDistance()]
- self.startSet = True
- else:
- self.inEnd = [self.index_of_edit, self.findDistance()]
- self.startSet = False
- self.mode = Mode.kEditing
- self.spline_edit = -1
- self.index_of_edit = -1
-
- print("Nearest: " + str(nearest))
- print("Spline: " + str(self.spline_edit))
- print("Index: " + str(index_of_closest))
-
- def findDistance(self):
- """ findDistance goes through each point on the spline finding distance to that point from the point before.
- It does this to find the the length of the spline to the point that is currently selected.
- """
- distance = 0
- points = self.curves[self.spline_edit]
- for index, point in enumerate(points):
- if index > 0 and index <= self.index_of_edit:
- distance += np.sqrt((points[index - 1][0] - point[0])**2 +
- (points[index - 1][1] - point[1])**2)
- return distance
-
- def draw_HAB(self, cr):
- print("WENT IN")
- # BASE Constants
- X_BASE = -450+mToPx(2.41568)
- Y_BASE = -150+mToPx(4.129151)
-
- BACKWALL_X = -450
-
- # HAB Levels 2 and 3 called in variables backhab
-
- WIDTH_BACKHAB = mToPx(1.2192)
-
- Y_TOP_BACKHAB_BOX = Y_BASE + mToPx(0.6096)
- BACKHAB_LV2_LENGTH = mToPx(1.016)
-
- BACKHAB_LV3_LENGTH = mToPx(1.2192)
- Y_LV3_BOX = Y_TOP_BACKHAB_BOX - BACKHAB_LV3_LENGTH
-
- Y_BOTTOM_BACKHAB_BOX = Y_LV3_BOX -BACKHAB_LV2_LENGTH
-
- # HAB LEVEL 1
- X_LV1_BOX = BACKWALL_X + WIDTH_BACKHAB
-
- WIDTH_LV1_BOX = mToPx(0.90805)
- LENGTH_LV1_BOX = mToPx(1.6256)
-
- Y_BOTTOM_LV1_BOX = Y_BASE - LENGTH_LV1_BOX
-
- # Ramp off Level 1
- X_RAMP = X_LV1_BOX
-
- Y_TOP_RAMP = Y_BASE + LENGTH_LV1_BOX
- WIDTH_TOP_RAMP = mToPx(1.20015)
- LENGTH_TOP_RAMP = Y_BASE + mToPx(0.28306)
-
- X_MIDDLE_RAMP = X_RAMP + WIDTH_LV1_BOX
- Y_MIDDLE_RAMP = Y_BOTTOM_LV1_BOX
- LENGTH_MIDDLE_RAMP = 2*LENGTH_LV1_BOX
- WIDTH_MIDDLE_RAMP = WIDTH_TOP_RAMP - WIDTH_LV1_BOX
-
- Y_BOTTOM_RAMP = Y_BASE - LENGTH_LV1_BOX - LENGTH_TOP_RAMP
-
- # Side Bars to Hold in balls
- X_BARS = BACKWALL_X
- WIDTH_BARS = WIDTH_BACKHAB
- LENGTH_BARS = mToPx(0.574675)
-
- Y_TOP_BAR = Y_TOP_BACKHAB_BOX + BACKHAB_LV2_LENGTH
-
- Y_BOTTOM_BAR = Y_BOTTOM_BACKHAB_BOX - LENGTH_BARS
-
- set_color(cr, palette["BLACK"])
- cr.rectangle(BACKWALL_X, Y_TOP_BACKHAB_BOX, WIDTH_BACKHAB,
- BACKHAB_LV2_LENGTH)
- cr.rectangle(BACKWALL_X, Y_LV3_BOX, WIDTH_BACKHAB,
- BACKHAB_LV3_LENGTH)
- cr.rectangle(BACKWALL_X, Y_BOTTOM_BACKHAB_BOX, WIDTH_BACKHAB,
- BACKHAB_LV2_LENGTH)
- cr.rectangle(X_LV1_BOX, Y_BASE, WIDTH_LV1_BOX, LENGTH_LV1_BOX)
- cr.rectangle(X_LV1_BOX, Y_BOTTOM_LV1_BOX, WIDTH_LV1_BOX,
- LENGTH_LV1_BOX)
- cr.rectangle(X_RAMP, Y_TOP_RAMP, WIDTH_TOP_RAMP, LENGTH_TOP_RAMP)
- cr.rectangle(X_MIDDLE_RAMP, Y_MIDDLE_RAMP, WIDTH_MIDDLE_RAMP,
- LENGTH_MIDDLE_RAMP)
- cr.rectangle(X_RAMP, Y_BOTTOM_RAMP, WIDTH_TOP_RAMP, LENGTH_TOP_RAMP)
- cr.rectangle(X_BARS, Y_TOP_BAR, WIDTH_BARS, LENGTH_BARS)
- cr.rectangle(X_BARS, Y_BOTTOM_BAR, WIDTH_BARS, LENGTH_BARS)
- cr.stroke()
- #draw_px_x(cr, BACKWALL_X, 0, 10) # Midline Point
- #draw_px_x(cr, X_BASE, Y_BASE, 10) # Bases
- cr.set_line_join(cairo.LINE_JOIN_ROUND)
-
- cr.stroke()
-
- def draw_rockets(self, cr):
- # BASE Constants
- X_BASE = -450+mToPx(2.41568)
- Y_BASE = -150+mToPx(4.129151)
-
- # Top Rocket
-
- # Leftmost Line
- cr.move_to(X_BASE + mToPx(2.89973), Y_BASE + mToPx(3.86305))
- cr.line_to(X_BASE + mToPx(3.15642), Y_BASE + mToPx(3.39548))
-
- # Top Line
- cr.move_to(X_BASE + mToPx(3.15642), Y_BASE + mToPx(3.39548))
- cr.line_to(X_BASE + mToPx(3.63473), Y_BASE + mToPx(3.39238))
-
- #Rightmost Line
- cr.move_to(X_BASE + mToPx(3.63473), Y_BASE + mToPx(3.39238))
- cr.line_to(X_BASE + mToPx(3.89984), Y_BASE + mToPx(3.86305))
-
- #Back Line
- cr.move_to(X_BASE + mToPx(2.89973), Y_BASE + mToPx(3.86305))
- cr.line_to(X_BASE + mToPx(3.89984), Y_BASE + mToPx(3.86305))
-
- # Bottom Rocket
-
- # Leftmost Line
- cr.move_to(X_BASE + mToPx(2.89973), Y_BASE - mToPx(3.86305))
- cr.line_to(X_BASE + mToPx(3.15642), Y_BASE - mToPx(3.39548))
-
- # Top Line
- cr.move_to(X_BASE + mToPx(3.15642), Y_BASE - mToPx(3.39548))
- cr.line_to(X_BASE + mToPx(3.63473), Y_BASE - mToPx(3.39238))
-
- #Rightmost Line
- cr.move_to(X_BASE + mToPx(3.63473), Y_BASE - mToPx(3.39238))
- cr.line_to(X_BASE + mToPx(3.89984), Y_BASE - mToPx(3.86305))
-
- #Back Line
- cr.move_to(X_BASE + mToPx(2.89973), Y_BASE - mToPx(3.86305))
- cr.line_to(X_BASE + mToPx(3.89984), Y_BASE - mToPx(3.86305))
-
- cr.stroke()
-
- def draw_cargo_ship(self, cr):
- # BASE Constants
- X_BASE = -450+mToPx(2.41568)
- Y_BASE = -150+mToPx(4.129151)
-
- cr.rectangle(X_BASE + mToPx(3.15912), Y_BASE - mToPx(0.72326),
- mToPx(2.43205), mToPx(1.41605))
-
- cr.stroke()
-
def draw_field_elements(self, cr):
- self.draw_HAB(cr)
- self.draw_rockets(cr)
- self.draw_cargo_ship(cr)
+ draw_HAB(cr)
+ draw_rockets(cr)
+ draw_cargo_ship(cr)
- def handle_draw(self, cr):
- # print(self.new_point)
- # print("SELF.POINT_SELECTED: " + str(self.point_selected))
+ def draw_robot_at_point(self, cr, i, p, spline):
+ p1 = [mToPx(spline.Point(i)[0]), mToPx(spline.Point(i)[1])]
+ p2 = [mToPx(spline.Point(i + p)[0]), mToPx(spline.Point(i + p)[1])]
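+        # p1 and p2 are two nearby samples of the spline in pixel space; the
+        # vector between them gives the robot's heading at p1.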
- # begin drawing
+ #Calculate Robot
+ distance = np.sqrt((p2[1] - p1[1])**2 + (p2[0] - p1[0])**2)
+ x_difference_o = p2[0] - p1[0]
+ y_difference_o = p2[1] - p1[1]
+ x_difference = x_difference_o * mToPx(LENGTH_OF_ROBOT / 2) / distance
+ y_difference = y_difference_o * mToPx(LENGTH_OF_ROBOT / 2) / distance
+
+ front_middle = []
+ front_middle.append(p1[0] + x_difference)
+ front_middle.append(p1[1] + y_difference)
+
+ back_middle = []
+ back_middle.append(p1[0] - x_difference)
+ back_middle.append(p1[1] - y_difference)
+
+ slope = [-(1 / x_difference_o) / (1 / y_difference_o)]
+ angle = np.arctan(slope)
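+        # angle parameterizes the direction perpendicular to the heading, so
+        # (sin(angle), cos(angle)) points across the robot.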
+
+ x_difference = np.sin(angle[0]) * mToPx(WIDTH_OF_ROBOT / 2)
+ y_difference = np.cos(angle[0]) * mToPx(WIDTH_OF_ROBOT / 2)
+
+ front_1 = []
+ front_1.append(front_middle[0] - x_difference)
+ front_1.append(front_middle[1] - y_difference)
+
+ front_2 = []
+ front_2.append(front_middle[0] + x_difference)
+ front_2.append(front_middle[1] + y_difference)
+
+ back_1 = []
+ back_1.append(back_middle[0] - x_difference)
+ back_1.append(back_middle[1] - y_difference)
+
+ back_2 = []
+ back_2.append(back_middle[0] + x_difference)
+ back_2.append(back_middle[1] + y_difference)
+
+ x_difference = x_difference_o * mToPx(
+ LENGTH_OF_ROBOT / 2 + ROBOT_SIDE_TO_BALL_CENTER) / distance
+ y_difference = y_difference_o * mToPx(
+ LENGTH_OF_ROBOT / 2 + ROBOT_SIDE_TO_BALL_CENTER) / distance
+
+ #Calculate Ball
+ ball_center = []
+ ball_center.append(p1[0] + x_difference)
+ ball_center.append(p1[1] + y_difference)
+
+ x_difference = x_difference_o * mToPx(
+ LENGTH_OF_ROBOT / 2 + ROBOT_SIDE_TO_HATCH_PANEL) / distance
+ y_difference = y_difference_o * mToPx(
+ LENGTH_OF_ROBOT / 2 + ROBOT_SIDE_TO_HATCH_PANEL) / distance
+
+ #Calculate Panel
+ panel_center = []
+ panel_center.append(p1[0] + x_difference)
+ panel_center.append(p1[1] + y_difference)
+
+ x_difference = np.sin(angle[0]) * mToPx(HATCH_PANEL_WIDTH / 2)
+ y_difference = np.cos(angle[0]) * mToPx(HATCH_PANEL_WIDTH / 2)
+
+ panel_1 = []
+ panel_1.append(panel_center[0] + x_difference)
+ panel_1.append(panel_center[1] + y_difference)
+
+ panel_2 = []
+ panel_2.append(panel_center[0] - x_difference)
+ panel_2.append(panel_center[1] - y_difference)
+
+ #Draw Robot
+ cr.move_to(front_1[0], front_1[1])
+ cr.line_to(back_1[0], back_1[1])
+ cr.line_to(back_2[0], back_2[1])
+ cr.line_to(front_2[0], front_2[1])
+ cr.line_to(front_1[0], front_1[1])
+
+ cr.stroke()
+
+ #Draw Ball
+ set_color(cr, palette["ORANGE"], 0.5)
+ cr.move_to(back_middle[0], back_middle[1])
+ cr.line_to(ball_center[0], ball_center[1])
+ cr.arc(ball_center[0], ball_center[1], mToPx(BALL_RADIUS), 0,
+ 2 * np.pi)
+ cr.stroke()
+
+ #Draw Panel
+ set_color(cr, palette["YELLOW"], 0.5)
+ cr.move_to(panel_1[0], panel_1[1])
+ cr.line_to(panel_2[0], panel_2[1])
+
+ cr.stroke()
+ cr.set_source_rgba(0, 0, 0, 1)
+
+ def handle_draw(self, cr): # main
# Fill the background color of the window with grey
- set_color(cr, palette["GREY"])
+ set_color(cr, palette["WHITE"])
cr.paint()
- #Scale the field to fit within drawing area
- cr.scale(0.5, 0.5)
# Draw a extents rectangle
set_color(cr, palette["WHITE"])
@@ -378,209 +206,181 @@
self.extents_y_max - self.extents_y_min)
cr.fill()
- #Drawing the field
+ #Drawing the switch and scale in the field
cr.move_to(0, 50)
cr.show_text('Press "e" to export')
cr.show_text('Press "i" to import')
- set_color(cr, Color(0.3, 0.3, 0.3))
- cr.rectangle(-450, -150, 300, 300)
+ set_color(cr, palette["WHITE"])
+ cr.rectangle(0, -mToPx(8.2296 / 2.0), SCREEN_SIZE, SCREEN_SIZE)
cr.fill()
set_color(cr, palette["BLACK"])
- cr.rectangle(-450, -150, 300, 300)
+ cr.rectangle(0, -mToPx(8.2296 / 2.0), SCREEN_SIZE, SCREEN_SIZE)
cr.set_line_join(cairo.LINE_JOIN_ROUND)
cr.stroke()
-
self.draw_field_elements(cr)
-
y = 0
- # update all the things
+ # update everything
- if self.mode == Mode.kViewing:
+ if self.mode == Mode.kPlacing or self.mode == Mode.kViewing:
set_color(cr, palette["BLACK"])
- cr.move_to(-300, 170)
- cr.show_text("VIEWING")
- set_color(cr, palette["GREY"])
-
- if len(self.selected_points) > 0:
- print("SELECTED_POINTS: " + str(len(self.selected_points)))
- print("ITEMS:")
- # for item in self.selected_points:
- # print(str(item))
- for i, point in enumerate(self.selected_points):
- # print("I: " + str(i))
- draw_px_x(cr, point[0], point[1], 10)
- cr.move_to(point[0], point[1] - 15)
- display_text(cr, str(i), 0.5, 0.5, 2, 2)
-
- elif self.mode == Mode.kPlacing:
- set_color(cr, palette["BLACK"])
- cr.move_to(-300, 170)
- display_text(cr, "ADD", 1, 1, 1, 1)
- set_color(cr, palette["GREY"])
-
- if len(self.selected_points) > 0:
- print("SELECTED_POINTS: " + str(len(self.selected_points)))
- print("ITEMS:")
- for item in self.selected_points:
- print(str(item))
- for i, point in enumerate(self.selected_points):
- print("I: " + str(i))
+ cr.move_to(-SCREEN_SIZE, 170)
+ plotPoints = self.points.getPoints()
+ if plotPoints:
+ for i, point in enumerate(plotPoints):
draw_px_x(cr, mToPx(point[0]), mToPx(point[1]), 10)
cr.move_to(mToPx(point[0]), mToPx(point[1]) - 15)
display_text(cr, str(i), 0.5, 0.5, 2, 2)
+ set_color(cr, palette["WHITE"])
elif self.mode == Mode.kEditing:
set_color(cr, palette["BLACK"])
- cr.move_to(-300, 170)
+ cr.move_to(-SCREEN_SIZE, 170)
display_text(cr, "EDITING", 1, 1, 1, 1)
- if len(self.splines) > 0:
- holder_spline = []
- for i, points in enumerate(self.splines):
- array = np.zeros(shape=(6, 2), dtype=float)
- for j, point in enumerate(points):
- array[j, 0] = mToPx(point[0])
- array[j, 1] = mToPx(point[1])
- spline = Spline(np.ascontiguousarray(np.transpose(array)))
- for k in np.linspace(0.01, 1, 100):
- cr.move_to(
- spline.Point(k - 0.01)[0],
- spline.Point(k - 0.01)[1])
- cr.line_to(spline.Point(k)[0], spline.Point(k)[1])
- cr.stroke()
- holding = [
- spline.Point(k - 0.01)[0],
- spline.Point(k - 0.01)[1]
- ]
- holder_spline.append(holding)
- self.curves.append(holder_spline)
+ if self.points.getSplines():
+ self.draw_splines(cr)
+ for i, points in enumerate(self.points.getSplines()):
- for spline, points in enumerate(self.splines):
- # for item in points:
- # print(str(item))
- for i, point in enumerate(points):
- # print("I: " + str(i))
- if spline == self.spline_edit and i == self.index_of_edit:
- draw_px_x(cr, mToPx(point[0]), mToPx(point[1]), 15,
- self.colors[spline])
- elif (spline == 0 and not i == 5) or (not i == 0
- and not i == 5):
- draw_px_x(cr, mToPx(point[0]), mToPx(point[1]), 10,
- self.colors[spline])
- cr.move_to(mToPx(point[0]), mToPx(point[1]) - 15)
- display_text(cr, str(i), 0.5, 0.5, 2, 2)
+ p0 = np.array([mToPx(points[0][0]), mToPx(points[0][1])])
+ p1 = np.array([mToPx(points[1][0]), mToPx(points[1][1])])
+ p2 = np.array([mToPx(points[2][0]), mToPx(points[2][1])])
+ p3 = np.array([mToPx(points[3][0]), mToPx(points[3][1])])
+ p4 = np.array([mToPx(points[4][0]), mToPx(points[4][1])])
+ p5 = np.array([mToPx(points[5][0]), mToPx(points[5][1])])
- elif self.mode == Mode.kConstraint:
- print("Drawn")
- set_color(cr, palette["BLACK"])
- cr.move_to(-300, 170)
- display_text(cr, "Adding Constraint", 1, 1, 1, 1)
- if len(self.splines) > 0:
- # print("Splines: " + str(len(self.splines)))
- # print("ITEMS:")
- for s, points in enumerate(self.splines):
- # for item in points:
- # print(str(item))
- for i, point in enumerate(points):
- # print("I: " + str(i))
- draw_px_x(cr, point[0], point[1], 10, self.colors[s])
- cr.move_to(point[0], point[1] - 15)
- display_text(cr, str(i), 0.5, 0.5, 2, 2)
+ draw_control_points(cr, [p0, p1, p2, p3, p4, p5])
+ first_tangent = p0 + 2.0 * (p1 - p0)
+ second_tangent = p5 + 2.0 * (p4 - p5)
+ cr.set_source_rgb(0, 0.5, 0)
+ cr.move_to(p0[0], p0[1])
+ cr.set_line_width(1.0)
+ cr.line_to(first_tangent[0], first_tangent[1])
+ cr.move_to(first_tangent[0], first_tangent[1])
+ cr.line_to(p2[0], p2[1])
- cr.paint_with_alpha(.65)
+ cr.move_to(p5[0], p5[1])
+ cr.line_to(second_tangent[0], second_tangent[1])
+ cr.move_to(second_tangent[0], second_tangent[1])
+ cr.line_to(p3[0], p3[1])
+
+ cr.stroke()
+ cr.set_line_width(2.0)
+ self.points.update_lib_spline()
+ set_color(cr, palette["WHITE"])
+
+ cr.paint_with_alpha(0.2)
+
+ mygraph = Graph(cr, self.points)
draw_px_cross(cr, self.x, self.y, 10)
- def do_key_press(self, event):
+ def draw_splines(self, cr):
+ holder_spline = []
+ for i, points in enumerate(self.points.getSplines()):
+ array = np.zeros(shape=(6, 2), dtype=float)
+ for j, point in enumerate(points):
+ array[j, 0] = point[0]
+ array[j, 1] = point[1]
+ spline = Spline(np.ascontiguousarray(np.transpose(array)))
+ for k in np.linspace(0.01, 1, 100):
+ cr.move_to(mToPx(spline.Point(k - 0.01)[0]),
+ mToPx(spline.Point(k - 0.01)[1]))
+ cr.line_to(mToPx(spline.Point(k)[0]),
+ mToPx(spline.Point(k)[1]))
+ cr.stroke()
+ holding = [
+ spline.Point(k - 0.01)[0],
+ spline.Point(k - 0.01)[1]
+ ]
+ holder_spline.append(holding)
+ if i == 0:
+ self.draw_robot_at_point(cr, 0.00, 0.01, spline)
+ self.draw_robot_at_point(cr, 1, 0.01, spline)
+ self.curves.append(holder_spline)
+
+ def mouse_move(self, event):
+ old_x = self.x
+ old_y = self.y
+ self.x = event.x
+ self.y = event.y
+ dif_x = event.x - old_x
+ dif_y = event.y - old_y
+ difs = np.array([pxToM(dif_x), pxToM(dif_y)])
+
+ if self.mode == Mode.kEditing:
+ self.spline_edit = self.points.updates_for_mouse_move(
+ self.index_of_edit, self.spline_edit, self.x, self.y, difs)
+
+ def do_key_press(self, event, file_name):
keyval = Gdk.keyval_to_lower(event.keyval)
- # print("Gdk.KEY_" + Gdk.keyval_name(keyval))
+ module_path = os.path.dirname(os.path.realpath(sys.argv[0]))
+ self.path_to_export = os.path.join(module_path,
+ "spline_jsons/" + file_name)
if keyval == Gdk.KEY_q:
print("Found q key and exiting.")
quit_main_loop()
- if keyval == Gdk.KEY_e:
- # Will export to json file
- self.mode = Mode.kEditing
- print(str(sys.argv))
- print('out to: ', self.path_to_export)
- exportList = [l.tolist() for l in self.splines]
- with open(self.path_to_export, mode='w') as points_file:
- json.dump(exportList, points_file)
- print("Wrote: " + str(self.splines))
- if keyval == Gdk.KEY_i:
- # import from json file
- self.mode = Mode.kEditing
- self.selected_points = []
- self.splines = []
- with open(self.path_to_export) as points_file:
- self.splines = json.load(points_file)
- print("Added: " + str(self.splines))
+ file_name_end = file_name[-5:]
+ if file_name_end != ".json":
+ print("Error: Filename doesn't end in .json")
+ else:
+ if keyval == Gdk.KEY_e:
+ # Will export to json file
+ self.mode = Mode.kEditing
+ print('out to: ', self.path_to_export)
+ exportList = [l.tolist() for l in self.points.getSplines()]
+ with open(self.path_to_export, mode='w') as points_file:
+ json.dump(exportList, points_file)
+
+ if keyval == Gdk.KEY_i:
+ # import from json file
+ self.mode = Mode.kEditing
+ self.points.resetPoints()
+ self.points.resetSplines()
+ with open(self.path_to_export) as points_file:
+ self.points.setUpSplines(json.load(points_file))
+
+ self.points.update_lib_spline()
+
if keyval == Gdk.KEY_p:
self.mode = Mode.kPlacing
# F0 = A1
# B1 = 2F0 - E0
        # C1 = D0 + 4F0 - 4E0
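        # These relations pin the new spline's first three control points so
        # position, velocity, and acceleration stay continuous across the
        # joint with the previous spline.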
- spline_index = len(self.splines) - 1
- self.selected_points = []
- f = self.splines[spline_index][5]
- e = self.splines[spline_index][4]
- d = self.splines[spline_index][3]
- self.selected_points.append(f)
- self.selected_points.append(f * 2 + e * -1)
- self.selected_points.append(d + f * 4 + e * -4)
-
- if keyval == Gdk.KEY_c:
- self.mode = Mode.kConstraint
+ spline_index = len(self.points.getSplines()) - 1
+ self.points.resetPoints()
+ self.points.extrapolate(
+ self.points.getSplines()[len(self.points.getSplines()) - 1][5],
+ self.points.getSplines()[len(self.points.getSplines()) - 1][4],
+ self.points.getSplines()[len(self.points.getSplines()) - 1][3])
def button_press_action(self):
if self.mode == Mode.kPlacing:
- #Check that the point clicked is on the field
- if (self.x < -150 and self.x > -450 and self.y < 150
- and self.y > -150):
- self.add_point(self.x, self.y)
+ if self.points.add_point(self.x, self.y):
+ self.mode = Mode.kEditing
elif self.mode == Mode.kEditing:
# Now after index_of_edit is not -1, the point is selected, so
# user can click for new point
if self.index_of_edit > -1 and self.held_x != self.x:
- print("INDEX OF EDIT: " + str(self.index_of_edit))
- self.splines[self.spline_edit][self.index_of_edit] = [
- pxToM(self.x), pxToM(self.y)
- ]
+ self.points.setSplines(self.spline_edit, self.index_of_edit,
+ pxToM(self.x), pxToM(self.y))
- if not self.spline_edit == len(self.splines) - 1:
- spline_edit = self.spline_edit + 1
- f = self.splines[self.spline_edit][5]
- e = self.splines[self.spline_edit][4]
- d = self.splines[self.spline_edit][3]
- self.splines[spline_edit][0] = f
- self.splines[spline_edit][1] = f * 2 + e * -1
- self.splines[spline_edit][2] = d + f * 4 + e * -4
-
- if not self.spline_edit == 0:
- spline_edit = self.spline_edit - 1
- a = self.splines[self.spline_edit][0]
- b = self.splines[self.spline_edit][1]
- c = self.splines[self.spline_edit][2]
- self.splines[spline_edit][5] = a
- self.splines[spline_edit][4] = a * 2 + b * -1
- self.splines[spline_edit][3] = c + a * 4 + b * -4
+ self.spline_edit = self.points.splineExtrapolate(
+ self.spline_edit)
self.index_of_edit = -1
self.spline_edit = -1
else:
- print("mode == 2")
# Get clicked point
# Find nearest
# Move nearest to clicked
cur_p = [pxToM(self.x), pxToM(self.y)]
- print("CUR_P: " + str(self.x) + " " + str(self.y))
# Get the distance between each for x and y
# Save the index of the point closest
                nearest = 1  # Max distance away the selected point can be, in meters
index_of_closest = 0
- for index_splines, points in enumerate(self.splines):
+ for index_splines, points in enumerate(self.points.getSplines()):
for index_points, val in enumerate(points):
- # pythagorean
distance = np.sqrt((cur_p[0] - val[0])**2 +
(cur_p[1] - val[1])**2)
if distance < nearest:
@@ -591,10 +391,6 @@
self.index_of_edit = index_of_closest
self.spline_edit = index_splines
self.held_x = self.x
- elif self.mode == Mode.kConstraint:
- print("RAN")
- self.set_index_to_nearest_spline_point()
- print("FINISHED")
def do_button_press(self, event):
print("button press activated")
@@ -612,13 +408,23 @@
print("Method_connect ran")
self.connect(event, handler)
+ def mouse_move(self, event):
+ #Changes event.x and event.y to be relative to the center.
+ x = event.x - self.drawing_area.window_shape[0] / 2
+ y = self.drawing_area.window_shape[1] / 2 - event.y
+ scale = self.drawing_area.get_current_scale()
+ event.x = x / scale + self.drawing_area.center[0]
+ event.y = y / scale + self.drawing_area.center[1]
+ self.drawing_area.mouse_move(event)
+ self.queue_draw()
+
def button_press(self, event):
print("button press activated")
o_x = event.x
o_y = event.y
x = event.x - self.drawing_area.window_shape[0] / 2
y = self.drawing_area.window_shape[1] / 2 - event.y
- scale = self.drawing_area.get_current_scale()
+ scale = 2 * self.drawing_area.get_current_scale()
event.x = x / scale + self.drawing_area.center[0]
event.y = y / scale + self.drawing_area.center[1]
self.drawing_area.do_button_press(event)
@@ -627,21 +433,17 @@
def key_press(self, event):
print("key press activated")
- self.drawing_area.do_key_press(event)
+ self.drawing_area.do_key_press(event, self.file_name_box.get_text())
self.queue_draw()
def configure(self, event):
print("configure activated")
self.drawing_area.window_shape = (event.width, event.height)
- def on_submit_click(self, widget):
- self.drawing_area.inConstraint = int(self.constraint_box.get_text())
- self.drawing_area.inValue = int(self.value_box.get_text())
-
def __init__(self):
Gtk.Window.__init__(self)
- self.set_default_size(1366, 738)
+ self.set_default_size(1.5 * SCREEN_SIZE, SCREEN_SIZE)
flowBox = Gtk.FlowBox()
flowBox.set_valign(Gtk.Align.START)
@@ -663,41 +465,25 @@
| Gdk.EventMask.SCROLL_MASK
| Gdk.EventMask.KEY_PRESS_MASK)
+        # Add the graph box
self.drawing_area = GTK_Widget()
self.eventBox.add(self.drawing_area)
self.method_connect("key-release-event", self.key_press)
self.method_connect("button-release-event", self.button_press)
self.method_connect("configure-event", self.configure)
+ self.method_connect("motion_notify_event", self.mouse_move)
- # Constraint Boxes
+ self.file_name_box = Gtk.Entry()
+ self.file_name_box.set_size_request(200, 40)
- self.start_box = Gtk.Entry()
- self.start_box.set_size_request(100, 20)
+ self.file_name_box.set_text("File")
+ self.file_name_box.set_editable(True)
- self.constraint_box = Gtk.Entry()
- self.constraint_box.set_size_request(100, 20)
-
- self.constraint_box.set_text("Constraint")
- self.constraint_box.set_editable(True)
-
- container.put(self.constraint_box, 700, 0)
-
- self.value_box = Gtk.Entry()
- self.value_box.set_size_request(100, 20)
-
- self.value_box.set_text("Value")
- self.value_box.set_editable(True)
-
- container.put(self.value_box, 700, 40)
-
- self.submit_button = Gtk.Button("Submit")
- self.submit_button.connect('clicked', self.on_submit_click)
-
- container.put(self.submit_button, 880, 0)
+ container.put(self.file_name_box, 0, 0)
self.show_all()
window = GridWindow()
-basic_window.RunApp()
+RunApp()
diff --git a/frc971/control_loops/python/points.py b/frc971/control_loops/python/points.py
new file mode 100644
index 0000000..8f94e3f
--- /dev/null
+++ b/frc971/control_loops/python/points.py
@@ -0,0 +1,110 @@
+from constants import *
+import numpy as np
+from libspline import Spline, DistanceSpline, Trajectory
+
+
+class Points():
+ def __init__(self):
+ self.points = [] # Holds all points not yet in spline
+ self.libsplines = [] # Formatted for libspline library usage
+ self.splines = [] # Formatted for drawing
+
+ def getPoints(self):
+ return self.points
+
+ def resetPoints(self):
+ self.points = []
+
+ def getLibsplines(self):
+ return self.libsplines
+
+ def splineExtrapolate(self, o_spline_edit):
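+        # Keeps neighboring segments continuous at the shared joint: for
+        # 6-point (quintic Bezier) segments, matching position and the first
+        # two derivatives gives the next segment
+        #   p0' = p5,  p1' = 2*p5 - p4,  p2' = p3 + 4*p5 - 4*p4,
+        # and the mirrored relations fix the tail of the previous segment.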
+ spline_edit = o_spline_edit
+ if not spline_edit == len(self.splines) - 1:
+ spline_edit = spline_edit + 1
+            f = self.splines[o_spline_edit][5]
+            e = self.splines[o_spline_edit][4]
+            d = self.splines[o_spline_edit][3]
+ self.splines[spline_edit][0] = f
+ self.splines[spline_edit][1] = f * 2 + e * -1
+ self.splines[spline_edit][2] = d + f * 4 + e * -4
+
+        if not o_spline_edit == 0:
+            spline_edit = o_spline_edit - 1
+            a = self.splines[o_spline_edit][0]
+            b = self.splines[o_spline_edit][1]
+            c = self.splines[o_spline_edit][2]
+ self.splines[spline_edit][5] = a
+ self.splines[spline_edit][4] = a * 2 + b * -1
+ self.splines[spline_edit][3] = c + a * 4 + b * -4
+
+ return spline_edit
+
+ def updates_for_mouse_move(self, index_of_edit, spline_edit, x, y, difs):
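+        # Indices 0 and 5 are segment endpoints; 1 and 4 are their adjacent
+        # tangent handles. Dragging an endpoint drags its two nearest handles
+        # by the same offset (difs); dragging a handle drags only its inner
+        # neighbor, so the segment keeps its local shape.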
+ if index_of_edit > -1:
+ self.splines[spline_edit][index_of_edit] = [pxToM(x), pxToM(y)]
+
+ if index_of_edit == 5:
+ self.splines[spline_edit][
+ index_of_edit -
+ 2] = self.splines[spline_edit][index_of_edit - 2] + difs
+ self.splines[spline_edit][
+ index_of_edit -
+ 1] = self.splines[spline_edit][index_of_edit - 1] + difs
+
+ if index_of_edit == 0:
+ self.splines[spline_edit][
+ index_of_edit +
+ 2] = self.splines[spline_edit][index_of_edit + 2] + difs
+ self.splines[spline_edit][
+ index_of_edit +
+ 1] = self.splines[spline_edit][index_of_edit + 1] + difs
+
+ if index_of_edit == 4:
+ self.splines[spline_edit][
+ index_of_edit -
+ 1] = self.splines[spline_edit][index_of_edit - 1] + difs
+
+ if index_of_edit == 1:
+ self.splines[spline_edit][
+ index_of_edit +
+ 1] = self.splines[spline_edit][index_of_edit + 1] + difs
+
+ return self.splineExtrapolate(spline_edit)
+
+ def update_lib_spline(self):
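+        # libspline's Spline expects a contiguous 2x6 matrix (row 0 = x,
+        # row 1 = y), so each 6x2 drawing-format segment is transposed first.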
+ self.libsplines = []
+ array = np.zeros(shape=(6, 2), dtype=float)
+ for points in self.splines:
+ for j, point in enumerate(points):
+ array[j, 0] = point[0]
+ array[j, 1] = point[1]
+ spline = Spline(np.ascontiguousarray(np.transpose(array)))
+ self.libsplines.append(spline)
+
+ def getSplines(self):
+ return self.splines
+
+ def resetSplines(self):
+ self.splines = []
+
+ def setUpSplines(self, newSplines):
+ self.splines = newSplines
+
+ def setSplines(self, spline_edit, index_of_edit, x, y):
+ self.splines[spline_edit][index_of_edit] = [x, y]
+
+ def add_point(self, x, y):
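+        # Accumulates up to 6 clicked points (converted px -> m); on the 6th
+        # the buffer becomes a new spline segment, is cleared, and True tells
+        # the caller to switch into editing mode.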
+ if (len(self.points) < 6):
+ self.points.append([pxToM(x), pxToM(y)])
+ if (len(self.points) == 6):
+ self.splines.append(np.array(self.points))
+ self.points = []
+ self.update_lib_spline()
+ return True
+
+ def extrapolate(self, point1, point2,
+ point3): # where point3 is 3rd to last point
+ self.points.append(point1)
+ self.points.append(point1 * 2 - point2)
+ self.points.append(point3 + point1 * 4 - point2 * 4)
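+
+
+# Minimal usage sketch (hypothetical pixel coordinates):
+#   points = Points()
+#   for x, y in [(0, 0), (40, 10), (80, 0), (120, -10), (160, 0), (200, 10)]:
+#       committed = points.add_point(x, y)  # True once the 6th point lands
+#   segment = points.getSplines()[-1]  # 6x2 control points, in meters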
diff --git a/frc971/control_loops/python/spline.py b/frc971/control_loops/python/spline.py
index e69d874..b428c9a 100644
--- a/frc971/control_loops/python/spline.py
+++ b/frc971/control_loops/python/spline.py
@@ -15,7 +15,6 @@
from frc971.control_loops.python import drivetrain
from frc971.control_loops.python import controls
import y2016.control_loops.python.drivetrain
-
"""This file is my playground for implementing spline following.
All splines here are cubic bezier splines. See
@@ -73,8 +72,9 @@
if numpy.isscalar(alpha):
alpha = [alpha]
dalpha_matrix = [[
- -3.0 * (1.0 - a)**2.0, 3.0 * (1.0 - a)**2.0 + -2.0 * 3.0 *
- (1.0 - a) * a, -3.0 * a**2.0 + 2.0 * 3.0 * (1.0 - a) * a, 3.0 * a**2.0
+ -3.0 * (1.0 - a)**2.0,
+ 3.0 * (1.0 - a)**2.0 + -2.0 * 3.0 * (1.0 - a) * a,
+ -3.0 * a**2.0 + 2.0 * 3.0 * (1.0 - a) * a, 3.0 * a**2.0
] for a in alpha]
return control_points * numpy.matrix(dalpha_matrix).T
@@ -97,8 +97,7 @@
ddalpha_matrix = [[
2.0 * 3.0 * (1.0 - a),
-2.0 * 3.0 * (1.0 - a) + -2.0 * 3.0 * (1.0 - a) + 2.0 * 3.0 * a,
- -2.0 * 3.0 * a + 2.0 * 3.0 * (1.0 - a) - 2.0 * 3.0 * a,
- 2.0 * 3.0 * a
+ -2.0 * 3.0 * a + 2.0 * 3.0 * (1.0 - a) - 2.0 * 3.0 * a, 2.0 * 3.0 * a
] for a in alpha]
return control_points * numpy.matrix(ddalpha_matrix).T
@@ -119,10 +118,8 @@
if numpy.isscalar(alpha):
alpha = [alpha]
ddalpha_matrix = [[
- -2.0 * 3.0,
- 2.0 * 3.0 + 2.0 * 3.0 + 2.0 * 3.0,
- -2.0 * 3.0 - 2.0 * 3.0 - 2.0 * 3.0,
- 2.0 * 3.0
+ -2.0 * 3.0, 2.0 * 3.0 + 2.0 * 3.0 + 2.0 * 3.0,
+ -2.0 * 3.0 - 2.0 * 3.0 - 2.0 * 3.0, 2.0 * 3.0
] for a in alpha]
return control_points * numpy.matrix(ddalpha_matrix).T
@@ -143,7 +140,8 @@
dspline_points = dspline(alpha, control_points)
return numpy.arctan2(
- numpy.array(dspline_points)[1, :], numpy.array(dspline_points)[0, :])
+ numpy.array(dspline_points)[1, :],
+ numpy.array(dspline_points)[0, :])
def dspline_theta(alpha,
@@ -219,8 +217,8 @@
dddy = numpy.array(dddspline_points)[1, :]
return -1.0 / ((dx**2.0 + dy**2.0)**2.0) * (dx * ddy - dy * ddx) * 2.0 * (
- dy * ddy + dx * ddx) + 1.0 / (dx**2.0 + dy**2.0) * (dx * dddy - dy *
- dddx)
+ dy * ddy + dx * ddx) + 1.0 / (dx**2.0 + dy**2.0) * (dx * dddy -
+ dy * dddx)
class Path(object):
@@ -230,7 +228,8 @@
self._control_points = control_points
def spline_velocity(alpha):
- return numpy.linalg.norm(dspline(alpha, self._control_points), axis=0)
+ return numpy.linalg.norm(dspline(alpha, self._control_points),
+ axis=0)
self._point_distances = [0.0]
num_alpha = 100
@@ -240,8 +239,8 @@
# might think.
for alpha in numpy.linspace(0.0, 1.0, num_alpha)[1:]:
self._point_distances.append(
- scipy.integrate.fixed_quad(spline_velocity, alpha, alpha + 1.0
- / (num_alpha - 1.0))[0] +
+ scipy.integrate.fixed_quad(spline_velocity, alpha, alpha +
+ 1.0 / (num_alpha - 1.0))[0] +
self._point_distances[-1])
def distance_to_alpha(self, distance):
@@ -256,7 +255,8 @@
if numpy.isscalar(distance):
return numpy.array([self._distance_to_alpha_scalar(distance)])
else:
- return numpy.array([self._distance_to_alpha_scalar(d) for d in distance])
+ return numpy.array(
+ [self._distance_to_alpha_scalar(d) for d in distance])
def _distance_to_alpha_scalar(self, distance):
"""Helper to compute alpha for a distance for a single scalar."""
@@ -264,15 +264,17 @@
return 0.0
elif distance >= self.length():
return 1.0
- after_index = numpy.searchsorted(
- self._point_distances, distance, side='right')
+ after_index = numpy.searchsorted(self._point_distances,
+ distance,
+ side='right')
before_index = after_index - 1
# Linearly interpolate alpha from our (sorted) distance table.
return (distance - self._point_distances[before_index]) / (
self._point_distances[after_index] -
- self._point_distances[before_index]) * (1.0 / (
- len(self._point_distances) - 1.0)) + float(before_index) / (
+ self._point_distances[before_index]) * (
+ 1.0 /
+ (len(self._point_distances) - 1.0)) + float(before_index) / (
len(self._point_distances) - 1.0)
def length(self):
@@ -287,8 +289,8 @@
# TODO(austin): need a better name...
def dxy(self, distance):
"""Returns the xy velocity as a function of distance."""
- dspline_point = dspline(
- self.distance_to_alpha(distance), self._control_points)
+ dspline_point = dspline(self.distance_to_alpha(distance),
+ self._control_points)
return dspline_point / numpy.linalg.norm(dspline_point, axis=0)
# TODO(austin): need a better name...
@@ -298,8 +300,7 @@
dspline_points = dspline(alpha, self._control_points)
ddspline_points = ddspline(alpha, self._control_points)
- norm = numpy.linalg.norm(
- dspline_points, axis=0)**2.0
+ norm = numpy.linalg.norm(dspline_points, axis=0)**2.0
return ddspline_points / norm - numpy.multiply(
dspline_points, (numpy.array(dspline_points)[0, :] *
@@ -309,10 +310,9 @@
def theta(self, distance, dspline_points=None):
"""Returns the heading as a function of distance."""
- return spline_theta(
- self.distance_to_alpha(distance),
- self._control_points,
- dspline_points=dspline_points)
+ return spline_theta(self.distance_to_alpha(distance),
+ self._control_points,
+ dspline_points=dspline_points)
def dtheta(self, distance, dspline_points=None, ddspline_points=None):
"""Returns the angular velocity as a function of distance."""
@@ -327,9 +327,14 @@
return dtheta_points / numpy.linalg.norm(dspline_points, axis=0)
- def dtheta_dt(self, distance, velocity, dspline_points=None, ddspline_points=None):
+ def dtheta_dt(self,
+ distance,
+ velocity,
+ dspline_points=None,
+ ddspline_points=None):
"""Returns the angular velocity as a function of time."""
- return self.dtheta(distance, dspline_points, ddspline_points) * velocity
+ return self.dtheta(distance, dspline_points,
+ ddspline_points) * velocity
def ddtheta(self,
distance,
@@ -400,8 +405,8 @@
return 0.0
return (f(t, y) - a0) / y
- return (RungeKutta(integrablef, v, x, dx) - v
- ) + numpy.sqrt(2.0 * a0 * dx + v * v)
+ return (RungeKutta(integrablef, v, x, dx) - v) + numpy.sqrt(2.0 * a0 * dx +
+ v * v)
class Trajectory(object):
@@ -409,8 +414,8 @@
distance_count):
self._path = path
self._drivetrain = drivetrain
- self.distances = numpy.linspace(0.0,
- self._path.length(), distance_count)
+ self.distances = numpy.linspace(0.0, self._path.length(),
+ distance_count)
self._longitudal_accel = longitudal_accel
self._lateral_accel = lateral_accel
@@ -460,9 +465,10 @@
C = K3 * v * v + K4 * v
# Note: K345 are not quite constant over the step, but we are going
# to assume they are for now.
- accelerations = [(12.0 - C[0, 0]) / K5[0, 0], (12.0 - C[1, 0]) /
- K5[1, 0], (-12.0 - C[0, 0]) / K5[0, 0],
- (-12.0 - C[1, 0]) / K5[1, 0]]
+ accelerations = [
+ (12.0 - C[0, 0]) / K5[0, 0], (12.0 - C[1, 0]) / K5[1, 0],
+ (-12.0 - C[0, 0]) / K5[0, 0], (-12.0 - C[1, 0]) / K5[1, 0]
+ ]
maxa = -float('inf')
for a in accelerations:
U = K5 * a + K3 * v * v + K4 * v
@@ -473,8 +479,8 @@
# Constrain the longitudinal acceleration to keep in a pseudo friction
# circle. This will make it so we don't floor it while in a turn and
# cause extra wheel slip.
- long_accel = numpy.sqrt(1.0 - (lateral_accel / self._lateral_accel)**
- 2.0) * self._longitudal_accel
+ long_accel = numpy.sqrt(1.0 - (
+ lateral_accel / self._lateral_accel)**2.0) * self._longitudal_accel
return min(long_accel, maxa)
def forward_pass(self, plan):
@@ -503,9 +509,10 @@
C = K3 * v * v + K4 * v
# Note: K345 are not quite constant over the step, but we are going
# to assume they are for now.
- accelerations = [(12.0 - C[0, 0]) / K5[0, 0], (12.0 - C[1, 0]) /
- K5[1, 0], (-12.0 - C[0, 0]) / K5[0, 0],
- (-12.0 - C[1, 0]) / K5[1, 0]]
+ accelerations = [
+ (12.0 - C[0, 0]) / K5[0, 0], (12.0 - C[1, 0]) / K5[1, 0],
+ (-12.0 - C[0, 0]) / K5[0, 0], (-12.0 - C[1, 0]) / K5[1, 0]
+ ]
mina = float('inf')
for a in accelerations:
U = K5 * a + K3 * v * v + K4 * v
@@ -516,8 +523,8 @@
# Constrain the longitudinal acceleration to keep in a pseudo friction
# circle. This will make it so we don't floor it while in a turn and
# cause extra wheel slip.
- long_accel = -numpy.sqrt(1.0 - (lateral_accel / self._lateral_accel)**
- 2.0) * self._longitudal_accel
+ long_accel = -numpy.sqrt(1.0 - (
+ lateral_accel / self._lateral_accel)**2.0) * self._longitudal_accel
return max(long_accel, mina)
def backward_pass(self, plan):
@@ -546,8 +553,9 @@
if distance > self.distances[-1]:
distance = self.distances[-1]
else:
- after_index = numpy.searchsorted(
- self.distances, distance, side='right')
+ after_index = numpy.searchsorted(self.distances,
+ distance,
+ side='right')
before_index = after_index - 1
vforwards = integrate_accel_for_distance(
@@ -586,14 +594,15 @@
return U
def goal_state(self, distance, velocity):
- width = (
- self._drivetrain.robot_radius_l + self._drivetrain.robot_radius_r)
+ width = (self._drivetrain.robot_radius_l +
+ self._drivetrain.robot_radius_r)
goal = numpy.matrix(numpy.zeros((5, 1)))
goal[0:2, :] = self._path.xy(distance)
goal[2, :] = self._path.theta(distance)
- Ter = numpy.linalg.inv(numpy.matrix([[0.5, 0.5], [-1.0 / width, 1.0 / width]]))
+ Ter = numpy.linalg.inv(
+ numpy.matrix([[0.5, 0.5], [-1.0 / width, 1.0 / width]]))
goal[3:5, :] = Ter * numpy.matrix(
[[velocity], [self._path.dtheta_dt(distance, velocity)]])
return goal
@@ -618,25 +627,23 @@
# Compute theta and the two derivatives
theta = spline_theta(alphas, control_points, dspline_points=dspline_points)
- dtheta = dspline_theta(
- alphas, control_points, dspline_points=dspline_points)
- ddtheta = ddspline_theta(
- alphas,
- control_points,
- dspline_points=dspline_points,
- dddspline_points=dddspline_points)
+ dtheta = dspline_theta(alphas,
+ control_points,
+ dspline_points=dspline_points)
+ ddtheta = ddspline_theta(alphas,
+ control_points,
+ dspline_points=dspline_points,
+ dddspline_points=dddspline_points)
# Plot the control points and the spline.
pylab.figure()
- pylab.plot(
- numpy.array(control_points)[0, :],
- numpy.array(control_points)[1, :],
- '-o',
- label='control')
- pylab.plot(
- numpy.array(spline_points)[0, :],
- numpy.array(spline_points)[1, :],
- label='spline')
+ pylab.plot(numpy.array(control_points)[0, :],
+ numpy.array(control_points)[1, :],
+ '-o',
+ label='control')
+ pylab.plot(numpy.array(spline_points)[0, :],
+ numpy.array(spline_points)[1, :],
+ label='spline')
pylab.legend()
# For grins, confirm that the double integral of the acceleration (with
@@ -657,9 +664,9 @@
# Integrate up the spline velocity and heading to confirm that given a
# velocity (as a function of the spline parameter) and angle, we will move
# from the starting point to the ending point.
- thetaint_plot = numpy.zeros((len(alphas),))
+ thetaint_plot = numpy.zeros((len(alphas), ))
thetaint = theta[0]
- dthetaint_plot = numpy.zeros((len(alphas),))
+ dthetaint_plot = numpy.zeros((len(alphas), ))
dthetaint = dtheta[0]
thetaint_plot[0] = thetaint
dthetaint_plot[0] = dthetaint
@@ -670,15 +677,14 @@
for i in range(len(alphas) - 1):
dalpha = alphas[i + 1] - alphas[i]
txint += dalpha * numpy.linalg.norm(
- dspline_points[:, i]) * numpy.matrix(
- [[numpy.cos(theta[i])], [numpy.sin(theta[i])]])
+ dspline_points[:, i]) * numpy.matrix([[numpy.cos(theta[i])],
+ [numpy.sin(theta[i])]])
txint_plot[:, i + 1] = txint
thetaint += dalpha * dtheta[i]
dthetaint += dalpha * ddtheta[i]
thetaint_plot[i + 1] = thetaint
dthetaint_plot[i + 1] = dthetaint
-
# Now plot x, dx/dalpha, ddx/ddalpha, dddx/dddalpha, and integrals thereof
# to perform consistency checks.
pylab.figure()
@@ -709,11 +715,9 @@
pylab.plot(alphas, ddtheta, label='ddtheta')
pylab.plot(alphas, thetaint_plot, label='thetai')
pylab.plot(alphas, dthetaint_plot, label='dthetai')
- pylab.plot(
- alphas,
- numpy.linalg.norm(
- numpy.array(dspline_points), axis=0),
- label='velocity')
+ pylab.plot(alphas,
+ numpy.linalg.norm(numpy.array(dspline_points), axis=0),
+ label='velocity')
# Now, repeat as a function of path length as opposed to alpha
path = Path(control_points)
@@ -780,12 +784,11 @@
longitudal_accel = 3.0
lateral_accel = 2.0
- trajectory = Trajectory(
- path,
- drivetrain=velocity_drivetrain,
- longitudal_accel=longitudal_accel,
- lateral_accel=lateral_accel,
- distance_count=500)
+ trajectory = Trajectory(path,
+ drivetrain=velocity_drivetrain,
+ longitudal_accel=longitudal_accel,
+ lateral_accel=lateral_accel,
+ distance_count=500)
vmax = numpy.inf
vmax = 10.0
@@ -827,14 +830,16 @@
while spline_state[0, 0] < path.length():
t += dt
+
def along_spline_diffeq(t, x):
_, v, a = trajectory.ff_accel(backward_accel_plan, x[0, 0])
return numpy.matrix([[x[1, 0]], [a]])
- spline_state = RungeKutta(along_spline_diffeq,
- spline_state.copy(), t, dt)
+ spline_state = RungeKutta(along_spline_diffeq, spline_state.copy(), t,
+ dt)
- d, v, a = trajectory.ff_accel(backward_accel_plan, length_plan_x[-1][0, 0])
+ d, v, a = trajectory.ff_accel(backward_accel_plan,
+ length_plan_x[-1][0, 0])
length_plan_v.append(v)
length_plan_a.append(a)
@@ -877,9 +882,10 @@
[path.dtheta_dt(xva_plan[0, i], xva_plan[1, i])[0]]])
vel_lr = numpy.linalg.inv(Ter) * vel_omega_r
- state_plan[:, i] = numpy.matrix(
- [[x_r], [y_r], [theta_r], [vel_lr[0, 0]], [vel_lr[1, 0]]])
- u_plan[:, i] = trajectory.ff_voltage(backward_accel_plan, xva_plan[0, i])
+ state_plan[:, i] = numpy.matrix([[x_r], [y_r], [theta_r],
+ [vel_lr[0, 0]], [vel_lr[1, 0]]])
+ u_plan[:, i] = trajectory.ff_voltage(backward_accel_plan, xva_plan[0,
+ i])
Q = numpy.matrix(
numpy.diag([
@@ -906,16 +912,17 @@
linear_velocity = -kMinVelocity
width = (velocity_drivetrain.robot_radius_l +
- velocity_drivetrain.robot_radius_r)
- A_linearized_continuous = numpy.matrix([[
- 0.0, 0.0, -sintheta * linear_velocity, 0.5 * costheta, 0.5 *
- costheta
- ], [
- 0.0, 0.0, costheta * linear_velocity, 0.5 * sintheta, 0.5 *
- sintheta
- ], [0.0, 0.0, 0.0, -1.0 / width, 1.0 / width],
- [0.0, 0.0, 0.0, 0.0, 0.0],
- [0.0, 0.0, 0.0, 0.0, 0.0]])
+ velocity_drivetrain.robot_radius_r)
+ A_linearized_continuous = numpy.matrix(
+ [[
+ 0.0, 0.0, -sintheta * linear_velocity, 0.5 * costheta,
+ 0.5 * costheta
+ ],
+ [
+ 0.0, 0.0, costheta * linear_velocity, 0.5 * sintheta,
+ 0.5 * sintheta
+ ], [0.0, 0.0, 0.0, -1.0 / width, 1.0 / width],
+ [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]])
A_linearized_continuous[3:5, 3:5] = velocity_drivetrain.A_continuous
B_linearized_continuous = numpy.matrix(numpy.zeros((5, 2)))
B_linearized_continuous[3:5, :] = velocity_drivetrain.B_continuous
@@ -942,15 +949,15 @@
velocity_drivetrain.robot_radius_r)
accel = (velocity_drivetrain.A_continuous * velocity +
velocity_drivetrain.B_continuous * U)
- return numpy.matrix(
- [[numpy.cos(theta) * linear_velocity],
- [numpy.sin(theta) * linear_velocity], [angular_velocity],
- [accel[0, 0]], [accel[1, 0]]])
+ return numpy.matrix([[numpy.cos(theta) * linear_velocity],
+ [numpy.sin(theta) * linear_velocity],
+ [angular_velocity], [accel[0, 0]],
+ [accel[1, 0]]])
full_us[:, i] = U
- state = RungeKutta(lambda t, x: spline_diffeq(U, t, x),
- state, i * dt, dt)
+ state = RungeKutta(lambda t, x: spline_diffeq(U, t, x), state, i * dt,
+ dt)
pylab.figure()
pylab.plot(length_plan_t, numpy.array(xva_plan)[0, :], label='x')
@@ -963,17 +970,14 @@
pylab.legend()
pylab.figure()
- pylab.plot(
- numpy.array(states)[0, :],
- numpy.array(states)[1, :],
- label='robot')
- pylab.plot(
- numpy.array(spline_points)[0, :],
- numpy.array(spline_points)[1, :],
- label='spline')
+ pylab.plot(numpy.array(states)[0, :],
+ numpy.array(states)[1, :],
+ label='robot')
+ pylab.plot(numpy.array(spline_points)[0, :],
+ numpy.array(spline_points)[1, :],
+ label='spline')
pylab.legend()
-
def a(_, x):
return 2.0
return 2.0 + 0.0001 * x
diff --git a/frc971/control_loops/python/spline_drawing.py b/frc971/control_loops/python/spline_drawing.py
new file mode 100644
index 0000000..4e8b04b
--- /dev/null
+++ b/frc971/control_loops/python/spline_drawing.py
@@ -0,0 +1,292 @@
+from color import Color, palette
+import cairo
+
+import numpy as np
+from basic_window import OverrideMatrix, identity, quit_main_loop, set_color
+WIDTH_OF_FIELD_IN_METERS = 8.258302
+PIXELS_ON_SCREEN = 800
+
+X_TRANSLATE = 0
+Y_TRANSLATE = 0
+
+
+def pxToM(p):
+ return p * WIDTH_OF_FIELD_IN_METERS / PIXELS_ON_SCREEN
+
+
+def mToPx(m):
+ return (m * PIXELS_ON_SCREEN / WIDTH_OF_FIELD_IN_METERS)
+
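+# pxToM and mToPx are inverse scalings: mToPx(WIDTH_OF_FIELD_IN_METERS) spans
+# the full PIXELS_ON_SCREEN (800 px) drawing width.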
+
+def px(cr):
+ return OverrideMatrix(cr, identity)
+
+
+def draw_px_cross(cr, x, y, length_px, color=palette["RED"]):
+ """Draws a cross with fixed dimensions in pixel space."""
+ set_color(cr, color)
+ cr.move_to(x, y - length_px)
+ cr.line_to(x, y + length_px)
+ cr.stroke()
+
+ cr.move_to(x - length_px, y)
+ cr.line_to(x + length_px, y)
+ cr.stroke()
+ set_color(cr, palette["LIGHT_GREY"])
+
+
+def draw_px_x(cr, x, y, length_px1, color=palette["BLACK"]):
+ """Draws a x with fixed dimensions in pixel space."""
+ length_px = length_px1 / np.sqrt(2)
+ set_color(cr, color)
+ cr.move_to(x - length_px, y - length_px)
+ cr.line_to(x + length_px, y + length_px)
+ cr.stroke()
+
+ cr.move_to(x - length_px, y + length_px)
+ cr.line_to(x + length_px, y - length_px)
+ cr.stroke()
+ set_color(cr, palette["LIGHT_GREY"])
+
+
+def display_text(cr, text, widtha, heighta, widthb, heightb):
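+    # cairo renders text with +y down while the field transform has +y up;
+    # the negative y scales flip the axis for show_text and then restore the
+    # caller's transform.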
+ cr.scale(widtha, -heighta)
+ cr.show_text(text)
+ cr.scale(widthb, -heightb)
+
+
+def redraw(needs_redraw, window):
+ print("Redrew")
+ if not needs_redraw:
+ window.queue_draw()
+
+
+def draw_HAB(cr):
+ print("WENT IN")
+ # BASE Constants
+ X_BASE = 0 + X_TRANSLATE #(2.41568)
+ Y_BASE = 0 + Y_TRANSLATE #mToPx(4.129151)
+ BACKWALL_X = X_TRANSLATE
+ LOADING_Y = mToPx(4.129151) - mToPx(0.696976)
+    # HAB levels 2 and 3 are stored in the backhab variables below.
+    # Draw the loading stations.
+ cr.move_to(0, LOADING_Y)
+ cr.line_to(mToPx(0.6), LOADING_Y)
+ cr.move_to(0, -1.0 * LOADING_Y)
+ cr.line_to(mToPx(0.6), -1.0 * LOADING_Y)
+
+ WIDTH_BACKHAB = mToPx(1.2192)
+
+ Y_TOP_BACKHAB_BOX = Y_BASE + mToPx(0.6096)
+ BACKHAB_LV2_LENGTH = mToPx(1.016)
+
+ BACKHAB_LV3_LENGTH = mToPx(1.2192)
+ Y_LV3_BOX = Y_TOP_BACKHAB_BOX - BACKHAB_LV3_LENGTH
+
+ Y_BOTTOM_BACKHAB_BOX = Y_LV3_BOX - BACKHAB_LV2_LENGTH
+
+ # HAB LEVEL 1
+ X_LV1_BOX = BACKWALL_X + WIDTH_BACKHAB
+
+ WIDTH_LV1_BOX = mToPx(0.90805)
+ LENGTH_LV1_BOX = mToPx(1.6256)
+
+ Y_BOTTOM_LV1_BOX = Y_BASE - LENGTH_LV1_BOX
+
+ # Ramp off Level 1
+ X_RAMP = X_LV1_BOX
+
+ Y_TOP_RAMP = Y_BASE + LENGTH_LV1_BOX
+ WIDTH_TOP_RAMP = mToPx(1.20015)
+ LENGTH_TOP_RAMP = Y_BASE + mToPx(0.28306)
+
+ X_MIDDLE_RAMP = X_RAMP + WIDTH_LV1_BOX
+ Y_MIDDLE_RAMP = Y_BOTTOM_LV1_BOX
+ LENGTH_MIDDLE_RAMP = 2 * LENGTH_LV1_BOX
+ WIDTH_MIDDLE_RAMP = WIDTH_TOP_RAMP - WIDTH_LV1_BOX
+
+ Y_BOTTOM_RAMP = Y_BASE - LENGTH_LV1_BOX - LENGTH_TOP_RAMP
+
+ # Side Bars to Hold in balls
+ X_BARS = BACKWALL_X
+ WIDTH_BARS = WIDTH_BACKHAB
+ LENGTH_BARS = mToPx(0.574675)
+
+ Y_TOP_BAR = Y_TOP_BACKHAB_BOX + BACKHAB_LV2_LENGTH
+
+ Y_BOTTOM_BAR = Y_BOTTOM_BACKHAB_BOX - LENGTH_BARS
+
+ set_color(cr, palette["BLACK"])
+ cr.rectangle(BACKWALL_X, Y_TOP_BACKHAB_BOX, WIDTH_BACKHAB,
+ BACKHAB_LV2_LENGTH)
+ cr.rectangle(BACKWALL_X, Y_LV3_BOX, WIDTH_BACKHAB, BACKHAB_LV3_LENGTH)
+ cr.rectangle(BACKWALL_X, Y_BOTTOM_BACKHAB_BOX, WIDTH_BACKHAB,
+ BACKHAB_LV2_LENGTH)
+ cr.rectangle(X_LV1_BOX, Y_BASE, WIDTH_LV1_BOX, LENGTH_LV1_BOX)
+ cr.rectangle(X_LV1_BOX, Y_BOTTOM_LV1_BOX, WIDTH_LV1_BOX, LENGTH_LV1_BOX)
+ cr.rectangle(X_RAMP, Y_TOP_RAMP, WIDTH_TOP_RAMP, LENGTH_TOP_RAMP)
+ cr.rectangle(X_MIDDLE_RAMP, Y_MIDDLE_RAMP, WIDTH_MIDDLE_RAMP,
+ LENGTH_MIDDLE_RAMP)
+ cr.rectangle(X_RAMP, Y_BOTTOM_RAMP, WIDTH_TOP_RAMP, LENGTH_TOP_RAMP)
+ cr.rectangle(X_BARS, Y_TOP_BAR, WIDTH_BARS, LENGTH_BARS)
+ cr.rectangle(X_BARS, Y_BOTTOM_BAR, WIDTH_BARS, LENGTH_BARS)
+ cr.stroke()
+
+ cr.set_line_join(cairo.LINE_JOIN_ROUND)
+
+ cr.stroke()
+
+    # Draw the origin axes at (0, 0)
+ set_color(cr, palette["BLACK"])
+ cr.move_to(X_TRANSLATE, Y_TRANSLATE)
+ cr.line_to(X_TRANSLATE, Y_TRANSLATE + mToPx(8.2296 / 2.0))
+ cr.move_to(X_TRANSLATE, Y_TRANSLATE)
+ cr.line_to(X_TRANSLATE, Y_TRANSLATE + mToPx(-8.2296 / 2.0))
+ cr.move_to(X_TRANSLATE, Y_TRANSLATE)
+ cr.line_to(X_TRANSLATE + mToPx(8.2296), Y_TRANSLATE)
+
+ cr.stroke()
+
+
+def draw_rockets(cr):
+ # BASE Constants
+ X_BASE = 0 + X_TRANSLATE + mToPx(2.41568)
+ Y_BASE = 0 + Y_TRANSLATE #mToPx(4.129151)
+
+ near_side_rocket_center = [
+ X_BASE + mToPx((2.89973 + 3.15642) / 2.0), Y_BASE + mToPx(
+ (3.86305 + 3.39548) / 2.0)
+ ]
+ middle_rocket_center = [
+ X_BASE + mToPx((3.15642 + 3.6347) / 2.0), Y_BASE + mToPx(
+ (3.39548 + 3.392380) / 2.0)
+ ]
+ far_side_rocket_center = [
+ X_BASE + mToPx((3.63473 + 3.89984) / 2.0), Y_BASE + mToPx(
+ (3.39238 + 3.86305) / 2.0)
+ ]
+
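+    # Each peg mark is a 0.4 m tick pointing out of its rocket face; the
+    # 0.866 / 0.5 factors are presumably cos(30) / sin(30) for the angled
+    # near and far faces.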
+ cr.move_to(near_side_rocket_center[0], near_side_rocket_center[1])
+ cr.line_to(near_side_rocket_center[0] - 0.4 * mToPx(0.866),
+ near_side_rocket_center[1] - 0.4 * mToPx(0.5))
+
+ cr.move_to(middle_rocket_center[0], middle_rocket_center[1])
+ cr.line_to(middle_rocket_center[0], middle_rocket_center[1] - mToPx(0.4))
+
+ cr.move_to(far_side_rocket_center[0], far_side_rocket_center[1])
+ cr.line_to(far_side_rocket_center[0] + 0.4 * mToPx(0.866),
+ far_side_rocket_center[1] - 0.4 * mToPx(0.5))
+
+ print("FAR SIDE ROCKET")
+ #print(far_side_rocket_center)
+ near_side_rocket_center = [
+ X_BASE + mToPx((2.89973 + 3.15642) / 2.0), Y_BASE - mToPx(
+ (3.86305 + 3.39548) / 2.0)
+ ]
+ middle_rocket_center = [
+ X_BASE + mToPx((3.15642 + 3.6347) / 2.0), Y_BASE - mToPx(
+ (3.39548 + 3.392380) / 2.0)
+ ]
+ far_side_rocket_center = [
+ X_BASE + mToPx((3.63473 + 3.89984) / 2.0), Y_BASE - mToPx(
+ (3.39238 + 3.86305) / 2.0)
+ ]
+
+ cr.move_to(near_side_rocket_center[0], near_side_rocket_center[1])
+ cr.line_to(near_side_rocket_center[0] - 0.4 * mToPx(0.866),
+ near_side_rocket_center[1] + 0.4 * mToPx(0.5))
+
+ cr.move_to(middle_rocket_center[0], middle_rocket_center[1])
+ cr.line_to(middle_rocket_center[0], middle_rocket_center[1] + mToPx(0.4))
+
+ cr.move_to(far_side_rocket_center[0], far_side_rocket_center[1])
+ cr.line_to(far_side_rocket_center[0] + 0.4 * mToPx(0.866),
+ far_side_rocket_center[1] + 0.4 * mToPx(0.5))
+
+ X_BASE = 0 + X_TRANSLATE #mToPx(2.41568)
+ Y_BASE = 0 + Y_TRANSLATE #mToPx(4.129151)
+
+ # Leftmost Line
+ cr.move_to(X_BASE + mToPx(2.89973), Y_BASE + mToPx(3.86305))
+ cr.line_to(X_BASE + mToPx(3.15642), Y_BASE + mToPx(3.39548))
+
+ # Top Line
+ cr.move_to(X_BASE + mToPx(3.15642), Y_BASE + mToPx(3.39548))
+ cr.line_to(X_BASE + mToPx(3.63473), Y_BASE + mToPx(3.39238))
+
+    # Rightmost Line
+ cr.move_to(X_BASE + mToPx(3.63473), Y_BASE + mToPx(3.39238))
+ cr.line_to(X_BASE + mToPx(3.89984), Y_BASE + mToPx(3.86305))
+
+    # Back Line
+ cr.move_to(X_BASE + mToPx(2.89973), Y_BASE + mToPx(3.86305))
+ cr.line_to(X_BASE + mToPx(3.89984), Y_BASE + mToPx(3.86305))
+
+ # Leftmost Line
+ cr.move_to(X_BASE + mToPx(2.89973), Y_BASE - mToPx(3.86305))
+ cr.line_to(X_BASE + mToPx(3.15642), Y_BASE - mToPx(3.39548))
+
+ # Top Line
+ cr.move_to(X_BASE + mToPx(3.15642), Y_BASE - mToPx(3.39548))
+ cr.line_to(X_BASE + mToPx(3.63473), Y_BASE - mToPx(3.39238))
+
+    # Rightmost Line
+ cr.move_to(X_BASE + mToPx(3.63473), Y_BASE - mToPx(3.39238))
+ cr.line_to(X_BASE + mToPx(3.89984), Y_BASE - mToPx(3.86305))
+
+    # Back Line
+ cr.move_to(X_BASE + mToPx(2.89973), Y_BASE - mToPx(3.86305))
+ cr.line_to(X_BASE + mToPx(3.89984), Y_BASE - mToPx(3.86305))
+
+ cr.stroke()
+
+
+def draw_cargo_ship(cr):
+ # BASE Constants
+
+ X_BASE = X_TRANSLATE + mToPx(5.59435)
+ Y_BASE = 0 + Y_TRANSLATE #mToPx(4.129151)
+
+ FRONT_PEG_DELTA_Y = mToPx(0.276352)
+ cr.move_to(X_BASE, Y_BASE + FRONT_PEG_DELTA_Y)
+ cr.line_to(X_BASE - mToPx(0.4), Y_BASE + FRONT_PEG_DELTA_Y)
+
+ cr.move_to(X_BASE, Y_BASE - FRONT_PEG_DELTA_Y)
+ cr.line_to(X_BASE - mToPx(0.4), Y_BASE - FRONT_PEG_DELTA_Y)
+
+ SIDE_PEG_Y = mToPx(1.41605 / 2.0)
+ SIDE_PEG_X = X_BASE + mToPx(1.148842)
+ SIDE_PEG_DX = mToPx(0.55245)
+
+ cr.move_to(SIDE_PEG_X, SIDE_PEG_Y)
+ cr.line_to(SIDE_PEG_X, SIDE_PEG_Y + mToPx(0.4))
+
+ cr.move_to(SIDE_PEG_X + SIDE_PEG_DX, SIDE_PEG_Y)
+ cr.line_to(SIDE_PEG_X + SIDE_PEG_DX, SIDE_PEG_Y + mToPx(0.4))
+
+ cr.move_to(SIDE_PEG_X + 2.0 * SIDE_PEG_DX, SIDE_PEG_Y)
+ cr.line_to(SIDE_PEG_X + 2.0 * SIDE_PEG_DX, SIDE_PEG_Y + mToPx(0.4))
+
+ cr.move_to(SIDE_PEG_X, -1.0 * SIDE_PEG_Y)
+ cr.line_to(SIDE_PEG_X, -1.0 * SIDE_PEG_Y - mToPx(0.4))
+
+ cr.move_to(SIDE_PEG_X + SIDE_PEG_DX, -1.0 * SIDE_PEG_Y)
+ cr.line_to(SIDE_PEG_X + SIDE_PEG_DX, -1.0 * SIDE_PEG_Y - mToPx(0.4))
+
+ cr.move_to(SIDE_PEG_X + 2.0 * SIDE_PEG_DX, -1.0 * SIDE_PEG_Y)
+ cr.line_to(SIDE_PEG_X + 2.0 * SIDE_PEG_DX, -1.0 * SIDE_PEG_Y - mToPx(0.4))
+
+ cr.rectangle(X_BASE, Y_BASE - mToPx(1.41605 / 2.0), mToPx(2.43205),
+ mToPx(1.41605))
+
+ X_BASE = X_TRANSLATE + mToPx(2.41568)
+ Y_BASE = 0 + Y_TRANSLATE #mToPx(4.129151)
+
+ cr.rectangle(X_BASE + mToPx(3.15912), Y_BASE - mToPx(0.72326),
+ mToPx(2.43205), mToPx(1.41605))
+
+ cr.stroke()
diff --git a/frc971/control_loops/python/spline_graph.py b/frc971/control_loops/python/spline_graph.py
new file mode 100755
index 0000000..2fd7964
--- /dev/null
+++ b/frc971/control_loops/python/spline_graph.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python3
+import gi
+from path_edit import *
+import numpy as np
+gi.require_version('Gtk', '3.0')
+from gi.repository import Gdk, Gtk, GLib
+import cairo
+
+class GridWindow(Gtk.Window):
+ def method_connect(self, event, cb):
+ def handler(obj, *args):
+ cb(*args)
+
+ print("Method_connect ran")
+ self.connect(event, handler)
+
+ def mouse_move(self, event):
+        # Changes event.x and event.y to be relative to the center.
+ x = event.x - self.drawing_area.window_shape[0] / 2
+ y = self.drawing_area.window_shape[1] / 2 - event.y
+ scale = self.drawing_area.get_current_scale()
+ event.x = x / scale + self.drawing_area.center[0]
+ event.y = y / scale + self.drawing_area.center[1]
+ self.drawing_area.mouse_move(event)
+ self.queue_draw()
+
+ def button_press(self, event):
+ print("button press activated")
+ o_x = event.x
+ o_y = event.y
+ x = event.x - self.drawing_area.window_shape[0] / 2
+ y = self.drawing_area.window_shape[1] / 2 - event.y
+ scale = 2 * self.drawing_area.get_current_scale()
+ event.x = x / scale + self.drawing_area.center[0]
+ event.y = y / scale + self.drawing_area.center[1]
+ self.drawing_area.do_button_press(event)
+ event.x = o_x
+ event.y = o_y
+
+ def key_press(self, event):
+ print("key press activated")
+ self.drawing_area.do_key_press(event, self.file_name_box.get_text())
+ self.queue_draw()
+
+ def configure(self, event):
+ print("configure activated")
+ self.drawing_area.window_shape = (event.width, event.height)
+
+    # Handle submitting a constraint. Note that this is currently dead code:
+    # the constraint and value boxes are no longer created in __init__.
+ def on_submit_click(self, widget):
+ self.drawing_area.inConstraint = int(self.constraint_box.get_text())
+ self.drawing_area.inValue = int(self.value_box.get_text())
+
+ def __init__(self):
+ Gtk.Window.__init__(self)
+
+ self.set_default_size(1.5 * SCREEN_SIZE, SCREEN_SIZE)
+
+ flowBox = Gtk.FlowBox()
+ flowBox.set_valign(Gtk.Align.START)
+ flowBox.set_selection_mode(Gtk.SelectionMode.NONE)
+
+ flowBox.set_valign(Gtk.Align.START)
+
+ self.add(flowBox)
+
+ container = Gtk.Fixed()
+ flowBox.add(container)
+
+ self.eventBox = Gtk.EventBox()
+ container.add(self.eventBox)
+
+ self.eventBox.set_events(Gdk.EventMask.BUTTON_PRESS_MASK
+ | Gdk.EventMask.BUTTON_RELEASE_MASK
+ | Gdk.EventMask.POINTER_MOTION_MASK
+ | Gdk.EventMask.SCROLL_MASK
+ | Gdk.EventMask.KEY_PRESS_MASK)
+
+    # Add the graph box
+ self.drawing_area = GTK_Widget()
+ self.eventBox.add(self.drawing_area)
+
+ self.method_connect("key-release-event", self.key_press)
+ self.method_connect("button-release-event", self.button_press)
+ self.method_connect("configure-event", self.configure)
+ self.method_connect("motion_notify_event", self.mouse_move)
+
+ self.file_name_box = Gtk.Entry()
+ self.file_name_box.set_size_request(200, 40)
+
+ self.file_name_box.set_text("File")
+ self.file_name_box.set_editable(True)
+
+ container.put(self.file_name_box, 0, 0)
+
+ self.show_all()
+
+
+window = GridWindow()
+RunApp()
diff --git a/frc971/control_loops/python/spline_graph_usage.txt b/frc971/control_loops/python/spline_graph_usage.txt
new file mode 100644
index 0000000..a698298
--- /dev/null
+++ b/frc971/control_loops/python/spline_graph_usage.txt
@@ -0,0 +1,22 @@
+Spline Graph Usage
+
+Run with: bazel run spline_graph -c opt "YOUR SCREEN SIZE"
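+Example (assuming the size is given in pixels): bazel run spline_graph -c opt "800"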
+
+On start, place down the first 6 points.
+Then you go into editing mode, where you can drag your points around.
+To create another spline, press p; the first three points will be autogenerated.
+
+To export, type your file name into the top-right text field, e.g. test.json.
+It will be exported to spline_jsons/YOURFILE
+
+Notes:
+If importing gi, #!/usr/bin/python3 must be at the top of the file.
+
+This was built on top of BaseWindow.
+
+Known Bugs:
+libspline throws errors about viable friction limits and viable voltage limits not overlapping.
+To reproduce: bazel run lib_spline_test -c opt
+John commented those lines out.
+
+Upon closing, the window reopens.
diff --git a/frc971/control_loops/python/spline_jsons/test.json b/frc971/control_loops/python/spline_jsons/test.json
new file mode 100644
index 0000000..09737e0
--- /dev/null
+++ b/frc971/control_loops/python/spline_jsons/test.json
@@ -0,0 +1 @@
+[[[1.6680953371273437, -3.4989921261100347], [1.8841223685193285, -3.3960504631707944], [2.0281410171518672, -3.272708402385707], [2.192647314562578, -3.1286520914802223], [2.3984302077132025, -3.0360669258966317], [2.5936689198479943, -2.912724865111545]], [[2.5936689198479943, -2.912724865111545], [2.788907631982786, -2.789382804326458], [2.973602163101745, -2.635283848339876], [3.190230535451773, -2.5732992795250538], [3.396013428602397, -2.511627935280236], [3.6117404172199103, -2.4705139150185405]]]
\ No newline at end of file
diff --git a/frc971/control_loops/python/spline_writer.py b/frc971/control_loops/python/spline_writer.py
new file mode 100644
index 0000000..df622f3
--- /dev/null
+++ b/frc971/control_loops/python/spline_writer.py
@@ -0,0 +1,72 @@
+import os
+import numpy as np
+
+
+class SplineWriter(object):
+ def __init__(self, namespaces=None, filename="auto_splines.cc"):
+ if namespaces:
+ self._namespaces = namespaces
+ else:
+ self._namespaces = ['frc971', 'autonomous']
+
+ self._namespace_start = '\n'.join(
+ ['namespace %s {' % name for name in self._namespaces])
+
+ self._namespace_end = '\n'.join([
+ '} // namespace %s' % name for name in reversed(self._namespaces)
+ ])
+
+ self.filename_ = filename
+
+    def make_string_list(self, values):
+        string = "{{"
+        for i in range(0, len(values)):
+            if i == len(values) - 1:
+                string += str(values[i])
+            else:
+                string += str(values[i]) + ", "
+        return string + "}}"
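+    # e.g. make_string_list([1.0, 2.0, 3.0]) -> "{{1.0, 2.0, 3.0}}", the
+    # aggregate-initializer form written into the generated C++ below.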
+
+ def Write(self, spline_name, spline_idx, control_points):
+ """Writes the cc file to the file named cc_file."""
+ xs = control_points[:, 0]
+ ys = control_points[:, 1]
+        spline_count = (len(xs) - 6) // 5 + 1  # // keeps the count an int in Python 3
+ with open(self.filename_, 'a') as fd:
+ fd.write(self._namespace_start)
+            # Write the name
+ fd.write("\n\n::frc971::MultiSpline " + spline_name + "() {\n")
+            # Write the objects; at the moment this assumes a single constraint and needs fixing
+ fd.write(
+ "\t::frc971::MultiSpline spline;\n\t::frc971::Constraint constraints;\n"
+ )
+ fd.write(
+ "\tconstraints.constraint_type = 0;\n\tconstraints.value = 0;\n"
+ )
+ fd.write(
+ "\tconstraints.start_distance = 0;\n\tconstraints.end_distance = 0;\n"
+ )
+ fd.write('\n')
+ fd.write("\tspline.spline_idx = " + str(spline_idx) +
+ ";\n\tspline.spline_count = " + str(spline_count) + ";\n")
+ fd.write("\tspline.spline_x = " + self.make_string_list(xs) +
+ ";\n\tspline.spline_y = " + self.make_string_list(ys) +
+ ";\n")
+ fd.write(
+ "\tspline.constraints = {{constraints}};\n\treturn spline;\n")
+ fd.write("}")
+ fd.write("\n\n")
+ fd.write(self._namespace_end)
+ fd.write("\n\n")
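+        # The emitted snippet looks roughly like (for a single 6-point spline):
+        #   ::frc971::MultiSpline test_spline() {
+        #     ::frc971::MultiSpline spline;
+        #     ::frc971::Constraint constraints;
+        #     ...
+        #     spline.spline_x = {{1.0, 3.0, 5.0, 7.0, 9.0, 11.0}};
+        #     spline.constraints = {{constraints}};
+        #     return spline;
+        #   }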
+
+
+def main():
+ writer = SplineWriter()
+ points = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0],
+ [9.0, 10.0], [11.0, 12.0]])
+ spline_name = "test_spline"
+ spline_idx = 1
+ writer.Write(spline_name, spline_idx, points)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/third_party/flatbuffers/src/idl_gen_js_ts.cpp b/third_party/flatbuffers/src/idl_gen_js_ts.cpp
index be0a205..cfb0cb6 100644
--- a/third_party/flatbuffers/src/idl_gen_js_ts.cpp
+++ b/third_party/flatbuffers/src/idl_gen_js_ts.cpp
@@ -153,10 +153,9 @@
imported_files.emplace(file.first);
}
- code += "export namespace " + file.second.target_namespace + " { \n";
+ //code += "export namespace " + file.second.target_namespace + " { \n";
code += "export import " + file.second.symbol + " = ";
- code += GenFileNamespacePrefix(file.first) + "." +
- file.second.source_namespace + "." + file.second.symbol +
+ code += file.second.symbol +
"; }\n";
}
}
@@ -334,7 +333,7 @@
std::string ns = GetNameSpace(enum_def);
std::string enum_def_name = enum_def.name + (reverse ? "Name" : "");
if (lang_.language == IDLOptions::kTs) {
- if (!ns.empty()) { code += "export namespace " + ns + "{\n"; }
+ //if (!ns.empty()) { code += "export namespace " + ns + "{\n"; }
code += "export enum " + enum_def.name + "{\n";
} else {
if (enum_def.defined_namespace->components.empty()) {
@@ -348,7 +347,7 @@
exports += "this." + enum_def_name + " = " + enum_def_name + ";\n";
}
}
- code += WrapInNameSpace(enum_def) + (reverse ? "Name" : "") + " = {\n";
+ code += enum_def.name + (reverse ? "Name" : "") + " = {\n";
}
for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) {
auto &ev = **it;
@@ -416,7 +415,7 @@
if (type.base_type == BASE_TYPE_BOOL) { getter = "!!" + getter; }
if (type.enum_def) {
getter = "/** " +
- GenTypeAnnotation(kType, WrapInNameSpace(*type.enum_def), "",
+ GenTypeAnnotation(kType, type.enum_def->name, "",
false) +
" */ (" + getter + ")";
}
@@ -433,15 +432,15 @@
if (value.type.enum_def) {
if (auto val = value.type.enum_def->FindByValue(value.constant)) {
if (lang_.language == IDLOptions::kTs) {
- return GenPrefixedTypeName(WrapInNameSpace(*value.type.enum_def),
+ return GenPrefixedTypeName(value.type.enum_def->name,
value.type.enum_def->file) +
"." + val->name;
} else {
- return WrapInNameSpace(*value.type.enum_def) + "." + val->name;
+ return value.type.enum_def->name + "." + val->name;
}
} else {
return "/** " +
- GenTypeAnnotation(kType, WrapInNameSpace(*value.type.enum_def),
+ GenTypeAnnotation(kType, value.type.enum_def->name,
"", false) +
"} */ (" + value.constant + ")";
}
@@ -473,7 +472,7 @@
if (type.base_type == BASE_TYPE_STRING) {
name = "string|Uint8Array";
} else {
- name = WrapInNameSpace(*type.struct_def);
+ name = type.struct_def->name;
}
return (allowNull) ? (name + "|null") : (name);
}
@@ -485,7 +484,7 @@
case BASE_TYPE_ULONG: return "flatbuffers.Long";
default:
if (IsScalar(type.base_type)) {
- if (type.enum_def) { return WrapInNameSpace(*type.enum_def); }
+ if (type.enum_def) { return type.enum_def->name; }
return "number";
}
return "flatbuffers.Offset";
@@ -675,9 +674,9 @@
if (lang_.language == IDLOptions::kTs) {
object_name = struct_def.name;
GenDocComment(struct_def.doc_comment, code_ptr, "@constructor");
- if (!object_namespace.empty()) {
- code += "export namespace " + object_namespace + "{\n";
- }
+ //if (!object_namespace.empty()) {
+ // code += "export namespace " + object_namespace + "{\n";
+ //}
code += "export class " + struct_def.name;
code += " {\n";
if (lang_.language != IDLOptions::kTs) {
@@ -695,7 +694,7 @@
code += " bb_pos:number = 0;\n";
} else {
bool isStatement = struct_def.defined_namespace->components.empty();
- object_name = WrapInNameSpace(struct_def);
+ object_name = struct_def.name;
GenDocComment(struct_def.doc_comment, code_ptr, "@constructor");
if (isStatement) {
if (parser_.opts.use_goog_js_export_format) {
@@ -851,7 +850,7 @@
else {
switch (field.value.type.base_type) {
case BASE_TYPE_STRUCT: {
- auto type = WrapInNameSpace(*field.value.type.struct_def);
+ auto type = field.value.type.struct_def->name;
GenDocComment(
field.doc_comment, code_ptr,
GenTypeAnnotation(kParam, type + "=", "obj") +
@@ -978,7 +977,7 @@
if (field.value.type.enum_def) {
code += "/** " +
GenTypeAnnotation(
- kType, WrapInNameSpace(*field.value.type.enum_def),
+ kType, field.value.type.enum_def->name,
"", false) +
" */ (" + field.value.constant + ")";
} else {
@@ -1359,9 +1358,9 @@
}
if (lang_.language == IDLOptions::kTs) {
- if (!object_namespace.empty()) {
- code += "}\n";
- }
+ //if (!object_namespace.empty()) {
+ // code += "}\n";
+ //}
code += "}\n";
}
}
diff --git a/y2019/control_loops/drivetrain/event_loop_localizer.cc b/y2019/control_loops/drivetrain/event_loop_localizer.cc
index c436232..16cd723 100644
--- a/y2019/control_loops/drivetrain/event_loop_localizer.cc
+++ b/y2019/control_loops/drivetrain/event_loop_localizer.cc
@@ -58,17 +58,17 @@
localizer_.ResetInitialState(now, state, newP);
}
-void EventLoopLocalizer::Update(
- const ::Eigen::Matrix<double, 2, 1> &U,
- ::aos::monotonic_clock::time_point now, double left_encoder,
- double right_encoder, double gyro_rate,
- double /*longitudinal_accelerometer*/) {
+void EventLoopLocalizer::Update(const ::Eigen::Matrix<double, 2, 1> &U,
+ ::aos::monotonic_clock::time_point now,
+ double left_encoder, double right_encoder,
+ double gyro_rate,
+ const Eigen::Vector3d &accel) {
AOS_CHECK(U.allFinite());
AOS_CHECK(::std::isfinite(left_encoder));
AOS_CHECK(::std::isfinite(right_encoder));
AOS_CHECK(::std::isfinite(gyro_rate));
localizer_.UpdateEncodersAndGyro(left_encoder, right_encoder, gyro_rate, U,
- now);
+ accel, now);
while (frame_fetcher_.FetchNext()) {
HandleFrame(frame_fetcher_.get());
}
diff --git a/y2019/control_loops/drivetrain/event_loop_localizer.h b/y2019/control_loops/drivetrain/event_loop_localizer.h
index 83b8e79..5305b8d 100644
--- a/y2019/control_loops/drivetrain/event_loop_localizer.h
+++ b/y2019/control_loops/drivetrain/event_loop_localizer.h
@@ -60,32 +60,9 @@
void Update(const ::Eigen::Matrix<double, 2, 1> &U,
::aos::monotonic_clock::time_point now, double left_encoder,
double right_encoder, double gyro_rate,
- double /*longitudinal_accelerometer*/) override;
+ const Eigen::Vector3d &accel) override;
- double x() const override {
- return localizer_.X_hat(StateIdx::kX); }
- double y() const override {
- return localizer_.X_hat(StateIdx::kY); }
- double theta() const override {
- return localizer_.X_hat(StateIdx::kTheta); }
- double left_encoder() const override {
- return localizer_.X_hat(StateIdx::kLeftEncoder);
- }
- double right_encoder() const override {
- return localizer_.X_hat(StateIdx::kRightEncoder);
- }
- double left_velocity() const override {
- return localizer_.X_hat(StateIdx::kLeftVelocity);
- }
- double right_velocity() const override {
- return localizer_.X_hat(StateIdx::kRightVelocity);
- }
- double left_voltage_error() const override {
- return localizer_.X_hat(StateIdx::kLeftVoltageError);
- }
- double right_voltage_error() const override {
- return localizer_.X_hat(StateIdx::kRightVoltageError);
- }
+ Localizer::State Xhat() const override { return localizer_.X_hat(); }
TargetSelector *target_selector() override {
return &target_selector_;
diff --git a/y2019/control_loops/drivetrain/localized_drivetrain_test.cc b/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
index 9b237f4..1459248 100644
--- a/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
+++ b/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
@@ -111,8 +111,7 @@
const Eigen::Matrix<double, 5, 1> true_state = drivetrain_plant_.state();
EXPECT_NEAR(localizer_.x(), true_state(0, 0), eps);
EXPECT_NEAR(localizer_.y(), true_state(1, 0), eps);
- // TODO(james): Uncomment this.
- //EXPECT_NEAR(localizer_.theta(), true_state(2, 0), 2.0 * eps);
+ EXPECT_NEAR(localizer_.theta(), true_state(2, 0), eps);
EXPECT_NEAR(localizer_.left_velocity(), true_state(3, 0), eps);
EXPECT_NEAR(localizer_.right_velocity(), true_state(4, 0), eps);
}
@@ -290,8 +289,7 @@
// Everything but X-value should be correct:
EXPECT_NEAR(true_state.x(), localizer_.x() + 0.05, 1e-5);
EXPECT_NEAR(true_state.y(), localizer_.y(), 1e-5);
- // TODO(james): Uncomment this.
- //EXPECT_NEAR(true_state(2, 0), localizer_.theta(), 1e-3);
+ EXPECT_NEAR(true_state(2, 0), localizer_.theta(), 1e-3);
EXPECT_NEAR(true_state(3, 0), localizer_.left_velocity(), 1e-4);
EXPECT_NEAR(true_state(4, 0), localizer_.right_velocity(), 1e-4);
}
@@ -383,10 +381,9 @@
VerifyEstimatorAccurate(0.2);
// Due to the fact that we aren't modulating the throttle, we don't try to hit
// the target exactly. Instead, just run slightly past the target:
- // TODO(james): Uncomment this.
- //EXPECT_LT(
- // ::std::abs(::aos::math::DiffAngle(M_PI, drivetrain_plant_.state()(2, 0))),
- // 0.5);
+ EXPECT_LT(
+ ::std::abs(::aos::math::DiffAngle(M_PI, drivetrain_plant_.state()(2, 0))),
+ 0.5);
EXPECT_GT(HPSlotLeft().abs_pos().x(), drivetrain_plant_.state().x());
EXPECT_NEAR(HPSlotLeft().abs_pos().y(), drivetrain_plant_.state().y(), 0.2);
}
diff --git a/y2019/control_loops/drivetrain/localizer_test.cc b/y2019/control_loops/drivetrain/localizer_test.cc
index aa7f22f..7b25ca5 100644
--- a/y2019/control_loops/drivetrain/localizer_test.cc
+++ b/y2019/control_loops/drivetrain/localizer_test.cc
@@ -142,9 +142,6 @@
}
void SetUp() {
- // Turn on -v 1
- FLAGS_v = std::max(FLAGS_v, 1);
-
flatbuffers::DetachedBuffer goal_buffer;
{
flatbuffers::FlatBufferBuilder fbb;
@@ -315,8 +312,12 @@
}
// The differential equation for the dynamics of the system.
TestLocalizer::State DiffEq(const TestLocalizer::State &X,
- const TestLocalizer::Input &U) {
- return localizer_.DiffEq(X, U);
+ const Eigen::Vector2d &voltage) {
+ TestLocalizer::Input U;
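+    // The localizer input is now wider than the two drivetrain voltages, so
+    // zero it out and fill in just the voltage entries.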
+ U.setZero();
+ U(0) = voltage(0);
+ U(1) = voltage(1);
+ return localizer_.DiffEq(X, U, true);
}
Field field_;
@@ -433,7 +434,7 @@
output.left_voltage = 0;
output.right_voltage = 0;
spline_drivetrain_.SetOutput(&output);
- TestLocalizer::Input U(output.left_voltage, output.right_voltage);
+ Eigen::Vector2d U(output.left_voltage, output.right_voltage);
const ::Eigen::Matrix<double, 5, 1> goal_state =
spline_drivetrain_.CurrentGoalState();
@@ -454,7 +455,7 @@
U(1, 0) = ::std::max(::std::min(U(1, 0), 12.0), -12.0);
state = ::frc971::control_loops::RungeKuttaU(
- [this](const ::Eigen::Matrix<double, 10, 1> &X,
+ [this](const ::Eigen::Matrix<double, 12, 1> &X,
const ::Eigen::Matrix<double, 2, 1> &U) { return DiffEq(X, U); },
state, U, ::aos::time::DurationInSeconds(dt_config_.dt));
@@ -462,6 +463,8 @@
// interest here are that we (a) stop adding disturbances at the very end of
// the trajectory, to allow us to actually converge to the goal, and (b)
      // scale disturbances by the current velocity.
+ // TODO(james): Figure out how to persist good accelerometer values through
+ // the disturbances.
if (GetParam().disturb && i % 75 == 0) {
// Scale the disturbance so that when we have near-zero velocity we don't
// have much disturbance.
@@ -470,7 +473,8 @@
::std::pow(state(StateIdx::kRightVelocity, 0), 2)) /
3.0);
TestLocalizer::State disturbance;
- disturbance << 0.02, 0.02, 0.001, 0.03, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0;
+ disturbance << 0.02, 0.02, 0.001, 0.03, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0;
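+      // The two extra trailing zeros cover the state dimensions added with
+      // accelerometer support (the state grew from 10 to 12 entries).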
disturbance *= disturbance_scale;
state += disturbance;
}
@@ -482,13 +486,18 @@
const double left_enc = state(StateIdx::kLeftEncoder, 0);
const double right_enc = state(StateIdx::kRightEncoder, 0);
- const double gyro = (state(StateIdx::kRightVelocity, 0) -
- state(StateIdx::kLeftVelocity, 0)) /
+ const double gyro = (state(StateIdx::kRightVelocity) -
+ state(StateIdx::kLeftVelocity)) /
dt_config_.robot_radius / 2.0;
+ const TestLocalizer::State xdot = DiffEq(state, U);
+ const Eigen::Vector3d accel(
+ localizer_.CalcLongitudinalVelocity(xdot) -
+ gyro * state(StateIdx::kLateralVelocity),
+ gyro * localizer_.CalcLongitudinalVelocity(state), 1.0);
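+      // Models the accelerometer reading in the robot frame: longitudinal
+      // acceleration with the yaw-rate/lateral-velocity coupling removed,
+      // the centripetal term along y, and gravity (1.0, presumably in g's)
+      // along z.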
localizer_.UpdateEncodersAndGyro(left_enc + Normal(encoder_noise_),
right_enc + Normal(encoder_noise_),
- gyro + Normal(gyro_noise_), U, t);
+ gyro + Normal(gyro_noise_), U, accel, t);
for (size_t cam_idx = 0; cam_idx < camera_queues.size(); ++cam_idx) {
auto &camera_queue = camera_queues[cam_idx];
@@ -551,14 +560,14 @@
/*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
/*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
(TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0)
+ 0.0, 0.0, 0.0, 0.0)
.finished(),
(TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0)
+ 0.0, 0.0, 0.0, 0.0)
.finished(),
/*noisify=*/false,
/*disturb=*/false,
- /*estimate_tolerance=*/1e-5,
+ /*estimate_tolerance=*/1e-2,
/*goal_tolerance=*/2e-2,
}),
// Checks "perfect" estimation, but start off the spline and check
@@ -567,14 +576,14 @@
/*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
/*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
(TestLocalizer::State() << 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0)
+ 0.0, 0.0, 0.0, 0.0)
.finished(),
(TestLocalizer::State() << 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0)
+ 0.0, 0.0, 0.0, 0.0)
.finished(),
/*noisify=*/false,
/*disturb=*/false,
- /*estimate_tolerance=*/1e-5,
+ /*estimate_tolerance=*/1e-2,
/*goal_tolerance=*/2e-2,
}),
// Repeats perfect scenario, but add sensor noise.
@@ -582,10 +591,10 @@
/*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
/*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
(TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0)
+ 0.0, 0.0, 0.0, 0.0)
.finished(),
(TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0)
+ 0.0, 0.0, 0.0, 0.0)
.finished(),
/*noisify=*/true,
/*disturb=*/false,
@@ -597,14 +606,14 @@
/*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
/*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
(TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0)
+ 0.0, 0.0, 0.0, 0.0)
.finished(),
(TestLocalizer::State() << 0.1, -5.1, -0.01, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0, 0.0)
+ 0.0, 0.0, 0.0, 0.0, 0.0)
.finished(),
/*noisify=*/false,
/*disturb=*/false,
- /*estimate_tolerance=*/1e-4,
+ /*estimate_tolerance=*/1e-2,
/*goal_tolerance=*/2e-2,
}),
// Repeats perfect scenario, but add voltage + angular errors:
@@ -612,14 +621,14 @@
/*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
/*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
(TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
- 0.5, 0.02)
+ 0.5, 0.02, 0.0, 0.0)
.finished(),
(TestLocalizer::State() << 0.1, -5.1, -0.01, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0, 0.0)
+ 0.0, 0.0, 0.0, 0.0, 0.0)
.finished(),
/*noisify=*/false,
/*disturb=*/false,
- /*estimate_tolerance=*/1e-4,
+ /*estimate_tolerance=*/1e-2,
/*goal_tolerance=*/2e-2,
}),
// Add disturbances while we are driving:
@@ -627,14 +636,14 @@
/*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
/*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
(TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0)
+ 0.0, 0.0, 0.0, 0.0)
.finished(),
(TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0)
+ 0.0, 0.0, 0.0, 0.0)
.finished(),
/*noisify=*/false,
/*disturb=*/true,
- /*estimate_tolerance=*/3e-2,
+ /*estimate_tolerance=*/4e-2,
/*goal_tolerance=*/0.15,
}),
// Add noise and some initial error in addition:
@@ -642,15 +651,15 @@
/*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
/*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
(TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0)
+ 0.0, 0.0, 0.0, 0.0)
.finished(),
(TestLocalizer::State() << 0.1, -5.1, 0.03, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0)
+ 0.0, 0.0, 0.0, 0.0)
.finished(),
/*noisify=*/true,
/*disturb=*/true,
- /*estimate_tolerance=*/0.2,
- /*goal_tolerance=*/0.5,
+ /*estimate_tolerance=*/0.5,
+ /*goal_tolerance=*/1.0,
}),
// Try another spline, just in case the one I was using is special for
// some reason; this path will also go straight up to a target, to
@@ -659,10 +668,10 @@
/*control_pts_x=*/{{0.5, 3.5, 4.0, 8.0, 11.0, 10.2}},
/*control_pts_y=*/{{1.0, 1.0, -3.0, -2.0, -3.5, -3.65}},
(TestLocalizer::State() << 0.6, 1.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0)
+ 0.0, 0.0, 0.0, 0.0)
.finished(),
(TestLocalizer::State() << 0.5, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0)
+ 0.0, 0.0, 0.0, 0.0)
.finished(),
/*noisify=*/true,
/*disturb=*/false,
diff --git a/y2020/BUILD b/y2020/BUILD
index 9a233d4..f91f532 100644
--- a/y2020/BUILD
+++ b/y2020/BUILD
@@ -30,7 +30,6 @@
"//aos/mutex",
"//aos/network:team_number",
"//frc971:constants",
- "//frc971/control_loops:pose",
"//frc971/control_loops:static_zeroing_single_dof_profiled_subsystem",
"//y2020/control_loops/drivetrain:polydrivetrain_plants",
"//y2020/control_loops/superstructure/accelerator:accelerator_plants",
diff --git a/y2020/constants.h b/y2020/constants.h
index 4713072..7fbb704 100644
--- a/y2020/constants.h
+++ b/y2020/constants.h
@@ -7,7 +7,6 @@
#include <array>
#include "frc971/constants.h"
-#include "frc971/control_loops/pose.h"
#include "frc971/control_loops/static_zeroing_single_dof_profiled_subsystem.h"
#include "y2020/control_loops/drivetrain/drivetrain_dog_motor_plant.h"
#include "y2020/control_loops/superstructure/accelerator/accelerator_plant.h"
diff --git a/y2020/control_loops/drivetrain/BUILD b/y2020/control_loops/drivetrain/BUILD
index 9abb2a1..de669d6 100644
--- a/y2020/control_loops/drivetrain/BUILD
+++ b/y2020/control_loops/drivetrain/BUILD
@@ -1,4 +1,5 @@
load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
+load("//aos:config.bzl", "aos_config")
load("//tools/build_rules:select.bzl", "compiler_select", "cpu_select")
genrule(
@@ -53,6 +54,21 @@
)
cc_library(
+ name = "localizer",
+ srcs = ["localizer.cc"],
+ hdrs = ["localizer.h"],
+ deps = [
+ "//aos/containers:ring_buffer",
+ "//frc971/control_loops:profiled_subsystem_fbs",
+ "//frc971/control_loops/drivetrain:hybrid_ekf",
+ "//frc971/control_loops/drivetrain:localizer",
+ "//y2020:constants",
+ "//y2020/control_loops/superstructure:superstructure_status_fbs",
+ "//y2020/vision/sift:sift_fbs",
+ ],
+)
+
+cc_library(
name = "drivetrain_base",
srcs = [
"drivetrain_base.cc",
@@ -82,6 +98,54 @@
],
)
+aos_config(
+ name = "simulation_config",
+ src = "drivetrain_simulation_config.json",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//frc971/control_loops/drivetrain:simulation_channels",
+ "//y2020:config",
+ ],
+)
+
+cc_test(
+ name = "localizer_test",
+ srcs = ["localizer_test.cc"],
+ data = [":simulation_config.json"],
+ deps = [
+ ":drivetrain_base",
+ ":localizer",
+ "//aos/controls:control_loop_test",
+ "//aos/events:simulated_event_loop",
+ "//aos/events/logging:logger",
+ "//aos/network:team_number",
+ "//frc971/control_loops:team_number_test_environment",
+ "//frc971/control_loops/drivetrain:drivetrain_lib",
+ "//frc971/control_loops/drivetrain:drivetrain_test_lib",
+ ],
+)
+
+cc_test(
+ name = "drivetrain_replay_test",
+ srcs = ["drivetrain_replay_test.cc"],
+ data = [
+ "//y2020:config.json",
+ "@drivetrain_replay//file:spinning_wheels_while_still.bfbs",
+ ],
+ deps = [
+ ":drivetrain_base",
+ "//aos:configuration",
+ "//aos:init",
+ "//aos/events:shm_event_loop",
+ "//aos/events:simulated_event_loop",
+ "//aos/events/logging:logger",
+ "//aos/testing:googletest",
+ "//frc971/control_loops/drivetrain:drivetrain_lib",
+ "@com_github_gflags_gflags//:gflags",
+ "@com_github_google_glog//:glog",
+ ],
+)
+
cc_binary(
name = "drivetrain_replay",
srcs = ["drivetrain_replay.cc"],
diff --git a/y2020/control_loops/drivetrain/drivetrain_replay_test.cc b/y2020/control_loops/drivetrain/drivetrain_replay_test.cc
new file mode 100644
index 0000000..6933a51
--- /dev/null
+++ b/y2020/control_loops/drivetrain/drivetrain_replay_test.cc
@@ -0,0 +1,99 @@
+// This file serves to test replaying the localizer over existing logfiles to
+// check for regressions. Currently, it just uses a single logfile pulled from
+// running the 2016 robot against a wall and confirms that the X/Y estimate
+// does not change too much.
+//
+// Note that the current logfile test will break once we update the drivetrain
+// config for 2020, since both the gear ratios and IMU transformation will no
+// longer be valid.
+// TODO(james): Do something about that when the time comes--could just copy
+// the existing drivetrain config into this file and use it directly.
+#include "gtest/gtest.h"
+
+#include "aos/configuration.h"
+#include "aos/events/logging/logger.h"
+#include "aos/events/simulated_event_loop.h"
+#include "aos/init.h"
+#include "aos/json_to_flatbuffer.h"
+#include "aos/network/team_number.h"
+#include "frc971/control_loops/drivetrain/drivetrain.h"
+#include "gflags/gflags.h"
+#include "y2020/control_loops/drivetrain/drivetrain_base.h"
+
+DEFINE_string(
+ logfile, "external/drivetrain_replay/file/spinning_wheels_while_still.bfbs",
+ "Name of the logfile to read from.");
+DEFINE_string(config, "y2020/config.json",
+ "Name of the config file to replay using.");
+
+namespace y2020 {
+namespace control_loops {
+namespace drivetrain {
+namespace testing {
+
+class DrivetrainReplayTest : public ::testing::Test {
+ public:
+ DrivetrainReplayTest()
+ : config_(aos::configuration::ReadConfig(FLAGS_config)),
+ reader_(FLAGS_logfile, &config_.message()) {
+ aos::network::OverrideTeamNumber(971);
+
+ // TODO(james): Actually enforce not sending on the same buses as the
+ // logfile spews out.
+ reader_.RemapLoggedChannel("/drivetrain",
+ "frc971.control_loops.drivetrain.Status");
+ reader_.RemapLoggedChannel("/drivetrain",
+ "frc971.control_loops.drivetrain.Output");
+ reader_.Register();
+
+ drivetrain_event_loop_ =
+ reader_.event_loop_factory()->MakeEventLoop("drivetrain");
+ drivetrain_event_loop_->SkipTimingReport();
+
+ localizer_ =
+ std::make_unique<frc971::control_loops::drivetrain::DeadReckonEkf>(
+ drivetrain_event_loop_.get(), GetDrivetrainConfig());
+ drivetrain_ =
+ std::make_unique<frc971::control_loops::drivetrain::DrivetrainLoop>(
+ GetDrivetrainConfig(), drivetrain_event_loop_.get(),
+ localizer_.get());
+
+ test_event_loop_ =
+ reader_.event_loop_factory()->MakeEventLoop("drivetrain");
+ status_fetcher_ = test_event_loop_->MakeFetcher<
+ frc971::control_loops::drivetrain::Status>("/drivetrain");
+ }
+
+ const aos::FlatbufferDetachedBuffer<aos::Configuration> config_;
+ aos::logger::LogReader reader_;
+
+ std::unique_ptr<aos::EventLoop> drivetrain_event_loop_;
+ std::unique_ptr<frc971::control_loops::drivetrain::DeadReckonEkf> localizer_;
+ std::unique_ptr<frc971::control_loops::drivetrain::DrivetrainLoop>
+ drivetrain_;
+ std::unique_ptr<aos::EventLoop> test_event_loop_;
+
+ aos::Fetcher<frc971::control_loops::drivetrain::Status> status_fetcher_;
+};
+
+// Tests that we do a good job of trusting the IMU when the wheels are spinning
+// and the actual robot is not moving.
+TEST_F(DrivetrainReplayTest, SpinningWheels) {
+ reader_.event_loop_factory()->Run();
+
+ ASSERT_TRUE(status_fetcher_.Fetch());
+ ASSERT_TRUE(status_fetcher_->has_x());
+ ASSERT_TRUE(status_fetcher_->has_y());
+ ASSERT_TRUE(status_fetcher_->has_theta());
+ EXPECT_LT(std::abs(status_fetcher_->x()), 0.1);
+ // Because the encoders should not be affecting the y or yaw axes, expect a
+  // reasonably precise result (although, since this is a real world test, the
+  // robot probably did actually move by some non-zero amount).
+ EXPECT_LT(std::abs(status_fetcher_->y()), 0.05);
+ EXPECT_LT(std::abs(status_fetcher_->theta()), 0.02);
+}
+
+} // namespace testing
+} // namespace drivetrain
+} // namespace control_loops
+} // namespace y2020
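
Note: the replay test above can be run locally with
"bazel test //y2020/control_loops/drivetrain:drivetrain_replay_test"; the
logfile comes from the @drivetrain_replay http_file in the data deps, and the
--logfile/--config flags allow pointing it at a different log or config.
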
diff --git a/y2020/control_loops/drivetrain/drivetrain_simulation_config.json b/y2020/control_loops/drivetrain/drivetrain_simulation_config.json
new file mode 100644
index 0000000..f4bfbea
--- /dev/null
+++ b/y2020/control_loops/drivetrain/drivetrain_simulation_config.json
@@ -0,0 +1,6 @@
+{
+ "imports": [
+ "../../y2020.json",
+ "../../../frc971/control_loops/drivetrain/drivetrain_simulation_channels.json"
+ ]
+}
diff --git a/y2020/control_loops/drivetrain/localizer.cc b/y2020/control_loops/drivetrain/localizer.cc
new file mode 100644
index 0000000..a9e454a
--- /dev/null
+++ b/y2020/control_loops/drivetrain/localizer.cc
@@ -0,0 +1,182 @@
+#include "y2020/control_loops/drivetrain/localizer.h"
+
+#include "y2020/constants.h"
+
+namespace y2020 {
+namespace control_loops {
+namespace drivetrain {
+
+namespace {
+// Converts a flatbuffer TransformationMatrix to an Eigen matrix. Technically,
+// this should be able to do a single memcpy, but the extra verbosity here seems
+// appropriate.
+Eigen::Matrix<double, 4, 4> FlatbufferToTransformationMatrix(
+ const frc971::vision::sift::TransformationMatrix &flatbuffer) {
+ CHECK_EQ(16u, CHECK_NOTNULL(flatbuffer.data())->size());
+ Eigen::Matrix<double, 4, 4> result;
+ result.setIdentity();
+ for (int row = 0; row < 4; ++row) {
+ for (int col = 0; col < 4; ++col) {
+ result(row, col) = (*flatbuffer.data())[row * 4 + col];
+ }
+ }
+ return result;
+}
+} // namespace
+
+Localizer::Localizer(
+ aos::EventLoop *event_loop,
+ const frc971::control_loops::drivetrain::DrivetrainConfig<double>
+ &dt_config)
+ : event_loop_(event_loop), dt_config_(dt_config), ekf_(dt_config) {
+  // TODO(james): This doesn't really need to be a Watcher; we could just use a
+  // Fetcher for the superstructure status. The Watcher seems simpler for the
+  // time being, although technically it should be possible to do everything we
+  // need using just a Fetcher without even maintaining a separate buffer (but
+  // that seems overly cute).
+ event_loop_->MakeWatcher("/superstructure",
+ [this](const superstructure::Status &status) {
+ HandleSuperstructureStatus(status);
+ });
+
+ image_fetchers_.emplace_back(
+ event_loop_->MakeFetcher<frc971::vision::sift::ImageMatchResult>(
+ "/camera"));
+
+ target_selector_.set_has_target(false);
+}
+
+void Localizer::HandleSuperstructureStatus(
+ const y2020::control_loops::superstructure::Status &status) {
+ CHECK(status.has_turret());
+ turret_data_.Push({event_loop_->monotonic_now(), status.turret()->position(),
+ status.turret()->velocity()});
+}
+
+Localizer::TurretData Localizer::GetTurretDataForTime(
+ aos::monotonic_clock::time_point time) {
+ if (turret_data_.empty()) {
+ return {};
+ }
+
+ aos::monotonic_clock::duration lowest_time_error =
+ aos::monotonic_clock::duration::max();
+ TurretData best_data_match;
+ for (const auto &sample : turret_data_) {
+ const aos::monotonic_clock::duration time_error =
+ std::chrono::abs(sample.receive_time - time);
+ if (time_error < lowest_time_error) {
+ lowest_time_error = time_error;
+ best_data_match = sample;
+ }
+ }
+ return best_data_match;
+}
+
+void Localizer::Update(const ::Eigen::Matrix<double, 2, 1> &U,
+ aos::monotonic_clock::time_point now,
+ double left_encoder, double right_encoder,
+ double gyro_rate, const Eigen::Vector3d &accel) {
+ for (auto &image_fetcher : image_fetchers_) {
+ while (image_fetcher.FetchNext()) {
+ HandleImageMatch(*image_fetcher);
+ }
+ }
+ ekf_.UpdateEncodersAndGyro(left_encoder, right_encoder, gyro_rate, U, accel,
+ now);
+}
+
+void Localizer::HandleImageMatch(
+ const frc971::vision::sift::ImageMatchResult &result) {
+ // TODO(james): compensate for capture time across multiple nodes correctly.
+ aos::monotonic_clock::time_point capture_time(
+ std::chrono::nanoseconds(result.image_monotonic_timestamp_ns()));
+ CHECK(result.has_camera_calibration());
+ // Per the ImageMatchResult specification, we can actually determine whether
+ // the camera is the turret camera just from the presence of the
+ // turret_extrinsics member.
+ const bool is_turret = result.camera_calibration()->has_turret_extrinsics();
+ const TurretData turret_data = GetTurretDataForTime(capture_time);
+ // Ignore readings when the turret is spinning too fast, on the assumption
+ // that the odds of screwing up the time compensation are higher.
+ // Note that the current number here is chosen pretty arbitrarily--1 rad / sec
+ // seems reasonable, but may be unnecessarily low or high.
+ constexpr double kMaxTurretVelocity = 1.0;
+ if (is_turret && std::abs(turret_data.velocity) > kMaxTurretVelocity) {
+ return;
+ }
+ CHECK(result.camera_calibration()->has_fixed_extrinsics());
+ const Eigen::Matrix<double, 4, 4> fixed_extrinsics =
+ FlatbufferToTransformationMatrix(
+ *result.camera_calibration()->fixed_extrinsics());
+ // Calculate the pose of the camera relative to the robot origin.
+ Eigen::Matrix<double, 4, 4> H_camera_robot = fixed_extrinsics;
+ if (is_turret) {
+ H_camera_robot = H_camera_robot *
+ frc971::control_loops::TransformationMatrixForYaw(
+ turret_data.position) *
+ FlatbufferToTransformationMatrix(
+ *result.camera_calibration()->turret_extrinsics());
+ }
+
+ if (!result.has_camera_poses()) {
+ return;
+ }
+
+ for (const frc971::vision::sift::CameraPose *vision_result :
+ *result.camera_poses()) {
+ if (!vision_result->has_camera_to_target() ||
+ !vision_result->has_field_to_target()) {
+ continue;
+ }
+ const Eigen::Matrix<double, 4, 4> H_target_camera =
+ FlatbufferToTransformationMatrix(*vision_result->camera_to_target());
+ const Eigen::Matrix<double, 4, 4> H_target_field =
+ FlatbufferToTransformationMatrix(*vision_result->field_to_target());
+ // Back out the robot position that is implied by the current camera
+ // reading.
+ const Pose measured_pose(H_target_field *
+ (H_camera_robot * H_target_camera).inverse());
+ const Eigen::Matrix<double, 3, 1> Z(measured_pose.rel_pos().x(),
+ measured_pose.rel_pos().y(),
+ measured_pose.rel_theta());
+ // TODO(james): Figure out how to properly handle calculating the
+ // noise. Currently, the values are deliberately tuned so that image updates
+ // will not be trusted overly much. In theory, we should probably also be
+ // populating some cross-correlation terms.
+ // Note that these are the noise standard deviations (they are squared below
+ // to get variances).
+ Eigen::Matrix<double, 3, 1> noises(1.0, 1.0, 0.1);
+ // Augment the noise by the approximate rotational speed of the
+ // camera. This should help account for the fact that, while we are
+ // spinning, slight timing errors in the camera/turret data will tend to
+    // have much more drastic effects on the results.
+ noises *= 1.0 + std::abs((right_velocity() - left_velocity()) /
+ (2.0 * dt_config_.robot_radius) +
+ (is_turret ? turret_data.velocity : 0.0));
+ Eigen::Matrix3d R = Eigen::Matrix3d::Zero();
+ R.diagonal() = noises.cwiseAbs2();
+ Eigen::Matrix<double, HybridEkf::kNOutputs, HybridEkf::kNStates> H;
+ H.setZero();
+ H(0, StateIdx::kX) = 1;
+ H(1, StateIdx::kY) = 1;
+ H(2, StateIdx::kTheta) = 1;
+ ekf_.Correct(Z, nullptr, {}, [H, Z](const State &X, const Input &) {
+ Eigen::Vector3d Zhat = H * X;
+ // In order to deal with wrapping of the
+ // angle, calculate an expected angle that is
+ // in the range (Z(2) - pi, Z(2) + pi].
+ const double angle_error =
+ aos::math::NormalizeAngle(
+ X(StateIdx::kTheta) - Z(2));
+ Zhat(2) = Z(2) + angle_error;
+ return Zhat;
+ },
+ [H](const State &) { return H; }, R, capture_time);
+ }
+}
+
+} // namespace drivetrain
+} // namespace control_loops
+} // namespace y2020
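
For reference, the camera correction above composes the three transformations
using the code's H_a_b naming (a homogeneous transform taking frame-a
coordinates to frame-b coordinates):

  H_robot_field = H_target_field * (H_camera_robot * H_target_camera)^-1

and the x/y translation and yaw of H_robot_field form the measurement
Z = (x, y, theta). The noise standard deviations sigma = (1.0, 1.0, 0.1) are
scaled by the approximate camera yaw rate before being squared onto the
diagonal of R:

  scale = 1 + |(v_right - v_left) / (2 * robot_radius) + omega_turret|
  R = diag((scale * sigma)^2)
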
diff --git a/y2020/control_loops/drivetrain/localizer.h b/y2020/control_loops/drivetrain/localizer.h
new file mode 100644
index 0000000..0b79361
--- /dev/null
+++ b/y2020/control_loops/drivetrain/localizer.h
@@ -0,0 +1,102 @@
+#ifndef Y2020_CONTROL_LOOPS_DRIVETRAIN_LOCALIZER_H_
+#define Y2020_CONTROL_LOOPS_DRIVETRAIN_LOCALIZER_H_
+
+#include "aos/containers/ring_buffer.h"
+#include "aos/events/event_loop.h"
+#include "frc971/control_loops/drivetrain/hybrid_ekf.h"
+#include "frc971/control_loops/drivetrain/localizer.h"
+#include "y2020/control_loops/superstructure/superstructure_status_generated.h"
+#include "y2020/vision/sift/sift_generated.h"
+
+namespace y2020 {
+namespace control_loops {
+namespace drivetrain {
+
+// This class handles the localization for the 2020 robot. In order to handle
+// camera updates, we get the ImageMatchResult message from the cameras and then
+// project the result onto the 2-D X/Y plane and use the implied robot
+// position/heading from that as the measurement. This is distinct from 2019,
+// when we used a heading/distance/skew measurement update. This is because
+// updating with x/y/theta directly seems to be better conditioned (even if it
+// may not reflect the measurement noise quite as accurately). The poor
+// conditioning seemed to work in 2019, but due to the addition of a couple of
+// velocity offset states that allow us to use the accelerometer more
+// effectively, things started to become unstable.
+class Localizer : public frc971::control_loops::drivetrain::LocalizerInterface {
+ public:
+ typedef frc971::control_loops::TypedPose<double> Pose;
+ typedef frc971::control_loops::drivetrain::HybridEkf<double> HybridEkf;
+ typedef typename HybridEkf::State State;
+ typedef typename HybridEkf::StateIdx StateIdx;
+ typedef typename HybridEkf::StateSquare StateSquare;
+ typedef typename HybridEkf::Input Input;
+ typedef typename HybridEkf::Output Output;
+ Localizer(aos::EventLoop *event_loop,
+ const frc971::control_loops::drivetrain::DrivetrainConfig<double>
+ &dt_config);
+ State Xhat() const override { return ekf_.X_hat(); }
+ frc971::control_loops::drivetrain::TrivialTargetSelector *target_selector()
+ override {
+ return &target_selector_;
+ }
+
+ void Update(const ::Eigen::Matrix<double, 2, 1> &U,
+ aos::monotonic_clock::time_point now, double left_encoder,
+ double right_encoder, double gyro_rate,
+ const Eigen::Vector3d &accel) override;
+
+ void Reset(aos::monotonic_clock::time_point t, const State &state) {
+ ekf_.ResetInitialState(t, state, ekf_.P());
+ }
+
+ void ResetPosition(aos::monotonic_clock::time_point t, double x, double y,
+ double theta, double /*theta_override*/,
+ bool /*reset_theta*/) override {
+ const double left_encoder = ekf_.X_hat(StateIdx::kLeftEncoder);
+ const double right_encoder = ekf_.X_hat(StateIdx::kRightEncoder);
+ ekf_.ResetInitialState(t, (Ekf::State() << x, y, theta, left_encoder, 0,
+ right_encoder, 0, 0, 0, 0, 0, 0).finished(),
+ ekf_.P());
+ };
+
+ private:
+ // Storage for a single turret position data point.
+ struct TurretData {
+ aos::monotonic_clock::time_point receive_time =
+ aos::monotonic_clock::min_time;
+ double position = 0.0; // rad
+ double velocity = 0.0; // rad/sec
+ };
+
+ // Processes new image data and updates the EKF.
+ void HandleImageMatch(const frc971::vision::sift::ImageMatchResult &result);
+
+ // Processes the most recent turret position and stores it in the turret_data_
+ // buffer.
+ void HandleSuperstructureStatus(
+ const y2020::control_loops::superstructure::Status &status);
+
+ // Retrieves the turret data closest to the provided time.
+ TurretData GetTurretDataForTime(aos::monotonic_clock::time_point time);
+
+ aos::EventLoop *const event_loop_;
+ const frc971::control_loops::drivetrain::DrivetrainConfig<double> dt_config_;
+ HybridEkf ekf_;
+
+ std::vector<aos::Fetcher<frc971::vision::sift::ImageMatchResult>>
+ image_fetchers_;
+
+ // Buffer of recent turret data--this is used so that when we receive a camera
+ // frame from the turret, we can back out what the turret angle was at that
+ // time.
+ aos::RingBuffer<TurretData, 200> turret_data_;
+
+ // Target selector to allow us to satisfy the LocalizerInterface requirements.
+ frc971::control_loops::drivetrain::TrivialTargetSelector target_selector_;
+};
+
+} // namespace drivetrain
+} // namespace control_loops
+} // namespace y2020
+
+#endif // Y2020_CONTROL_LOOPS_DRIVETRAIN_LOCALIZER_H_
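
A sketch of the measurement model described in the class comment above,
matching the Correct() call in localizer.cc: the camera update is linear in
the state,

  Z = (x_meas, y_meas, theta_meas),  Zhat = H * X

with H selecting the kX, kY, and kTheta rows, except that the expected heading
is remapped to Zhat(2) = Z(2) + NormalizeAngle(X(kTheta) - Z(2)), which keeps
the innovation Z - Zhat in (-pi, pi] so the filter never sees an angle wrap.
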
diff --git a/y2020/control_loops/drivetrain/localizer_test.cc b/y2020/control_loops/drivetrain/localizer_test.cc
new file mode 100644
index 0000000..3be86da
--- /dev/null
+++ b/y2020/control_loops/drivetrain/localizer_test.cc
@@ -0,0 +1,497 @@
+#include <queue>
+
+#include "gtest/gtest.h"
+
+#include "aos/controls/control_loop_test.h"
+#include "aos/events/logging/logger.h"
+#include "aos/network/team_number.h"
+#include "frc971/control_loops/drivetrain/drivetrain.h"
+#include "frc971/control_loops/drivetrain/drivetrain_test_lib.h"
+#include "frc971/control_loops/team_number_test_environment.h"
+#include "y2020/control_loops/drivetrain/drivetrain_base.h"
+#include "y2020/control_loops/drivetrain/localizer.h"
+#include "y2020/control_loops/superstructure/superstructure_status_generated.h"
+
+DEFINE_string(output_file, "",
+ "If set, logs all channels to the provided logfile.");
+
+// This file tests that the full 2020 localizer behaves sanely.
+
+namespace y2020 {
+namespace control_loops {
+namespace drivetrain {
+namespace testing {
+
+using frc971::control_loops::drivetrain::DrivetrainConfig;
+using frc971::control_loops::drivetrain::Goal;
+using frc971::control_loops::drivetrain::LocalizerControl;
+using frc971::vision::sift::ImageMatchResult;
+using frc971::vision::sift::ImageMatchResultT;
+using frc971::vision::sift::CameraPoseT;
+using frc971::vision::sift::CameraCalibrationT;
+using frc971::vision::sift::TransformationMatrixT;
+
+namespace {
+DrivetrainConfig<double> GetTest2020DrivetrainConfig() {
+ DrivetrainConfig<double> config = GetDrivetrainConfig();
+ return config;
+}
+
+// Copies an Eigen matrix into a row-major vector of the data.
+std::vector<float> MatrixToVector(const Eigen::Matrix<double, 4, 4> &H) {
+ std::vector<float> data;
+ for (int row = 0; row < 4; ++row) {
+ for (int col = 0; col < 4; ++col) {
+ data.push_back(H(row, col));
+ }
+ }
+ return data;
+}
+
+// Provides the location of the turret to use for simulation. Mostly we care
+// about providing a location that is not perfectly aligned with the robot's
+// origin.
+Eigen::Matrix<double, 4, 4> TurretRobotTransformation() {
+ Eigen::Matrix<double, 4, 4> H;
+ H.setIdentity();
+ H.block<3, 1>(0, 3) << 1, 1.1, 0.9;
+ return H;
+}
+
+// Provides the location of the camera on the turret.
+// TODO(james): Also simulate a fixed camera that is *not* on the turret.
+Eigen::Matrix<double, 4, 4> CameraTurretTransformation() {
+ Eigen::Matrix<double, 4, 4> H;
+ H.setIdentity();
+ H.block<3, 1>(0, 3) << 0.1, 0, 0;
+ // Introduce a bit of pitch to make sure that we're exercising all the code.
+ H.block<3, 3>(0, 0) =
+ Eigen::AngleAxis<double>(0.1, Eigen::Vector3d::UnitY()) *
+ H.block<3, 3>(0, 0);
+ return H;
+}
+
+// The absolute target location to use. Not meant to correspond with a
+// particular field target.
+// TODO(james): Make more targets.
+std::vector<Eigen::Matrix<double, 4, 4>> TargetLocations() {
+ std::vector<Eigen::Matrix<double, 4, 4>> locations;
+ Eigen::Matrix<double, 4, 4> H;
+ H.setIdentity();
+ H.block<3, 1>(0, 3) << 10.0, 0, 0;
+ locations.push_back(H);
+ H.block<3, 1>(0, 3) << -10.0, 0, 0;
+ locations.push_back(H);
+ return locations;
+}
+} // namespace
+
+namespace chrono = std::chrono;
+using frc971::control_loops::drivetrain::testing::DrivetrainSimulation;
+using frc971::control_loops::drivetrain::DrivetrainLoop;
+using frc971::control_loops::drivetrain::testing::GetTestDrivetrainConfig;
+using aos::monotonic_clock;
+
+class LocalizedDrivetrainTest : public aos::testing::ControlLoopTest {
+ protected:
+ // We must use the 2020 drivetrain config so that we don't have to deal
+ // with shifting:
+ LocalizedDrivetrainTest()
+ : aos::testing::ControlLoopTest(
+ aos::configuration::ReadConfig(
+ "y2020/control_loops/drivetrain/simulation_config.json"),
+ GetTest2020DrivetrainConfig().dt),
+ test_event_loop_(MakeEventLoop("test")),
+ drivetrain_goal_sender_(
+ test_event_loop_->MakeSender<Goal>("/drivetrain")),
+ drivetrain_goal_fetcher_(
+ test_event_loop_->MakeFetcher<Goal>("/drivetrain")),
+ localizer_control_sender_(
+ test_event_loop_->MakeSender<LocalizerControl>("/drivetrain")),
+ superstructure_status_sender_(
+ test_event_loop_->MakeSender<superstructure::Status>(
+ "/superstructure")),
+ drivetrain_event_loop_(MakeEventLoop("drivetrain")),
+ dt_config_(GetTest2020DrivetrainConfig()),
+ camera_sender_(
+ test_event_loop_->MakeSender<ImageMatchResult>("/camera")),
+ localizer_(drivetrain_event_loop_.get(), dt_config_),
+ drivetrain_(dt_config_, drivetrain_event_loop_.get(), &localizer_),
+ drivetrain_plant_event_loop_(MakeEventLoop("plant")),
+ drivetrain_plant_(drivetrain_plant_event_loop_.get(), dt_config_),
+ last_frame_(monotonic_now()) {
+ set_team_id(frc971::control_loops::testing::kTeamNumber);
+ SetStartingPosition({3.0, 2.0, 0.0});
+ set_battery_voltage(12.0);
+
+ if (!FLAGS_output_file.empty()) {
+ log_buffer_writer_ = std::make_unique<aos::logger::DetachedBufferWriter>(
+ FLAGS_output_file);
+ logger_event_loop_ = MakeEventLoop("logger");
+ logger_ = std::make_unique<aos::logger::Logger>(log_buffer_writer_.get(),
+ logger_event_loop_.get());
+ }
+
+ test_event_loop_->MakeWatcher(
+ "/drivetrain",
+ [this](const frc971::control_loops::drivetrain::Status &) {
+ // Needs to do camera updates right after we run the control loop.
+ if (enable_cameras_) {
+ SendDelayedFrames();
+ if (last_frame_ + std::chrono::milliseconds(100) <
+ monotonic_now()) {
+ CaptureFrames();
+ last_frame_ = monotonic_now();
+ }
+ }
+ });
+
+ test_event_loop_->AddPhasedLoop(
+ [this](int) {
+ // Also use the opportunity to send out turret messages.
+ UpdateTurretPosition();
+ auto builder = superstructure_status_sender_.MakeBuilder();
+ auto turret_builder =
+ builder
+ .MakeBuilder<frc971::control_loops::
+ PotAndAbsoluteEncoderProfiledJointStatus>();
+ turret_builder.add_position(turret_position_);
+ turret_builder.add_velocity(turret_velocity_);
+ auto turret_offset = turret_builder.Finish();
+ auto status_builder = builder.MakeBuilder<superstructure::Status>();
+ status_builder.add_turret(turret_offset);
+ builder.Send(status_builder.Finish());
+ },
+ chrono::milliseconds(5));
+
+ // Run for enough time to allow the gyro/imu zeroing code to run.
+ RunFor(std::chrono::seconds(10));
+ }
+
+ virtual ~LocalizedDrivetrainTest() override {}
+
+ void SetStartingPosition(const Eigen::Matrix<double, 3, 1> &xytheta) {
+ *drivetrain_plant_.mutable_state() << xytheta.x(), xytheta.y(),
+ xytheta(2, 0), 0.0, 0.0;
+ Eigen::Matrix<double, Localizer::HybridEkf::kNStates, 1> localizer_state;
+ localizer_state.setZero();
+ localizer_state.block<3, 1>(0, 0) = xytheta;
+ localizer_.Reset(monotonic_now(), localizer_state);
+ }
+
+ void VerifyNearGoal(double eps = 1e-3) {
+ drivetrain_goal_fetcher_.Fetch();
+ EXPECT_NEAR(drivetrain_goal_fetcher_->left_goal(),
+ drivetrain_plant_.GetLeftPosition(), eps);
+ EXPECT_NEAR(drivetrain_goal_fetcher_->right_goal(),
+ drivetrain_plant_.GetRightPosition(), eps);
+ }
+
+ ::testing::AssertionResult IsNear(double expected, double actual,
+ double epsilon) {
+ if (std::abs(expected - actual) < epsilon) {
+ return ::testing::AssertionSuccess();
+ } else {
+ return ::testing::AssertionFailure()
+ << "Expected " << expected << " but got " << actual
+ << " with a max difference of " << epsilon
+ << " and an actual difference of " << std::abs(expected - actual);
+ }
+ }
+ ::testing::AssertionResult VerifyEstimatorAccurate(double eps) {
+ const Eigen::Matrix<double, 5, 1> true_state = drivetrain_plant_.state();
+ ::testing::AssertionResult result(true);
+ if (!(result = IsNear(localizer_.x(), true_state(0), eps))) {
+ return result;
+ }
+ if (!(result = IsNear(localizer_.y(), true_state(1), eps))) {
+ return result;
+ }
+ if (!(result = IsNear(localizer_.theta(), true_state(2), eps))) {
+ return result;
+ }
+ if (!(result = IsNear(localizer_.left_velocity(), true_state(3), eps))) {
+ return result;
+ }
+ if (!(result = IsNear(localizer_.right_velocity(), true_state(4), eps))) {
+ return result;
+ }
+ return result;
+ }
+
+ // Goes through and captures frames on the camera(s), queueing them up to be
+ // sent by SendDelayedFrames().
+ void CaptureFrames() {
+ const frc971::control_loops::Pose robot_pose(
+ {drivetrain_plant_.GetPosition().x(),
+ drivetrain_plant_.GetPosition().y(), 0.0},
+ drivetrain_plant_.state()(2, 0));
+ std::unique_ptr<ImageMatchResultT> frame(new ImageMatchResultT());
+
+ for (const auto &H_target_field : TargetLocations()) {
+ std::unique_ptr<CameraPoseT> camera_target(new CameraPoseT());
+
+ camera_target->field_to_target.reset(new TransformationMatrixT());
+ camera_target->field_to_target->data = MatrixToVector(H_target_field);
+
+ Eigen::Matrix<double, 4, 4> H_camera_turret =
+ Eigen::Matrix<double, 4, 4>::Identity();
+ if (is_turreted_) {
+ H_camera_turret = frc971::control_loops::TransformationMatrixForYaw(
+ turret_position_) *
+ CameraTurretTransformation();
+ }
+
+ // TODO(james): Use non-zero turret angles.
+ camera_target->camera_to_target.reset(new TransformationMatrixT());
+ camera_target->camera_to_target->data = MatrixToVector(
+ (robot_pose.AsTransformationMatrix() * TurretRobotTransformation() *
+ H_camera_turret).inverse() *
+ H_target_field);
+
+ frame->camera_poses.emplace_back(std::move(camera_target));
+ }
+
+ frame->image_monotonic_timestamp_ns =
+ chrono::duration_cast<chrono::nanoseconds>(
+ monotonic_now().time_since_epoch())
+ .count();
+ frame->camera_calibration.reset(new CameraCalibrationT());
+ {
+ frame->camera_calibration->fixed_extrinsics.reset(
+ new TransformationMatrixT());
+ TransformationMatrixT *H_turret_robot =
+ frame->camera_calibration->fixed_extrinsics.get();
+ H_turret_robot->data = MatrixToVector(TurretRobotTransformation());
+ }
+
+ if (is_turreted_) {
+ frame->camera_calibration->turret_extrinsics.reset(
+ new TransformationMatrixT());
+ TransformationMatrixT *H_camera_turret =
+ frame->camera_calibration->turret_extrinsics.get();
+ H_camera_turret->data = MatrixToVector(CameraTurretTransformation());
+ }
+
+ camera_delay_queue_.emplace(monotonic_now(), std::move(frame));
+ }
+
+ // Actually sends out all the camera frames.
+ void SendDelayedFrames() {
+ const std::chrono::milliseconds camera_latency(150);
+ while (!camera_delay_queue_.empty() &&
+ std::get<0>(camera_delay_queue_.front()) <
+ monotonic_now() - camera_latency) {
+ auto builder = camera_sender_.MakeBuilder();
+ ASSERT_TRUE(builder.Send(ImageMatchResult::Pack(
+ *builder.fbb(), std::get<1>(camera_delay_queue_.front()).get())));
+ camera_delay_queue_.pop();
+ }
+ }
+
+ std::unique_ptr<aos::EventLoop> test_event_loop_;
+ aos::Sender<Goal> drivetrain_goal_sender_;
+ aos::Fetcher<Goal> drivetrain_goal_fetcher_;
+ aos::Sender<LocalizerControl> localizer_control_sender_;
+ aos::Sender<superstructure::Status> superstructure_status_sender_;
+
+ std::unique_ptr<aos::EventLoop> drivetrain_event_loop_;
+ const frc971::control_loops::drivetrain::DrivetrainConfig<double>
+ dt_config_;
+
+ aos::Sender<ImageMatchResult> camera_sender_;
+
+ Localizer localizer_;
+ DrivetrainLoop drivetrain_;
+
+ std::unique_ptr<aos::EventLoop> drivetrain_plant_event_loop_;
+ DrivetrainSimulation drivetrain_plant_;
+ monotonic_clock::time_point last_frame_;
+
+ // A queue of camera frames so that we can add a time delay to the data
+ // coming from the cameras.
+ std::queue<std::tuple<aos::monotonic_clock::time_point,
+ std::unique_ptr<ImageMatchResultT>>>
+ camera_delay_queue_;
+
+ void set_enable_cameras(bool enable) { enable_cameras_ = enable; }
+ void set_camera_is_turreted(bool turreted) { is_turreted_ = turreted; }
+
+ void set_turret(double position, double velocity) {
+ turret_position_ = position;
+ turret_velocity_ = velocity;
+ }
+
+ void SendGoal(double left, double right) {
+ auto builder = drivetrain_goal_sender_.MakeBuilder();
+
+ Goal::Builder drivetrain_builder = builder.MakeBuilder<Goal>();
+ drivetrain_builder.add_controller_type(
+ frc971::control_loops::drivetrain::ControllerType::MOTION_PROFILE);
+ drivetrain_builder.add_left_goal(left);
+ drivetrain_builder.add_right_goal(right);
+
+ EXPECT_TRUE(builder.Send(drivetrain_builder.Finish()));
+ }
+
+ private:
+ void UpdateTurretPosition() {
+ turret_position_ +=
+ turret_velocity_ *
+ aos::time::DurationInSeconds(monotonic_now() - last_turret_update_);
+ last_turret_update_ = monotonic_now();
+ }
+
+ bool enable_cameras_ = false;
+ // Whether to make the camera be on the turret or not.
+ bool is_turreted_ = true;
+
+ // The time at which we last incremented turret_position_.
+ monotonic_clock::time_point last_turret_update_ = monotonic_clock::min_time;
+ // Current turret position and velocity. These are set directly by the user in
+ // the test, and if velocity is non-zero, then we will automatically increment
+ // turret_position_ with every timestep.
+ double turret_position_ = 0.0; // rad
+ double turret_velocity_ = 0.0; // rad / sec
+
+ std::unique_ptr<aos::EventLoop> logger_event_loop_;
+ std::unique_ptr<aos::logger::DetachedBufferWriter> log_buffer_writer_;
+ std::unique_ptr<aos::logger::Logger> logger_;
+};
+
+// Tests that running with no camera updates and a perfect model results in no
+// error.
+TEST_F(LocalizedDrivetrainTest, NoCameraUpdate) {
+ SetEnabled(true);
+ set_enable_cameras(false);
+ EXPECT_TRUE(VerifyEstimatorAccurate(1e-7));
+
+ SendGoal(-1.0, 1.0);
+
+ RunFor(chrono::seconds(3));
+ VerifyNearGoal();
+ EXPECT_TRUE(VerifyEstimatorAccurate(5e-4));
+}
+
+// Tests that camera updates with a perfect model result in no errors.
+TEST_F(LocalizedDrivetrainTest, PerfectCameraUpdate) {
+ SetEnabled(true);
+ set_enable_cameras(true);
+ set_camera_is_turreted(true);
+
+ SendGoal(-1.0, 1.0);
+
+ RunFor(chrono::seconds(3));
+ VerifyNearGoal();
+ EXPECT_TRUE(VerifyEstimatorAccurate(5e-4));
+}
+
+// Tests that camera updates with a constant initial error in the position
+// results in convergence.
+TEST_F(LocalizedDrivetrainTest, InitialPositionError) {
+ SetEnabled(true);
+ set_enable_cameras(true);
+ set_camera_is_turreted(true);
+ drivetrain_plant_.mutable_state()->topRows(3) +=
+ Eigen::Vector3d(0.1, 0.1, 0.01);
+
+ // Confirm that some translational movement does get handled correctly.
+ SendGoal(-0.9, 1.0);
+
+ // Give the filters enough time to converge.
+ RunFor(chrono::seconds(10));
+ VerifyNearGoal(5e-3);
+ EXPECT_TRUE(VerifyEstimatorAccurate(1e-2));
+}
+
+// Tests that camera updates using a non-turreted camera work.
+TEST_F(LocalizedDrivetrainTest, InitialPositionErrorNoTurret) {
+ SetEnabled(true);
+ set_enable_cameras(true);
+ set_camera_is_turreted(false);
+ drivetrain_plant_.mutable_state()->topRows(3) +=
+ Eigen::Vector3d(0.1, 0.1, 0.01);
+
+ SendGoal(-1.0, 1.0);
+
+ // Give the filters enough time to converge.
+ RunFor(chrono::seconds(10));
+ VerifyNearGoal(5e-3);
+ EXPECT_TRUE(VerifyEstimatorAccurate(1e-2));
+}
+
+// Tests that we are able to handle a constant, non-zero turret angle.
+TEST_F(LocalizedDrivetrainTest, NonZeroTurret) {
+ SetEnabled(true);
+ set_enable_cameras(true);
+ set_camera_is_turreted(true);
+ set_turret(1.0, 0.0);
+ drivetrain_plant_.mutable_state()->topRows(3) +=
+ Eigen::Vector3d(0.1, 0.1, 0.0);
+
+ SendGoal(-1.0, 1.0);
+
+ // Give the filters enough time to converge.
+ RunFor(chrono::seconds(10));
+ VerifyNearGoal(5e-3);
+ EXPECT_TRUE(VerifyEstimatorAccurate(1e-3));
+}
+
+// Tests that we are able to handle a constant velocity turret.
+TEST_F(LocalizedDrivetrainTest, MovingTurret) {
+ SetEnabled(true);
+ set_enable_cameras(true);
+ set_camera_is_turreted(true);
+ set_turret(0.0, 0.2);
+ drivetrain_plant_.mutable_state()->topRows(3) +=
+ Eigen::Vector3d(0.1, 0.1, 0.0);
+
+ SendGoal(-1.0, 1.0);
+
+ // Give the filters enough time to converge.
+ RunFor(chrono::seconds(10));
+ VerifyNearGoal(5e-3);
+ EXPECT_TRUE(VerifyEstimatorAccurate(1e-3));
+}
+
+// Tests that we reject camera measurements when the turret is spinning too
+// fast.
+TEST_F(LocalizedDrivetrainTest, TooFastTurret) {
+ SetEnabled(true);
+ set_enable_cameras(true);
+ set_camera_is_turreted(true);
+ set_turret(0.0, -10.0);
+ const Eigen::Vector3d disturbance(0.1, 0.1, 0.0);
+ drivetrain_plant_.mutable_state()->topRows(3) += disturbance;
+
+ SendGoal(-1.0, 1.0);
+
+ RunFor(chrono::seconds(10));
+ EXPECT_FALSE(VerifyEstimatorAccurate(1e-3));
+ // If we remove the disturbance, we should now be correct.
+ drivetrain_plant_.mutable_state()->topRows(3) -= disturbance;
+ VerifyNearGoal(5e-3);
+ EXPECT_TRUE(VerifyEstimatorAccurate(1e-3));
+}
+
+// Tests that we don't reject camera measurements when the turret is spinning
+// too fast but we aren't using a camera attached to the turret.
+TEST_F(LocalizedDrivetrainTest, TooFastTurretDoesntAffectFixedCamera) {
+ SetEnabled(true);
+ set_enable_cameras(true);
+ set_camera_is_turreted(false);
+ set_turret(0.0, -10.0);
+ const Eigen::Vector3d disturbance(0.1, 0.1, 0.0);
+ drivetrain_plant_.mutable_state()->topRows(3) += disturbance;
+
+ SendGoal(-1.0, 1.0);
+
+ RunFor(chrono::seconds(10));
+ VerifyNearGoal(5e-3);
+ EXPECT_TRUE(VerifyEstimatorAccurate(1e-3));
+}
+
+} // namespace testing
+} // namespace drivetrain
+} // namespace control_loops
+} // namespace y2020
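
Note: passing --output_file to localizer_test logs all channels to the
provided logfile, which makes it possible to replay or plot what the estimator
did in a failing case.
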
diff --git a/y2020/vision/sift/BUILD b/y2020/vision/sift/BUILD
index 5cfb6aa..02507a9 100644
--- a/y2020/vision/sift/BUILD
+++ b/y2020/vision/sift/BUILD
@@ -1,5 +1,5 @@
load(":fast_gaussian.bzl", "fast_gaussian")
-load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_py_library")
+load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_py_library", "flatbuffer_ts_library")
cc_binary(
name = "fast_gaussian_generator",
@@ -204,6 +204,7 @@
"TrainingImage",
"TrainingData",
],
+ visibility = ["//visibility:public"],
)
flatbuffer_cc_library(
@@ -213,6 +214,12 @@
visibility = ["//visibility:public"],
)
+flatbuffer_ts_library(
+ name = "sift_ts_fbs",
+ srcs = ["sift.fbs"],
+ visibility = ["//y2020:__subpackages__"],
+)
+
flatbuffer_cc_library(
name = "sift_training_fbs",
srcs = ["sift_training.fbs"],
diff --git a/y2020/vision/tools/python_code/BUILD b/y2020/vision/tools/python_code/BUILD
new file mode 100644
index 0000000..670b664
--- /dev/null
+++ b/y2020/vision/tools/python_code/BUILD
@@ -0,0 +1,49 @@
+load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_py_library")
+
+py_binary(
+ name = "load_sift_training",
+ data = [
+ ":test_images/train_power_port_red.png",
+ ":test_images/train_power_port_red_webcam.png",
+ ":test_images/train_power_port_blue.png",
+ ":test_images/train_loading_bay_red.png",
+ ":test_images/train_loading_bay_blue.png",
+ ],
+ srcs = ["load_sift_training.py",
+ "camera_definition.py",
+ "define_training_data.py",
+ "target_definition.py",
+ "train_and_match.py",
+ ],
+ args = ["sift_training_data.h",
+ ],
+ default_python_version = "PY3",
+ srcs_version = "PY2AND3",
+ deps = [
+ "//y2020/vision/sift:sift_fbs_python",
+ "@opencv_contrib_nonfree_amd64//:python_opencv",
+ "@bazel_tools//tools/python/runfiles",
+ ],
+)
+
+genrule(
+ name = "run_load_sift_training",
+ outs = [
+ "sift_training_data.h",
+ ],
+ cmd = " ".join([
+ "$(location :load_sift_training)",
+ "$(location sift_training_data.h)",
+ ]),
+ tools = [
+ ":load_sift_training",
+ ],
+)
+
+cc_library(
+ name = "sift_training",
+ hdrs = [
+ "sift_training_data.h",
+ ],
+ visibility = ["//visibility:public"],
+)
diff --git a/y2020/vision/tools/python_code/camera_definition.py b/y2020/vision/tools/python_code/camera_definition.py
new file mode 100644
index 0000000..59f008f
--- /dev/null
+++ b/y2020/vision/tools/python_code/camera_definition.py
@@ -0,0 +1,57 @@
+import argparse
+import cv2
+import json
+import math
+import numpy as np
+import time
+
+
+class CameraIntrinsics:
+ def __init__(self):
+ self.camera_matrix = []
+ self.distortion_coeffs = []
+
+
+class CameraExtrinsics:
+ def __init__(self):
+ self.R = []
+ self.T = []
+
+
+class CameraParameters:
+ def __init__(self):
+ self.camera_int = CameraIntrinsics()
+ self.camera_ext = CameraExtrinsics()
+
+
+### CAMERA DEFINITIONS
+
+# Robot camera has:
+# FOV_H = 93.*math.pi/180.
+# FOV_V = 70.*math.pi/180.
+
+# Create fake camera (based on USB webcam params)
+fx = 810.
+fy = 810.
+cx = 320.
+cy = 240.
+
+# Define a web_cam
+web_cam_int = CameraIntrinsics()
+web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
+web_cam_int.distortion_coeffs = np.zeros((5,1))
+
+web_cam_ext = CameraExtrinsics()
+# Camera rotation from robot x,y,z to opencv (z, -x, -y)
+web_cam_ext.R = np.array([[0., 0., 1.],
+ [-1, 0, 0],
+ [0, -1., 0]])
+web_cam_ext.T = np.array([[0., 0., 0.]]).T
+
+web_cam_params = CameraParameters()
+web_cam_params.camera_int = web_cam_int
+web_cam_params.camera_ext = web_cam_ext
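
As a sanity check on the intrinsics above, a minimal (hypothetical, not part
of this change) projection with OpenCV; a point on the optical axis should
land exactly at the principal point (cx, cy):

  import cv2
  import numpy as np
  from camera_definition import web_cam_params

  cam_int = web_cam_params.camera_int
  # A point 2m straight out along the camera's optical axis (opencv +z),
  # already expressed in camera coordinates, so rvec and tvec are zero.
  pt_cam = np.array([[0.0, 0.0, 2.0]])
  pts_2d, _ = cv2.projectPoints(pt_cam, np.zeros(3), np.zeros(3),
                                cam_int.camera_matrix,
                                cam_int.distortion_coeffs)
  print(pts_2d)  # [[[320. 240.]]] given fx=fy=810, cx=320, cy=240
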
diff --git a/y2020/vision/tools/python_code/define_training_data.py b/y2020/vision/tools/python_code/define_training_data.py
new file mode 100644
index 0000000..22eb6ce
--- /dev/null
+++ b/y2020/vision/tools/python_code/define_training_data.py
@@ -0,0 +1,255 @@
+import argparse
+import cv2
+import json
+import math
+import numpy as np
+import time
+
+import train_and_match as tam
+
+# Points for current polygon
+point_list = []
+current_mouse = (0, 0)
+
+
+def get_mouse_event(event, x, y, flags, param):
+ global point_list
+ global current_mouse
+ current_mouse = (x, y)
+ if event == cv2.EVENT_LBUTTONUP:
+ #print("Adding point at %d, %d" % (x,y))
+ point_list.append([x, y])
+ pass
+
+
+def draw_polygon(image, polygon, color=(255, 0, 0), close_polygon=False):
+ for point in polygon:
+ image = cv2.circle(image, (point[0], point[1]), 5, (255, 0, 0), -1)
+ if (len(polygon) > 1):
+ np_poly = np.array(polygon)
+ image = cv2.polylines(
+ image, [np_poly], close_polygon, color, thickness=3)
+ return image
+
+
+# Close out polygon, return True if size is 3 or more points
+def finish_polygon(image, polygon):
+ global point_list
+ # If we have at least 3 points, close and show the polygon
+ if len(point_list) <= 2:
+ return False
+
+ point_list.append(point_list[0])
+ image = draw_polygon(image, point_list, color=(0, 0, 255))
+ cv2.imshow("image", image)
+ cv2.waitKey(500)
+ return True
+
+
+def define_polygon(image):
+ global point_list
+ # Set of all defined polygons
+ point_list = []
+ polygon_list = []
+
+ display_image = image.copy()
+
+ cv2.namedWindow("image")
+ cv2.setMouseCallback("image", get_mouse_event)
+
+ while True:
+ cv2.imshow("image", display_image)
+ key = cv2.waitKey(1) & 0xFF
+
+ # if the 'r' key is pressed, reset the current polygon
+ # Leaves previously defined polygons intact
+ if key == ord("r"):
+ display_image = image.copy()
+ point_list = []
+
+ # if the 'd' key is pressed, delete the last point
+ if key == ord("d"):
+ display_image = image.copy()
+ point_list.pop()
+
+ # if the 'n' key is pressed, save current polygon, and move to next
+ if key == ord("n"):
+ if (finish_polygon(display_image, point_list)):
+ polygon_list.append(point_list)
+ display_image = image.copy()
+ point_list = []
+
+ # if the 'q' key is pressed, break from the loop and finish polygon
+ elif key == ord("q"):
+ if (finish_polygon(display_image, point_list)):
+ polygon_list.append(point_list)
+ break
+
+ display_image = draw_polygon(display_image, point_list)
+
+ return polygon_list
+
+
+# Given a list of points on an image, prompt the user to click on the
+# corresponding points within the image.
+# Return the list of those points that have been clicked
+def define_points_by_list(image, points):
+ global point_list
+ global current_mouse
+ point_list = []
+
+ display_image = image.copy()
+
+ cv2.namedWindow("image")
+ cv2.setMouseCallback("image", get_mouse_event)
+
+ while (len(point_list) < len(points)):
+ i = len(point_list)
+ # Draw mouse location and suggested target
+ display_image = image.copy()
+ display_image = cv2.circle(display_image, (points[i][0], points[i][1]),
+ 15, (0, 255, 0), 2)
+ cursor_length = 5
+ display_image = cv2.line(
+ display_image,
+ (current_mouse[0] - cursor_length, current_mouse[1]),
+ (current_mouse[0] + cursor_length, current_mouse[1]), (255, 0, 0),
+ 2, cv2.LINE_AA)
+ display_image = cv2.line(
+ display_image,
+ (current_mouse[0], current_mouse[1] - cursor_length),
+ (current_mouse[0], current_mouse[1] + cursor_length), (255, 0, 0),
+ 2, cv2.LINE_AA)
+
+ cv2.imshow("image", display_image)
+
+ key = cv2.waitKey(1) & 0xFF
+
+ # if the 'r' key is pressed, reset the current point collection
+ # Leaves previously defined polygons intact
+ if key == ord("r"):
+            display_image = image.copy()
+ point_list = []
+
+ # if the 'd' key is pressed, delete the last point
+ elif key == ord("d"):
+            display_image = image.copy()
+ point_list.pop()
+
+ # if the 'q' key is pressed, quit
+ elif key == ord("q"):
+ quit()
+
+ return point_list
+
+# Determine whether a given point lies within (or on the border of) a set of
+# polygons. Return True if it does.
+def point_in_polygons(point, polygons):
+ for poly in polygons:
+ np_poly = np.asarray(poly)
+ dist = cv2.pointPolygonTest(np_poly, (point[0], point[1]), True)
+        if dist >= 0:
+ return True
+
+ return False
+
+## Filter keypoints by polygons
+def filter_keypoints_by_polygons(keypoint_list, descriptor_list, polygons):
+ # TODO: Need to make sure we've got the right numpy array / list
+ keep_keypoint_list = []
+ keep_descriptor_list = []
+ reject_keypoint_list = []
+ reject_descriptor_list = []
+ keep_list = []
+ reject_list = []
+
+ # For now, pretend keypoints are just points
+ for i in range(len(keypoint_list)):
+ if point_in_polygons((keypoint_list[i].pt[0], keypoint_list[i].pt[1]), polygons):
+ keep_list.append(i)
+ else:
+ reject_list.append(i)
+
+ keep_keypoint_list = [keypoint_list[kp_ind] for kp_ind in keep_list]
+ reject_keypoint_list = [keypoint_list[kp_ind] for kp_ind in reject_list]
+ # Allow function to be called with no descriptors, and just return empty list
+ if descriptor_list is not None:
+ keep_descriptor_list = descriptor_list[keep_list,:]
+ reject_descriptor_list = descriptor_list[reject_list,:]
+
+ return keep_keypoint_list, keep_descriptor_list, reject_keypoint_list, reject_descriptor_list, keep_list
+
+# Helper function that appends a column of ones to a list (of 2d points)
+def append_ones(point_list):
+ return np.hstack([point_list, np.ones((len(point_list),1))])
+
+# Given a list of 2d points and their corresponding 3d locations, compute the
+# map between them.
+# pt_3d = (pt_2d, 1) * reprojection_map
+# TODO: We should really look at residuals to see if things are messed up
+def compute_reprojection_map(polygon_2d, polygon_3d):
+ pts_2d = np.asarray(np.float32(polygon_2d)).reshape(-1,2)
+ pts_2d_lstsq = append_ones(pts_2d)
+ pts_3d_lstsq = np.asarray(np.float32(polygon_3d)).reshape(-1,3)
+
+ reprojection_map = np.linalg.lstsq(pts_2d_lstsq, pts_3d_lstsq, rcond=-1)[0]
+
+ return reprojection_map
+
+# Given a set of keypoints (w/ 2d locations) and a reprojection map that is
+# valid over the region the keypoints were drawn from, returns a numpy array
+# of 3d locations the same size as the keypoint list.
+def compute_3d_points(keypoint_2d_list, reprojection_map):
+ pt_2d_lstsq = append_ones(np.asarray(np.float32(keypoint_2d_list)).reshape(-1,2))
+ pt_3d = pt_2d_lstsq.dot(reprojection_map)
+
+ return pt_3d
+
+# Given 2d and 3d locations, and camera location and projection model,
+# display locations on an image
+def visualize_reprojections(img, pts_2d, pts_3d, cam_mat, distortion_coeffs):
+ # Compute camera location
+ # TODO: Warn on bad inliers
+ # TODO: Change this to not have to recast to np
+ pts_2d_np = np.asarray(np.float32(pts_2d)).reshape(-1, 1, 2)
+ pts_3d_np = np.asarray(np.float32(pts_3d)).reshape(-1, 1, 3)
+ retval, R, T, inliers = cv2.solvePnPRansac(pts_3d_np, pts_2d_np, cam_mat,
+ distortion_coeffs)
+ pts_3d_proj_2d, jac_2d = cv2.projectPoints(pts_3d_np, R, T, cam_mat,
+ distortion_coeffs)
+ if inliers is None:
+ print("WARNING: Didn't get any inliers when reprojecting polygons")
+ return img
+ for i in range(len(pts_2d)):
+ pt_2d = pts_2d_np[i][0]
+ pt_3d_proj = pts_3d_proj_2d[i][0]
+ pt_color = (0, 255, 0)
+ if i not in inliers:
+ pt_color = (0, 0, 255)
+
+ img = cv2.circle(img, (pt_2d[0], pt_2d[1]), 3, pt_color, 3)
+ img = cv2.circle(img, (pt_3d_proj[0], pt_3d_proj[1]), 15, pt_color, 3)
+
+ cv2.imshow("image", img)
+ cv2.waitKey(0)
+ return img
+
+
+def sample_define_polygon_usage():
+ image = cv2.imread("test_images/train_power_port_red.png")
+
+ polygon_list = define_polygon(image)
+ print(polygon_list)
+
+
+def sample_define_points_by_list_usage():
+ image = cv2.imread("test_images/train_power_port_red.png")
+
+ test_points = [(451, 679), (451, 304),
+ (100, 302), (451, 74),
+ (689, 74), (689, 302),
+ (689, 679)]
+
+ polygon_list = define_points_by_list(image, test_points)
+ print(polygon_list)
+
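
A small worked example of the reprojection-map helpers above (a usage sketch
with made-up correspondences): with four coplanar 2d/3d correspondences the
affine least-squares fit is exact, so the center of the image quad maps back
to the center of the panel:

  import numpy as np
  import define_training_data as dtd

  # Pixel corners of a fake head-on image of a 1m x 1m panel at the far end
  # of the field, with the matching 3d corners in the same order.
  corners_2d = [(100, 400), (100, 100), (400, 100), (400, 400)]
  corners_3d = [(15.98, 0.0, 0.0), (15.98, 0.0, 1.0),
                (15.98, 1.0, 1.0), (15.98, 1.0, 0.0)]

  rep_map = dtd.compute_reprojection_map(corners_2d, corners_3d)
  print(dtd.compute_3d_points([(250, 250)], rep_map))
  # -> approximately [[15.98, 0.5, 0.5]]
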
diff --git a/y2020/vision/tools/python_code/field_display.py b/y2020/vision/tools/python_code/field_display.py
index a3f1024..93f645c 100644
--- a/y2020/vision/tools/python_code/field_display.py
+++ b/y2020/vision/tools/python_code/field_display.py
@@ -1,6 +1,5 @@
import cv2
import math
-from matplotlib import pyplot as plt
import numpy as np
diff --git a/y2020/vision/tools/python_code/image_match_test.py b/y2020/vision/tools/python_code/image_match_test.py
index bcf610f..d6b7deb 100644
--- a/y2020/vision/tools/python_code/image_match_test.py
+++ b/y2020/vision/tools/python_code/image_match_test.py
@@ -100,7 +100,7 @@
looping = True
# Grab images until 'q' is pressed (or just once for canned images)
-while (looping):
+while looping:
if (query_image_index is -1):
# Capture frame-by-frame
ret, frame = cap.read()
@@ -305,3 +305,4 @@
cap.release()
cv2.destroyAllWindows()
+
diff --git a/y2020/vision/tools/python_code/load_sift_training.py b/y2020/vision/tools/python_code/load_sift_training.py
new file mode 100644
index 0000000..9fa4acf
--- /dev/null
+++ b/y2020/vision/tools/python_code/load_sift_training.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python3
+
+import cv2
+import sys
+import flatbuffers
+import target_definition
+
+import frc971.vision.sift.TrainingImage as TrainingImage
+import frc971.vision.sift.TrainingData as TrainingData
+import frc971.vision.sift.Feature as Feature
+
+def main():
+
+ output_path = sys.argv[1]
+ print("Writing file to ", output_path)
+
+ target_data_list = target_definition.compute_target_definition()
+
+ fbb = flatbuffers.Builder(0)
+
+ images_vector = []
+
+ for target_data in target_data_list:
+
+ features_vector = []
+
+ for keypoint, keypoint_3d, descriptor in zip(target_data.keypoint_list,
+ target_data.keypoint_list_3d,
+ target_data.descriptor_list):
+
+ Feature.FeatureStartDescriptorVector(fbb, len(descriptor))
+ for n in reversed(descriptor):
+ fbb.PrependFloat32(n)
+ descriptor_vector = fbb.EndVector(len(descriptor))
+
+ Feature.FeatureStart(fbb)
+
+ Feature.FeatureAddDescriptor(fbb, descriptor_vector)
+ Feature.FeatureAddX(fbb, keypoint.pt[0])
+ Feature.FeatureAddY(fbb, keypoint.pt[1])
+ Feature.FeatureAddSize(fbb, keypoint.size)
+ Feature.FeatureAddAngle(fbb, keypoint.angle)
+ Feature.FeatureAddResponse(fbb, keypoint.response)
+ Feature.FeatureAddOctave(fbb, keypoint.octave)
+
+ features_vector.append(Feature.FeatureEnd(fbb))
+
+ ## TODO: Write 3d vector here
+
+ TrainingImage.TrainingImageStartFeaturesVector(fbb, len(features_vector))
+ for feature in reversed(features_vector):
+ fbb.PrependUOffsetTRelative(feature)
+ features_vector_table = fbb.EndVector(len(features_vector))
+
+ TrainingImage.TrainingImageStart(fbb)
+ TrainingImage.TrainingImageAddFeatures(fbb, features_vector_table)
+ # TODO(Brian): Fill out the transformation matrices.
+ images_vector.append(TrainingImage.TrainingImageEnd(fbb))
+
+ TrainingData.TrainingDataStartImagesVector(fbb, len(images_vector))
+ for training_image in reversed(images_vector):
+ fbb.PrependUOffsetTRelative(training_image)
+ images_vector_table = fbb.EndVector(len(images_vector))
+
+ TrainingData.TrainingDataStart(fbb)
+ TrainingData.TrainingDataAddImages(fbb, images_vector_table)
+ fbb.Finish(TrainingData.TrainingDataEnd(fbb))
+
+ bfbs = fbb.Output()
+
+ output_prefix = [
+ b'#ifndef Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
+ b'#define Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
+ b'#include <string_view>',
+ b'namespace frc971 {',
+ b'namespace vision {',
+ b'inline std::string_view SiftTrainingData() {',
+ ]
+ output_suffix = [
+ b' return std::string_view(kData, sizeof(kData));',
+ b'}',
+ b'} // namespace vision',
+ b'} // namespace frc971',
+ b'#endif // Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
+ ]
+
+ with open(output_path, 'wb') as output:
+ for line in output_prefix:
+ output.write(line)
+ output.write(b'\n')
+ output.write(b'alignas(64) static constexpr char kData[] = "')
+ for byte in fbb.Output():
+ output.write(b'\\x' + (b'%x' % byte).zfill(2))
+ output.write(b'";\n')
+ for line in output_suffix:
+ output.write(line)
+ output.write(b'\n')
+
+if __name__ == '__main__':
+ main()
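
Note: the run_load_sift_training genrule passes the output header as the only
argument, so this can also be exercised by hand with something like
"python3 load_sift_training.py sift_training_data.h" from this directory
(with USE_BAZEL in target_definition.py set to False so the training images
load from relative paths).
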
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
new file mode 100644
index 0000000..779eb0b
--- /dev/null
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -0,0 +1,414 @@
+import argparse
+import cv2
+import json
+import math
+import numpy as np
+
+import camera_definition
+import define_training_data as dtd
+import train_and_match as tam
+
+USE_BAZEL = True
+VISUALIZE_KEYPOINTS = False
+
+def bazel_name_fix(filename):
+ ret_name = filename
+ if USE_BAZEL:
+ ret_name = 'org_frc971/y2020/vision/tools/python_code/' + filename
+
+ return ret_name
+
+class TargetData:
+ def __init__(self, filename):
+ self.image_filename = filename
+ # Load an image (will come in as a 1-element list)
+ if USE_BAZEL:
+ from bazel_tools.tools.python.runfiles import runfiles
+ r = runfiles.Create()
+ self.image_filename = r.Rlocation(bazel_name_fix(self.image_filename))
+ self.image = tam.load_images([self.image_filename])[0]
+ self.polygon_list = []
+ self.polygon_list_3d = []
+ self.reprojection_map_list = []
+ self.keypoint_list = []
+ self.keypoint_list_3d = None # numpy array of 3D points
+ self.descriptor_list = []
+
+ def extract_features(self, feature_extractor):
+ kp_lists, desc_lists = tam.detect_and_compute(feature_extractor,
+ [self.image])
+ self.keypoint_list = kp_lists[0]
+ self.descriptor_list = desc_lists[0]
+
+ def filter_keypoints_by_polygons(self):
+ self.keypoint_list, self.descriptor_list, _, _, _ = dtd.filter_keypoints_by_polygons(
+ self.keypoint_list, self.descriptor_list, self.polygon_list)
+
+ def compute_reprojection_maps(self):
+ for poly_ind in range(len(self.polygon_list)):
+ reprojection_map = dtd.compute_reprojection_map(
+ self.polygon_list[poly_ind], self.polygon_list_3d[poly_ind])
+ self.reprojection_map_list.append(reprojection_map)
+
+ def project_keypoint_to_3d_by_polygon(self, keypoint_list):
+ # Create dummy array of correct size that we can put values in
+ point_list_3d = np.asarray(
+ [(0., 0., 0.) for kp in keypoint_list]).reshape(-1, 3)
+ # Iterate through our polygons
+ for poly_ind in range(len(self.polygon_list)):
+ # Filter and project points for each polygon in the list
+ filtered_keypoints, _, _, _, keep_list = dtd.filter_keypoints_by_polygons(
+ keypoint_list, None, [self.polygon_list[poly_ind]])
+ print("Filtering kept %d of %d features" % (len(keep_list),
+ len(keypoint_list)))
+ filtered_point_array = np.asarray(
+ [(keypoint.pt[0], keypoint.pt[1])
+ for keypoint in filtered_keypoints]).reshape(-1, 2)
+ filtered_point_list_3d = dtd.compute_3d_points(
+ filtered_point_array, self.reprojection_map_list[poly_ind])
+ for i in range(len(keep_list)):
+ point_list_3d[keep_list[i]] = filtered_point_list_3d[i]
+
+ return point_list_3d
+
+def compute_target_definition():
+ ############################################################
+ # TARGET DEFINITIONS
+ ############################################################
+
+ ideal_target_list = []
+ training_target_list = []
+
+ # Some general info about our field and targets
+ # Assume camera centered on target at 1 m above ground and distance of 4.85m
+ field_length = 15.98
+ power_port_total_height = 3.10
+ power_port_center_y = 1.67
+ power_port_width = 1.22
+ power_port_bottom_wing_height = 1.88
+ power_port_wing_width = 1.83
+ loading_bay_edge_y = 1.11
+ loading_bay_width = 1.52
+ loading_bay_height = 0.94
+
+ # Pick the target center location at halfway between top and bottom of the top panel
+ target_center_height = (power_port_total_height + power_port_bottom_wing_height) / 2.
+
+ # TODO: Still need to figure out what this angle actually is
+ wing_angle = 20. * math.pi / 180.
+
+ ###
+ ### Red Power Port
+ ###
+
+ # Create the reference "ideal" image
+ ideal_power_port_red = TargetData('test_images/train_power_port_red.png')
+
+ # Start at lower left corner, and work around clockwise
+ # These are taken by manually finding the points in gimp for this image
+ power_port_red_main_panel_polygon_points_2d = [(451, 679), (451, 304),
+ (100, 302), (451, 74),
+ (689, 74), (689, 302),
+ (689, 679)]
+
+ # These are "virtual" 3D points based on the expected geometry
+ power_port_red_main_panel_polygon_points_3d = [
+ (field_length, -power_port_center_y + power_port_width / 2., 0.),
+ (field_length, -power_port_center_y + power_port_width / 2.,
+ power_port_bottom_wing_height),
+ (field_length, -power_port_center_y + power_port_width / 2.
+ + power_port_wing_width, power_port_bottom_wing_height),
+ (field_length, -power_port_center_y + power_port_width / 2.,
+ power_port_total_height),
+ (field_length, -power_port_center_y - power_port_width / 2.,
+ power_port_total_height),
+ (field_length, -power_port_center_y - power_port_width / 2.,
+ power_port_bottom_wing_height),
+ (field_length, -power_port_center_y - power_port_width / 2., 0.)
+ ]
+
+ power_port_red_wing_panel_polygon_points_2d = [(689, 74), (1022, 302),
+ (689, 302)]
+ # These are "virtual" 3D points based on the expected geometry
+ power_port_red_wing_panel_polygon_points_3d = [
+ (field_length, -power_port_center_y - power_port_width / 2.,
+ power_port_total_height),
+ (field_length - power_port_wing_width * math.sin(wing_angle),
+ -power_port_center_y - power_port_width / 2.
+ - power_port_wing_width * math.cos(wing_angle),
+ power_port_bottom_wing_height),
+ (field_length, -power_port_center_y - power_port_width / 2.,
+ power_port_bottom_wing_height)
+ ]
+
+ # Populate the red power port
+ ideal_power_port_red.polygon_list.append(power_port_red_main_panel_polygon_points_2d)
+ ideal_power_port_red.polygon_list_3d.append(power_port_red_main_panel_polygon_points_3d)
+
+ ideal_power_port_red.polygon_list.append(power_port_red_wing_panel_polygon_points_2d)
+ ideal_power_port_red.polygon_list_3d.append(power_port_red_wing_panel_polygon_points_3d)
+
+ # Add the ideal 3D target to our list
+ ideal_target_list.append(ideal_power_port_red)
+ # And add the training image we'll actually use to the training list
+ training_target_list.append(TargetData('test_images/train_power_port_red_webcam.png'))
+
+ ###
+ ### Red Loading Bay
+ ###
+
+ ideal_loading_bay_red = TargetData('test_images/train_loading_bay_red.png')
+
+ # Start at lower left corner, and work around clockwise
+ # These are taken by manually finding the points in gimp for this image
+ loading_bay_red_polygon_points_2d = [(42, 406), (42, 35), (651, 34), (651, 406)]
+
+ # These are "virtual" 3D points based on the expected geometry
+ loading_bay_red_polygon_points_3d = [
+ (field_length, loading_bay_edge_y + loading_bay_width, 0.),
+ (field_length, loading_bay_edge_y + loading_bay_width, loading_bay_height),
+ (field_length, loading_bay_edge_y, loading_bay_height),
+ (field_length, loading_bay_edge_y, 0.)
+ ]
+
+ ideal_loading_bay_red.polygon_list.append(loading_bay_red_polygon_points_2d)
+ ideal_loading_bay_red.polygon_list_3d.append(loading_bay_red_polygon_points_3d)
+
+ ideal_target_list.append(ideal_loading_bay_red)
+ training_target_list.append(TargetData('test_images/train_loading_bay_red.png'))
+
+ ###
+ ### Blue Power Port
+ ###
+
+ ideal_power_port_blue = TargetData('test_images/train_power_port_blue.png')
+
+ # Start at lower left corner, and work around clockwise
+ # These are taken by manually finding the points in gimp for this image
+ power_port_blue_main_panel_polygon_points_2d = [(438, 693), (438, 285),
+ (93, 285), (440, 50),
+ (692, 50), (692, 285),
+ (692, 693)]
+
+ # These are "virtual" 3D points based on the expected geometry
+ power_port_blue_main_panel_polygon_points_3d = [
+ (0., power_port_center_y - power_port_width / 2., 0.),
+ (0., power_port_center_y - power_port_width / 2.,
+ power_port_bottom_wing_height),
+ (0., power_port_center_y - power_port_width / 2. - power_port_wing_width,
+ power_port_bottom_wing_height),
+ (0., power_port_center_y - power_port_width / 2.,
+ power_port_total_height),
+ (0., power_port_center_y + power_port_width / 2.,
+ power_port_total_height),
+ (0., power_port_center_y + power_port_width / 2.,
+ power_port_bottom_wing_height),
+ (0., power_port_center_y + power_port_width / 2., 0.)
+ ]
+
+ power_port_blue_wing_panel_polygon_points_2d = [(692, 50), (1047, 285),
+ (692, 285)]
+ # These are "virtual" 3D points based on the expected geometry
+ power_port_blue_wing_panel_polygon_points_3d = [
+ (0., power_port_center_y + power_port_width / 2.,
+ power_port_total_height),
+ (power_port_wing_width * math.sin(wing_angle),
+ power_port_center_y + power_port_width / 2. +
+ power_port_wing_width * math.cos(wing_angle),
+ power_port_bottom_wing_height),
+ (0., power_port_center_y + power_port_width / 2.,
+ power_port_bottom_wing_height)
+ ]
+
+ # Populate the blue power port
+ ideal_power_port_blue.polygon_list.append(power_port_blue_main_panel_polygon_points_2d)
+ ideal_power_port_blue.polygon_list_3d.append(power_port_blue_main_panel_polygon_points_3d)
+
+ ideal_power_port_blue.polygon_list.append(power_port_blue_wing_panel_polygon_points_2d)
+ ideal_power_port_blue.polygon_list_3d.append(power_port_blue_wing_panel_polygon_points_3d)
+
+ ideal_target_list.append(ideal_power_port_blue)
+ training_target_list.append(TargetData('test_images/train_power_port_blue.png'))
+
+ ###
+ ### Blue Loading Bay
+ ###
+
+ ideal_loading_bay_blue = TargetData('test_images/train_loading_bay_blue.png')
+
+ # Start at lower left corner, and work around clockwise
+ # These are taken by manually finding the points in gimp for this image
+ loading_bay_blue_polygon_points_2d = [(7, 434), (7, 1), (729, 1), (729, 434)]
+
+ # These are "virtual" 3D points based on the expected geometry
+ loading_bay_blue_polygon_points_3d = [
+ (0., -loading_bay_edge_y - loading_bay_width, 0.),
+ (0., -loading_bay_edge_y - loading_bay_width, loading_bay_height),
+ (0., -loading_bay_edge_y, loading_bay_height),
+ (0., -loading_bay_edge_y, 0.)
+ ]
+
+ ideal_loading_bay_blue.polygon_list.append(loading_bay_blue_polygon_points_2d)
+ ideal_loading_bay_blue.polygon_list_3d.append(loading_bay_blue_polygon_points_3d)
+
+ ideal_target_list.append(ideal_loading_bay_blue)
+ training_target_list.append(TargetData('test_images/train_loading_bay_blue.png'))
+
+ # Create feature extractor
+ feature_extractor = tam.load_feature_extractor()
+
+ # Use webcam parameters for now
+ camera_params = camera_definition.web_cam_params
+
+ for ideal_target in ideal_target_list:
+ print("\nPreparing target for image %s" % ideal_target.image_filename)
+ ideal_target.extract_features(feature_extractor)
+ ideal_target.filter_keypoints_by_polygons()
+ ideal_target.compute_reprojection_maps()
+ ideal_target.keypoint_list_3d = ideal_target.project_keypoint_to_3d_by_polygon(
+ ideal_target.keypoint_list)
+
+ if VISUALIZE_KEYPOINTS:
+ for i in range(len(ideal_target.polygon_list)):
+ ideal_pts_tmp = np.asarray(ideal_target.polygon_list[i]).reshape(
+ -1, 2)
+ ideal_pts_3d_tmp = np.asarray(
+ ideal_target.polygon_list_3d[i]).reshape(-1, 3)
+ # We can only compute pose if we have at least 4 points
+ # Only matters for reprojection for visualization
+ # Keeping this code here, since it's helpful when testing
+ if (len(ideal_target.polygon_list[i]) >= 4):
+ img_copy = dtd.draw_polygon(ideal_target.image.copy(),
+ ideal_target.polygon_list[i],
+ (0, 255, 0), True)
+ dtd.visualize_reprojections(
+ img_copy, ideal_pts_tmp, ideal_pts_3d_tmp,
+ camera_params.camera_int.camera_matrix,
+ camera_params.camera_int.distortion_coeffs)
+
+ for polygon in ideal_target.polygon_list:
+ img_copy = ideal_target.image.copy()
+ kp_in_poly2d = []
+ kp_in_poly3d = []
+ for kp, kp_3d in zip(ideal_target.keypoint_list,
+ ideal_target.keypoint_list_3d):
+ if dtd.point_in_polygons((kp.pt[0], kp.pt[1]), [polygon]):
+ kp_in_poly2d.append((kp.pt[0], kp.pt[1]))
+ kp_in_poly3d.append(kp_3d)
+
+ dtd.visualize_reprojections(
+ img_copy,
+ np.asarray(kp_in_poly2d).reshape(-1, 2),
+ np.asarray(kp_in_poly3d).reshape(
+ -1, 3), camera_params.camera_int.camera_matrix,
+ camera_params.camera_int.distortion_coeffs)
+
+ ###############
+ ### Compute 3D points on actual training images
+ ### TODO: Add code to do manual point selection
+ ###############
+ AUTO_PROJECTION = True
+
+ if AUTO_PROJECTION:
+ print("\n\nAuto projection of training keypoints to 3D using ideal images")
+ # Match the captured training image against the "ideal" training image
+ # and use those matches to pin down the 3D locations of the keypoints
+
+ for target_ind in range(len(training_target_list)):
+ # Assumes we have 1 ideal view for each training target
+ training_target = training_target_list[target_ind]
+ ideal_target = ideal_target_list[target_ind]
+
+ print("\nPreparing target for image %s" % training_target.image_filename)
+ # Extract keypoints and descriptors for model
+ training_target.extract_features(feature_extractor)
+
+ # Create matcher that we'll use to match with ideal
+ matcher = tam.train_matcher([training_target.descriptor_list])
+
+ matches_list = tam.compute_matches(matcher,
+ [training_target.descriptor_list],
+ [ideal_target.descriptor_list])
+
+ homography_list, matches_mask_list = tam.compute_homographies(
+ [training_target.keypoint_list], [ideal_target.keypoint_list],
+ matches_list)
+
+ # The homography is the same for every polygon on this target.
+ H_inv = np.linalg.inv(homography_list[0])
+ for polygon in ideal_target.polygon_list:
+ ideal_pts_2d = np.asarray(np.float32(polygon)).reshape(-1, 1, 2)
+ # We use the ideal target's polygons to define the polygons on
+ # the training target
+ transformed_polygon = cv2.perspectiveTransform(ideal_pts_2d, H_inv)
+ training_target.polygon_list.append(transformed_polygon)
+
+ print("Started with %d keypoints" % len(training_target.keypoint_list))
+
+ (training_target.keypoint_list, training_target.descriptor_list,
+ rejected_keypoint_list, rejected_descriptor_list,
+ _) = dtd.filter_keypoints_by_polygons(
+ training_target.keypoint_list, training_target.descriptor_list,
+ training_target.polygon_list)
+ print("After filtering by polygons, had %d keypoints" % len(
+ training_target.keypoint_list))
+ if VISUALIZE_KEYPOINTS:
+ tam.show_keypoints(training_target.image,
+ training_target.keypoint_list)
+
+ # Now comes the fun part
+ # Go through all my training keypoints to define 3D location using ideal
+ training_3d_list = []
+ for kp_ind in range(len(training_target.keypoint_list)):
+ # We're going to look for the first time this keypoint is in a polygon
+ found_3d_loc = False
+ # First, is it in the correct polygon
+ kp_loc = (training_target.keypoint_list[kp_ind].pt[0],
+ training_target.keypoint_list[kp_ind].pt[1])
+ for poly_ind in range(len(training_target.polygon_list)):
+ if dtd.point_in_polygons(
+ kp_loc, [training_target.polygon_list[poly_ind]
+ ]) and not found_3d_loc:
+ found_3d_loc = True
+ # If so, transform keypoint location to ideal using homography, and compute 3D
+ kp_loc_array = np.asarray(np.float32(kp_loc)).reshape(
+ -1, 1, 2)
+ training_2d_in_ideal = cv2.perspectiveTransform(
+ kp_loc_array, homography_list[0])
+ # Get 3D from this 2D point in ideal image
+ training_3d_pt = dtd.compute_3d_points(
+ training_2d_in_ideal,
+ ideal_target.reprojection_map_list[poly_ind])
+ training_3d_list.append(training_3d_pt)
+
+ training_target.keypoint_list_3d = np.asarray(
+ training_3d_list).reshape(-1, 1, 3)
+
+ if VISUALIZE_KEYPOINTS:
+ # Sanity check these:
+ img_copy = training_target.image.copy()
+ for polygon in training_target.polygon_list:
+ pts = polygon.astype(int).reshape(-1, 2)
+ img_copy = dtd.draw_polygon(img_copy, pts,
+ (255, 0, 0), True)
+ kp_tmp = np.asarray([
+ (kp.pt[0], kp.pt[1]) for kp in training_target.keypoint_list
+ ]).reshape(-1, 2)
+ dtd.visualize_reprojections(
+ img_copy, kp_tmp, training_target.keypoint_list_3d,
+ camera_params.camera_int.camera_matrix,
+ camera_params.camera_int.distortion_coeffs)
+
+ y2020_target_list = training_target_list
+ return y2020_target_list
+
+if __name__ == '__main__':
+ ap = argparse.ArgumentParser()
+ ap.add_argument("--visualize", help="Whether to visualize the results", default=False, action='store_true')
+ ap.add_argument("--no_use_bazel", help="Don't run using Bazel", default=True, action='store_false')
+ args = vars(ap.parse_args())
+
+ VISUALIZE_KEYPOINTS = args["visualize"]
+ if args["visualize"]:
+ print("Visualizing results")
+
+ USE_BAZEL = args["no_use_bazel"]
+ if args["no_use_bazel"]:
+ print("Running on command line (no Bazel)")
+
+ compute_target_definition()
+ pass
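The auto-projection loop above is the heart of this change: each training keypoint is pushed through the fitted homography into the ideal image, and the 3D field coordinate is then read out of that polygon's reprojection map. Below is a minimal standalone sketch of that lookup; the homography values and the all-zeros reprojection map are stand-ins for what `tam.compute_homographies()` and `compute_reprojection_maps()` produce in the real pipeline.

```python
import cv2
import numpy as np

# Stand-in homography mapping training-image pixels to ideal-image pixels.
H = np.array([[1.02, 0.01, -5.0],
              [-0.01, 0.99, 3.0],
              [0., 0., 1.]])

# Stand-in dense reprojection map: for each ideal-image pixel, the 3D field
# coordinate of the target surface seen at that pixel.
ideal_h, ideal_w = 480, 640
reprojection_map = np.zeros((ideal_h, ideal_w, 3), dtype=np.float32)


def training_keypoint_to_3d(kp_xy):
    # Map the training-image point into the ideal image via the homography.
    pt = np.asarray(kp_xy, dtype=np.float32).reshape(-1, 1, 2)
    ideal_xy = cv2.perspectiveTransform(pt, H).reshape(2)
    # Clamp to the map bounds and look up the 3D field location.
    col = min(max(int(round(ideal_xy[0])), 0), ideal_w - 1)
    row = min(max(int(round(ideal_xy[1])), 0), ideal_h - 1)
    return reprojection_map[row, col]


print(training_keypoint_to_3d((320.5, 200.25)))
```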
diff --git a/y2020/vision/tools/python_code/test_images/train_power_port_blue.png b/y2020/vision/tools/python_code/test_images/train_power_port_blue.png
index 6ade26f..a3a7597 100644
--- a/y2020/vision/tools/python_code/test_images/train_power_port_blue.png
+++ b/y2020/vision/tools/python_code/test_images/train_power_port_blue.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/train_and_match.py b/y2020/vision/tools/python_code/train_and_match.py
index 8fed4c3..e5a7f5b 100644
--- a/y2020/vision/tools/python_code/train_and_match.py
+++ b/y2020/vision/tools/python_code/train_and_match.py
@@ -1,14 +1,8 @@
import cv2
import math
-from matplotlib import pyplot as plt
import numpy as np
-#import tkinter
-# pip install pillow
-#import PIL.Image, PIL.ImageTk
import time
-import field_display
-
### DEFINITIONS
MIN_MATCH_COUNT = 10 # 10 is min; more gives better matches
FEATURE_EXTRACTOR_NAME = 'SIFT'
@@ -285,7 +279,9 @@
# keypoint_list: List of opencv keypoints
def show_keypoints(image, keypoint_list):
ret_img = image.copy()
- ret_img = cv2.drawKeypoints(ret_img, keypoint_list, ret_img)
+ ret_img = cv2.drawKeypoints(ret_img, keypoint_list, ret_img,
+ flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow("Keypoints", ret_img)
cv2.waitKey(0)
return ret_img
+
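The `show_keypoints()` tweak above turns on rich keypoint rendering, which draws each keypoint's scale and orientation instead of a bare dot. A self-contained sketch of the same call follows; the image path is illustrative, and `cv2.SIFT_create` assumes OpenCV >= 4.4 (older builds expose it as `cv2.xfeatures2d.SIFT_create`).

```python
import cv2

image = cv2.imread('test_images/train_power_port_red.png')

# SIFT, matching FEATURE_EXTRACTOR_NAME in train_and_match.py.
sift = cv2.SIFT_create()
keypoints, _descriptors = sift.detectAndCompute(image, None)

# DRAW_RICH_KEYPOINTS renders each keypoint's size and orientation.
out = cv2.drawKeypoints(
    image, keypoints, image.copy(),
    flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow("Keypoints", out)
cv2.waitKey(0)
```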
diff --git a/y2020/vision/tools/python_code/transform_projection_test.py b/y2020/vision/tools/python_code/transform_projection_test.py
index b8d94fd..00f68bb 100644
--- a/y2020/vision/tools/python_code/transform_projection_test.py
+++ b/y2020/vision/tools/python_code/transform_projection_test.py
@@ -1,111 +1,90 @@
import cv2
import math
-from matplotlib import pyplot as plt
import numpy as np
import field_display
+import camera_definition
-# Assume image is 640x480 (VGA)
-num_pixels_x = 640
-num_pixels_y = 480
-
-# Camera center is 320, 240
-c_x = num_pixels_x / 2
-c_y = num_pixels_y / 2
-
-# Horiz. FOV is 54 degrees
-FOV_h = 54 * math.pi / 180.0 # (in radians)
-# Vert FOV is horizontal FOV scaled by aspect ratio (through tan function)
-FOV_v = math.atan(c_y / c_x * math.tan(FOV_h / 2.)) * 2
-
-# Assuming square pixels
-# focal length is (640/2)/tan(FOV_h/2)
-f_x = c_x / (math.tan(FOV_h / 2.))
-f_y = c_y / (math.tan(FOV_v / 2.))
+# Import camera from camera_definition
+camera_params = camera_definition.web_cam_params
+cam_mat = camera_params.camera_int.camera_matrix
+distortion_coeffs = camera_params.camera_int.distortion_coeffs
# Height of camera on robot above ground
cam_above_ground = 0.5
-
-# TODO: Should fix f_y entry below.
-cam_mat = np.array([[f_x, 0, c_x], [0, f_y, c_y], [0, 0, 1.]])
-
-# Use default distortion (i.e., none)
-distortion_coeffs = np.zeros((5, 1))
-
depth = 5.0 # meters
-R_w_tarj_mat = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
-R_w_tarj, jac = cv2.Rodrigues(R_w_tarj_mat)
-T_w_tarj = np.array([[15.98 - depth, -4.10 + 2.36, cam_above_ground]]).T
+# Define camera location (cam) relative to origin (w)
+R_w_cam_mat = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
+R_w_cam, jac = cv2.Rodrigues(R_w_cam_mat)
+T_w_cam = np.array([[15.98 - depth, -4.10 + 2.36, cam_above_ground]]).T
-img_ret = field_display.plot_bot_on_field(None, (0, 255, 0), T_w_tarj)
-#field_display.show_field(img_ret)
+img_ret = field_display.plot_bot_on_field(None, (0, 255, 0), T_w_cam)
# Create fake set of points relative to camera capture (target) frame
# at +/- 1 meter in x, 5 meter depth, and every 1 meter in y from 0 to 4m (above the camera, so negative y values)
-pts_3d_T_t = np.array([[-1., 0., depth], [1., 0., depth], [-1., -1., depth], [
- 1., -1., depth
-], [-1., -2., depth], [0., -2., depth], [1., -2., depth], [-1., -3., depth],
- [1., -3., depth], [-1., -4., depth], [1., -4., depth]])
+pts_3d_target = np.array(
+ [[-1., 0., depth], [1., 0., depth], [-1., -1., depth], [1., -1., depth],
+ [-1., -2., depth], [0., -2., depth], [1., -2., depth], [-1., -3., depth],
+ [1., -3., depth], [-1., -4., depth], [1., -4., depth]])
-# Ground truth shift of camera, to compute projections
-R_tarj_ci_gt = np.array([[0., 0.2, 0.2]]).T
+# Ground truth shift of camera from (cam) to (cam2), to compute projections
+R_cam_cam2_gt = np.array([[0., 0.2, 0.2]]).T
-R_tarj_ci_gt_mat, jac = cv2.Rodrigues(R_tarj_ci_gt)
+R_cam_cam2_gt_mat, jac = cv2.Rodrigues(R_cam_cam2_gt)
-T_tarj_ci_gt = np.array([[1., 2., -5.]]).T
+T_cam_cam2_gt = np.array([[1., 2., -5.]]).T
# To transform vectors, we apply the inverse transformation
-R_tarj_ci_gt_mat_inv = R_tarj_ci_gt_mat.T
-T_tarj_ci_gt_inv = -R_tarj_ci_gt_mat_inv.dot(T_tarj_ci_gt)
+R_cam_cam2_gt_mat_inv = R_cam_cam2_gt_mat.T
+T_cam_cam2_gt_inv = -R_cam_cam2_gt_mat_inv.dot(T_cam_cam2_gt)
-#pts_3d_T_t_shifted = (R_tarj_ci_gt_mat_inv.dot(pts_3d_T_t.T) + T_tarj_ci_gt_inv).T
+#pts_3d_target_shifted = (R_cam_cam2_gt_mat_inv.dot(pts_3d_target.T) + T_cam_cam2_gt_inv).T
# Now project from new position
# This was the manual way to do it-- use cv2.projectPoints instead
-#pts_proj_3d = cam_mat.dot(pts_3d_T_t_shifted.T).T
+#pts_proj_3d = cam_mat.dot(pts_3d_target_shifted.T).T
#pts_proj_2d = np.divide(pts_proj_3d[:,0:2],(pts_proj_3d[:,2].reshape(-1,1)))
-pts_proj_2d_ci, jac_2d = cv2.projectPoints(pts_3d_T_t, R_tarj_ci_gt_mat_inv,
- T_tarj_ci_gt_inv, cam_mat,
- distortion_coeffs)
-#print(pts_proj_2d_ci)
+pts_proj_2d_cam2, jac_2d = cv2.projectPoints(
+ pts_3d_target, R_cam_cam2_gt_mat_inv, T_cam_cam2_gt_inv, cam_mat,
+ distortion_coeffs)
-# Now, solve for the pose using the original 3d points (pts_3d_T_t) and the projections from the new location
+# Now, solve for the pose using the original 3d points (pts_3d_target) and the projections from the new location
-retval, R_ci_tarj_est, T_ci_tarj_est, inliers = cv2.solvePnPRansac(
- pts_3d_T_t, pts_proj_2d_ci, cam_mat, distortion_coeffs)
+retval, R_cam2_cam_est, T_cam2_cam_est, inliers = cv2.solvePnPRansac(
+ pts_3d_target, pts_proj_2d_cam2, cam_mat, distortion_coeffs)
# This is the pose from camera to original target spot. We need to invert to get back to the pose we want
-R_tarj_ci_est_mat = cv2.Rodrigues(R_ci_tarj_est)[0].T
-T_tarj_ci_est = -R_tarj_ci_est_mat.dot(T_ci_tarj_est)
-R_tarj_ci_est = cv2.Rodrigues(R_tarj_ci_est_mat)[0]
+R_cam_cam2_est_mat = cv2.Rodrigues(R_cam2_cam_est)[0].T
+T_cam_cam2_est = -R_cam_cam2_est_mat.dot(T_cam2_cam_est)
+R_cam_cam2_est = cv2.Rodrigues(R_cam_cam2_est_mat)[0]
-print("Check:\n", "Rot error:\n", R_tarj_ci_gt - R_tarj_ci_est,
- "\nTrans error:\n", T_tarj_ci_gt - T_tarj_ci_est,
+print("Check:\n", "Rot error:\n", R_cam_cam2_gt - R_cam_cam2_est,
+ "\nTrans error:\n", T_cam_cam2_gt - T_cam_cam2_est,
"\nError is < 0.001 in R & T: ",
- np.linalg.norm(R_tarj_ci_gt - R_tarj_ci_est) < 0.001,
- np.linalg.norm(T_tarj_ci_gt - T_tarj_ci_est) < 0.001)
+ np.linalg.norm(R_cam_cam2_gt - R_cam_cam2_est) < 0.001, " & ",
+ np.linalg.norm(T_cam_cam2_gt - T_cam_cam2_est) < 0.001)
# Compute camera location in world coordinates
-R_w_ci, T_w_ci, d1, d2, d3, d4, d5, d6, d7, d8 = cv2.composeRT(
- R_tarj_ci_est, T_tarj_ci_est, R_w_tarj, T_w_tarj)
+R_w_cam2, T_w_cam2, d1, d2, d3, d4, d5, d6, d7, d8 = cv2.composeRT(
+ R_cam_cam2_est, T_cam_cam2_est, R_w_cam, T_w_cam)
print("Estimate in world coordinates")
-print("R, T:\n", R_w_ci, "\n", T_w_ci)
-img_ret = field_display.plot_bot_on_field(img_ret, (0, 255, 255), T_w_ci)
+print("R, T:\n", R_w_cam2, "\n", T_w_cam2)
+img_ret = field_display.plot_bot_on_field(img_ret, (0, 255, 255), T_w_cam2)
field_display.show_field(img_ret)
# Compute vector to target
# TODO: Likely better to do this from the homography, rather than the pose estimate...
T_w_target_pt = np.array([[15.98, -4.10 + 2.36, 2.0 - cam_above_ground]]).T
-vector_to_target = T_w_target_pt - T_w_ci
-d_ci_target = np.linalg.norm(vector_to_target)
-phi_ci_target = math.atan2(vector_to_target[1][0], vector_to_target[0][0])
+vector_to_target = T_w_target_pt - T_w_cam2
+d_cam2_target = np.linalg.norm(vector_to_target)
+phi_cam2_target = math.atan2(vector_to_target[1][0], vector_to_target[0][0])
print("Vector to target (from cam frame):\n", vector_to_target,
- "\nDistance to target: ", d_ci_target, "\nAngle to target (deg): ",
- phi_ci_target * 180. / math.pi)
+ "\nDistance to target: ", d_cam2_target, "\nAngle to target (deg): ",
+ phi_cam2_target * 180. / math.pi)
-img_ret = field_display.plot_line_on_field(img_ret, (255, 255, 0), T_w_ci,
+img_ret = field_display.plot_line_on_field(img_ret, (255, 255, 0), T_w_cam2,
T_w_target_pt)
field_display.show_field(img_ret)
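The renamed test above leans on the standard identity for moving points (rather than frames): invert the transform with R_inv = R^T and T_inv = -R^T * T, project from the shifted camera, then invert the recovered PnP pose to get back. A condensed round trip of the same pattern, with assumed intrinsics standing in for the `camera_definition` values:

```python
import cv2
import numpy as np

# Assumed pinhole intrinsics for the sketch.
cam_mat = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
dist = np.zeros((5, 1))

# Target points in the original camera (cam) frame, and a ground-truth
# rotation/translation from cam to a shifted camera (cam2).
pts_3d = np.array([[-1., 0., 5.], [1., 0., 5.], [-1., -1., 5.],
                   [1., -1., 5.], [-1., -2., 5.], [1., -2., 5.],
                   [-1., -3., 5.], [1., -3., 5.]])
R_gt = np.array([[0., 0.2, 0.2]]).T
T_gt = np.array([[1., 2., -5.]]).T

# To move points (not frames), apply the inverse: R^T and -R^T . T.
R_gt_mat, _ = cv2.Rodrigues(R_gt)
rvec_inv, _ = cv2.Rodrigues(R_gt_mat.T)
tvec_inv = -R_gt_mat.T.dot(T_gt)

# Project from the shifted position, then recover and re-invert the pose.
pts_2d, _ = cv2.projectPoints(pts_3d, rvec_inv, tvec_inv, cam_mat, dist)
ok, rvec_est, tvec_est, inliers = cv2.solvePnPRansac(
    pts_3d, pts_2d, cam_mat, dist)
R_est_inv = cv2.Rodrigues(rvec_est)[0].T
T_recovered = -R_est_inv.dot(tvec_est)
print("Translation error:", np.linalg.norm(T_gt - T_recovered))
```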
diff --git a/y2020/vision/tools/python_code/usb_camera_stream.py b/y2020/vision/tools/python_code/usb_camera_stream.py
index 42cb1f1..5d3ae91 100644
--- a/y2020/vision/tools/python_code/usb_camera_stream.py
+++ b/y2020/vision/tools/python_code/usb_camera_stream.py
@@ -7,7 +7,7 @@
print("Could not open video device")
quit()
-while (True):
+while True:
# Capture frame-by-frame
ret, frame = cap.read()
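For reference, the file being touched is the stock OpenCV capture loop; a minimal self-contained version looks like the following (camera index 0 and the 'q' exit key are assumptions here, not part of the original script).

```python
import cv2

# Open the default USB camera.
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Could not open video device")
    quit()

while True:
    # Capture frame-by-frame; ret is False if the read failed.
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('preview', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```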
diff --git a/y2020/www/BUILD b/y2020/www/BUILD
index 146456f..c0daa07 100644
--- a/y2020/www/BUILD
+++ b/y2020/www/BUILD
@@ -10,6 +10,7 @@
deps = [
"//aos/network/www:proxy",
"//y2020/vision:vision_ts_fbs",
+ "//y2020/vision/sift:sift_ts_fbs",
],
visibility = ["//y2020:__subpackages__"],
)
diff --git a/y2020/www/image_handler.ts b/y2020/www/image_handler.ts
index abaf831..802e448 100644
--- a/y2020/www/image_handler.ts
+++ b/y2020/www/image_handler.ts
@@ -1,22 +1,27 @@
-import {frc971} from 'y2020/vision/vision_generated';
+import {CameraImage} from 'y2020/vision/vision_generated';
export class ImageHandler {
private canvas = document.createElement('canvas');
+ private imageBuffer: Uint8ClampedArray|null = null;
+ private imageTimestamp: flatbuffers.Long|null = null;
+ private result: frc971.vision.ImageMatchResult|null = null;
+ private resultTimestamp: flatbuffers.Long|null = null;
constructor() {
document.body.appendChild(this.canvas);
}
- handleImage(data: Uint8Array) {
+ handleImage(data: Uint8Array): void {
const fbBuffer = new flatbuffers.ByteBuffer(data);
- const image = frc971.vision.CameraImage.getRootAsCameraImage(fbBuffer);
+ const image = CameraImage.getRootAsCameraImage(fbBuffer);
+ this.imageTimestamp = image.monotonicTimestampNs();
const width = image.cols();
const height = image.rows();
if (width === 0 || height === 0) {
return;
}
- const imageBuffer = new Uint8ClampedArray(width * height * 4); // RGBA
+ this.imageBuffer = new Uint8ClampedArray(width * height * 4); // RGBA
// Read four bytes (YUYV) from the data and transform into two pixels of
// RGBA for canvas
@@ -46,16 +51,48 @@
}
}
+ this.draw();
+ }
+
+ handleImageMetadata(data: Uint8Array): void {
+ const fbBuffer = new flatbuffers.ByteBuffer(data);
+ this.result = frc971.vision.ImageMatchResult.getRootAsImageMatchResult(fbBuffer);
+ this.resultTimestamp = this.result.imageMonotonicTimestampNs();
+ this.draw();
+ }
+
+ draw(): void {
+ if (this.imageTimestamp === null || this.resultTimestamp === null ||
+ this.imageTimestamp.low !== this.resultTimestamp.low ||
+ this.imageTimestamp.high !== this.resultTimestamp.high) {
+ return;
+ }
const ctx = this.canvas.getContext('2d');
this.canvas.width = width;
this.canvas.height = height;
const idata = ctx.createImageData(width, height);
- idata.data.set(imageBuffer);
+ idata.data.set(this.imageBuffer);
ctx.putImageData(idata, 0, 0);
+ ctx.beginPath();
+ for (let i = 0; i < this.result.featuresLength(); ++i) {
+ const feature = this.result.features(i);
+ // Based on OpenCV drawKeypoint.
+ ctx.arc(feature.x(), feature.y(), feature.size(), 0, 2 * Math.PI);
+ ctx.moveTo(feature.x(), feature.y());
+ // TODO(alex): check that angle is correct (0?, direction?)
+ const angle = feature.angle() * Math.PI / 180;
+ ctx.lineTo(
+ feature.x() + feature.size() * Math.cos(angle),
+ feature.y() + feature.size() * Math.sin(angle));
+ ctx.stroke();
}
- getId() {
- return frc971.vision.CameraImage.getFullyQualifiedName();
+ getId(): string {
+ return CameraImage.getFullyQualifiedName();
+ }
+
+ getResultId(): string {
+ return frc971.vision.ImageMatchResult.getFullyQualifiedName();
}
}
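The per-byte YUYV-to-RGBA expansion that `handleImage` performs can also be expressed as a vectorized sketch; Python/numpy is used here to match the tooling in the rest of this change, and the BT.601 coefficients are an assumption about the camera's YUV encoding.

```python
import numpy as np


def yuyv_to_rgba(data, width, height):
    """Expand packed YUYV (two pixels per four bytes) into RGBA."""
    yuyv = np.frombuffer(data, dtype=np.uint8).reshape(height, width // 2, 4)
    y0, u, y1, v = [yuyv[..., i].astype(np.float32) for i in range(4)]
    # Interleave Y0/Y1 back into a full-width luma plane; U and V are
    # shared by each pixel pair.
    y = np.empty((height, width), dtype=np.float32)
    y[:, 0::2], y[:, 1::2] = y0, y1
    u = np.repeat(u - 128., 2, axis=1)
    v = np.repeat(v - 128., 2, axis=1)
    # BT.601 YUV -> RGB, with a fully opaque alpha channel for the canvas.
    r = y + 1.402 * v
    g = y - 0.344 * u - 0.714 * v
    b = y + 1.772 * u
    rgba = np.stack([r, g, b, np.full_like(y, 255.)], axis=-1)
    return np.clip(rgba, 0, 255).astype(np.uint8)
```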
diff --git a/y2020/www/main.ts b/y2020/www/main.ts
index 7831713..d414eac 100644
--- a/y2020/www/main.ts
+++ b/y2020/www/main.ts
@@ -9,3 +9,5 @@
const iHandler = new ImageHandler();
conn.addHandler(iHandler.getId(), (data) => iHandler.handleImage(data));
+conn.addHandler(
+ iHandler.getResultId(), (data) => iHandler.handleImageMetadata(data));